/* Contiki 3.x — cpu/x86/mm/paging-prot-domains.c */
1 /*
2  * Copyright (C) 2015, Intel Corporation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  *
13  * 3. Neither the name of the copyright holder nor the names of its
14  * contributors may be used to endorse or promote products derived
15  * from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
20  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
21  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
28  * OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <string.h>
32 #include "dma.h"
33 #include "gdt.h"
34 #include "gdt-layout.h"
35 #include "helpers.h"
36 #include "idt.h"
37 #include "paging.h"
38 #include "prot-domains.h"
39 #include "segmentation.h"
40 #include "stacks.h"
41 #include "syscalls.h"
42 #include "tss.h"
43 
/* Uncomment to enable runtime sanity checks in set_ptes() below. */
/*#define DBG_PAGE_ALLOC*/

/* CR4 bit that enables PAE-mode paging */
#define CR4_PAE BIT(5)

/* Extended Feature Enables MSR */
#define MSR_EFER 0xC0000080

/* EFER bit that enables Execute Disable (XD) page-table bit support */
#define EFER_NXE BIT(11)

/* Root page-directory-pointer table; CR3 is loaded with its address in
 * prot_domains_impl_init().  PAE only requires 32-byte alignment for the
 * PDPT itself.
 */
static pdpt_t root_pgtbl __attribute__((aligned(32))) ATTR_BSS_KERN;
/* Although the following page tables must be page-aligned, it is infeasible to
 * apply the "aligned(4096)" attribute for the reasons described in the linker
 * script.
 */
/* Second-level page directory */
static page_table_t
second_lvl_pgtbl ATTR_BSS_KERN_PAGE_ALIGNED;
/* Leaf-level page table */
static page_table_t leaf_pgtbl ATTR_BSS_KERN_PAGE_ALIGNED;

/* Exclusive upper bound on linear addresses mappable through the single leaf
 * page table: one table's worth of minimum-size pages.
 */
#define LINEAR_ADDR_BOUND (MIN_PAGE_SIZE * ENTRIES_PER_PAGE_TABLE)
68 
69 /*---------------------------------------------------------------------------*/
70 void
71 prot_domains_reg(dom_client_data_t *dcd,
72  uintptr_t mmio,
73  size_t mmio_sz,
74  uintptr_t meta,
75  size_t meta_sz,
76  bool pio)
77 {
78  dom_id_t dom_id = dcd->dom_id;
79  volatile struct dom_kern_data *dkd =
80  prot_domains_kern_data + dom_id;
81 
82  /* All addresses and sizes must be page-aligned */
83  if((PROT_DOMAINS_ACTUAL_CNT <= dom_id) ||
84  ((mmio & (MIN_PAGE_SIZE - 1)) != 0) ||
85  ((mmio_sz & (MIN_PAGE_SIZE - 1)) != 0) ||
86  ((meta & (MIN_PAGE_SIZE - 1)) != 0) ||
87  ((meta_sz & (MIN_PAGE_SIZE - 1)) != 0) ||
88  (PROT_DOMAINS_MAX_MMIO_SZ < mmio_sz) ||
89  (LINEAR_ADDR_BOUND < (PROT_DOMAINS_META_LINEAR_BASE + meta_sz))) {
90  halt();
91  }
92 
93  if((dkd->flags & PROT_DOMAINS_FLAG_INITED) == PROT_DOMAINS_FLAG_INITED) {
94  halt();
95  }
96 
97  dkd->mmio = mmio;
98  dkd->mmio_sz = mmio_sz;
99  dkd->meta = meta;
100  dkd->meta_sz = meta_sz;
101  dkd->flags = PROT_DOMAINS_FLAG_INITED;
102  if(pio) {
103  dkd->flags |= PROT_DOMAINS_FLAG_PIO;
104  }
105 }
106 /*---------------------------------------------------------------------------*/
107 static void __attribute__((regparm(3)))
108 set_ptes(uintptr_t start_la, uintptr_t start_pa, uintptr_t end_pa,
109  pte_t template)
110 {
111 #ifdef DBG_PAGE_ALLOC
112 #warning Checking page allocations at runtime.
113 
114  if(((start_la & (MIN_PAGE_SIZE - 1)) != 0) ||
115  ((start_pa & (MIN_PAGE_SIZE - 1)) != 0) ||
116  ((start_la & (MIN_PAGE_SIZE - 1)) != 0) ||
117  ((end_pa & (MIN_PAGE_SIZE - 1)) != 0) ||
118  (LINEAR_ADDR_BOUND <= (start_la + (end_pa - start_pa)))) {
119  halt();
120  }
121 #endif
122 
123  while(start_pa < end_pa) {
124  template.addr = start_pa >> 12;
125 
126  leaf_pgtbl[start_la >> MIN_PAGE_SIZE_SHAMT] = template;
127 
128 #ifdef X86_CONF_USE_INVLPG
129  __asm__("invlpg %0" :: "m" (*(uint8_t *)start_la));
130 #endif
131 
132  start_la += MIN_PAGE_SIZE;
133  start_pa += MIN_PAGE_SIZE;
134  }
135 }
136 /*---------------------------------------------------------------------------*/
137 static void __attribute__((fastcall))
138 set_ptes_identity_map(uintptr_t start_pa, uintptr_t end_pa, pte_t template)
139 {
140  set_ptes(start_pa, start_pa, end_pa, template);
141 }
142 /*---------------------------------------------------------------------------*/
143 static inline uint32_t __attribute__((always_inline))
144 prot_domains_switch(dom_id_t from_id, dom_id_t to_id,
145  interrupt_stack_t *intr_stk)
146 {
147  volatile dom_kern_data_t *from, *to;
148 
149  from = prot_domains_kern_data + from_id;
150  to = prot_domains_kern_data + to_id;
151 
152  if((from_id == DOM_ID_kern) ||
153  (to_id == DOM_ID_kern)) {
154  pte_t to_kern_data_pte = { .raw = 0 };
155  to_kern_data_pte.present = 1;
156  to_kern_data_pte.exec_disable = 1;
157  /* The kernel data region should always be accessible to supervisory code,
158  * but it is only accessible to user mode in the kernel protection domain.
159  */
160  to_kern_data_pte.user_accessible = 1;
161  if(to_id == DOM_ID_kern) {
162  to_kern_data_pte.writable = 1;
163  }
164 
165  set_ptes_identity_map((uintptr_t)&_sbss_kern_addr,
166  (uintptr_t)&_ebss_syscall_addr,
167  to_kern_data_pte);
168 
169  if(to_id != DOM_ID_kern) {
170  to_kern_data_pte.user_accessible = 0;
171  to_kern_data_pte.writable = 0;
172  }
173 
174  set_ptes_identity_map((uintptr_t)&_ebss_syscall_addr,
175  (uintptr_t)&_ebss_kern_addr,
176  to_kern_data_pte);
177  }
178 
179  if(to->mmio_sz != 0) {
180  pte_t pte = { .raw = 0 };
181  pte.present = 1;
182  pte.exec_disable = 1;
183  pte.user_accessible = 1;
184  pte.writable = 1;
185  /* disable caching of MMIO accesses */
186  pte.pcd = 1;
187 
188  set_ptes(PROT_DOMAINS_MMIO_LINEAR_BASE,
189  to->mmio,
190  to->mmio + to->mmio_sz,
191  pte);
192  }
193  if(to->mmio_sz < from->mmio_sz) {
194  pte_t pte = { .raw = 0 };
195 
196  set_ptes_identity_map(PROT_DOMAINS_MMIO_LINEAR_BASE + to->mmio_sz,
197  PROT_DOMAINS_MMIO_LINEAR_BASE + from->mmio_sz,
198  pte);
199  }
200 
201  if(to->meta_sz != 0) {
202  pte_t pte = { .raw = 0 };
203  pte.present = 1;
204  pte.exec_disable = 1;
205  pte.user_accessible = 1;
206  pte.writable = 1;
207 
208  set_ptes(PROT_DOMAINS_META_LINEAR_BASE,
209  to->meta,
210  to->meta + to->meta_sz,
211  pte);
212  }
213  if(to->meta_sz < from->meta_sz) {
214  pte_t pte = { .raw = 0 };
215 
216  set_ptes_identity_map(PROT_DOMAINS_META_LINEAR_BASE + to->mmio_sz,
217  PROT_DOMAINS_META_LINEAR_BASE + from->mmio_sz,
218  pte);
219  }
220 
221 #ifndef X86_CONF_USE_INVLPG
222  __asm__ __volatile__ ("mov %%cr3, %%eax\n\t"
223  "mov %%eax, %%cr3\n\t" ::: "eax");
224 #endif
225 
226  return 0;
227 }
228 /*---------------------------------------------------------------------------*/
/* Populate the GDT descriptors this implementation needs, by copying existing
 * descriptors and changing only their privilege level (DPL).
 */
void
prot_domains_gdt_init(void)
{
  /* Flat user-mode data segment: copy of the kernel data descriptor at user
   * privilege.
   */
  gdt_copy_desc_change_dpl(GDT_IDX_DATA, GDT_IDX_DATA_FLAT, PRIV_LVL_USER);
  /* Exception-stack segment: copy of the interrupt-stack descriptor at
   * interrupt privilege.
   */
  gdt_copy_desc_change_dpl(GDT_IDX_STK_INT, GDT_IDX_STK_EXC, PRIV_LVL_INT);
}
235 /*---------------------------------------------------------------------------*/
/* Build the initial three-level PAE page-table hierarchy, identity-map the
 * code, stack, and data sections, then enable XD support and PAE and load
 * CR3.  The ordering of the three privileged register writes at the end is
 * significant: EFER.NXE and CR4.PAE are set before the page tables are
 * activated via CR3.
 */
void
prot_domains_impl_init(void)
{
  pte_t pte = { .raw = 0 };

  syscalls_int_init();

  /* Initialize page table: */

  /* Root PDPT entry 0 points at the second-level page directory.  PDPT
   * entries have no writable/user bits, so only 'present' is set here.
   */
  pte.present = 1;
  pte.addr = ((uint32_t)second_lvl_pgtbl) >> MIN_PAGE_SIZE_SHAMT;

  root_pgtbl[0] = pte;

  /* Directory entry 0 points at the leaf page table.  It is marked writable
   * and user-accessible so that per-page permissions in the leaf table alone
   * decide access.
   */
  pte.writable = 1;
  pte.user_accessible = 1;
  pte.addr = ((uint32_t)leaf_pgtbl) >> MIN_PAGE_SIZE_SHAMT;

  second_lvl_pgtbl[0] = pte;

  /* Map code sections: */

  /* Code is read-only and (implicitly) executable: exec_disable is still 0. */
  pte.writable = 0;
  set_ptes_identity_map((uintptr_t)&_stext_addr, (uintptr_t)&_etext_addr, pte);

  /* Map data sections: */

  /* Data and stacks are writable but never executable. */
  pte.writable = 1;
  pte.exec_disable = 1;
  /* The main, exception, and interrupt stacks are laid out contiguously
   * starting at stacks_main, so one contiguous mapping covers all three.
   */
  set_ptes_identity_map((uintptr_t)stacks_main,
                        (uintptr_t)stacks_main +
                        STACKS_SIZE_MAIN +
                        STACKS_SIZE_EXC +
                        STACKS_SIZE_INT,
                        pte);
  set_ptes_identity_map((uintptr_t)&_sdata_addr, (uintptr_t)&_edata_addr, pte);

  /* Enable XD bit support */
  /* NOTE(review): this writes EFER with only the NXE bit set, clobbering any
   * other EFER bits -- presumably acceptable on this target since no other
   * EFER features are in use; confirm before porting.
   */
  __asm__ __volatile__ ("wrmsr" :: "c" (MSR_EFER), "a" (EFER_NXE), "d" (0));

  /* Enable PAE */
  __asm__ __volatile__ ("mov %%cr4, %%eax\n\t"
                        "or %0, %%eax\n\t"
                        "mov %%eax, %%cr4\n\t"
                        :
                        : "r" (CR4_PAE)
                        : "eax");

  /* Load CR3 */
  __asm__ __volatile__ ("mov %0, %%cr3" :: "r" (root_pgtbl));
}
287 /*---------------------------------------------------------------------------*/
288 uintptr_t
289 prot_domains_lookup_meta_phys_base(dom_client_data_t *drv)
290 {
291  return prot_domains_kern_data[drv->dom_id].meta;
292 }
293 /*---------------------------------------------------------------------------*/
294 
295 /* Enable inter-procedural optimization with procedures in the following file:
296  */
297 #include "syscalls-int.c"
/* Doxygen cross-reference residue from documentation extraction:
 * - "#define __attribute__(nothing)": defines __attribute__ to nothing since
 *   it isn't handled by IAR (iar.h:194).
 * - pte_t: page table entry format for PAE-mode page tables (paging.h:40).
 * - dom_client_data_t: data associated with each protection domain that is
 *   owned by clients of that domain (prot-domains.h:247).
 * - uint64_t pcd: PTE bit that disables caching (paging.h:46).
 */