/*
 * Paging-based protection-domain support for x86 (PAE mode).
 * NOTE(review): this chunk is an extraction with the original file's line
 * numbers fused into each line and some lines missing; code is left
 * byte-identical and only comments are added.
 */
34 #include "gdt-layout.h"
38 #include "prot-domains.h"
39 #include "segmentation.h"
/* CR4.PAE (bit 5): enables Physical Address Extension, required for the
 * 64-bit PAE page-table-entry format used by these tables. */
47 #define CR4_PAE BIT(5)
/* MSR number of IA32_EFER. */
50 #define MSR_EFER 0xC0000080
/* EFER.NXE (bit 11): enables the execute-disable (XD) bit in PAE PTEs. */
53 #define EFER_NXE BIT(11)
/* Root PDPT referenced by CR3.  32-byte alignment is the architectural
 * minimum for a PAE PDPT. */
56 static pdpt_t root_pgtbl
__attribute__((aligned(32))) ATTR_BSS_KERN;
/* NOTE(review): the type/qualifiers of second_lvl_pgtbl are on a line
 * missing from this view (presumably a static page-directory type). */
63 second_lvl_pgtbl ATTR_BSS_KERN_PAGE_ALIGNED;
/* Single leaf page table: all mappings managed here fall in one table. */
65 static page_table_t leaf_pgtbl ATTR_BSS_KERN_PAGE_ALIGNED;
/* One leaf table maps linear addresses in [0, LINEAR_ADDR_BOUND). */
67 #define LINEAR_ADDR_BOUND (MIN_PAGE_SIZE * ENTRIES_PER_PAGE_TABLE)
/* Body fragment of the domain-registration routine; its signature and the
 * early-return/error lines are on lines missing from this view.  Validates
 * the domain's MMIO and metadata regions, then records them in the
 * kernel-owned per-domain bookkeeping. */
78 dom_id_t dom_id = dcd->dom_id;
79 volatile struct dom_kern_data *dkd =
80 prot_domains_kern_data + dom_id;
/* Reject: out-of-range domain IDs; MMIO/metadata base or size not
 * aligned to MIN_PAGE_SIZE; MMIO region larger than the reserved linear
 * window; metadata region overflowing the single leaf table's
 * LINEAR_ADDR_BOUND. */
83 if((PROT_DOMAINS_ACTUAL_CNT <= dom_id) ||
84 ((mmio & (MIN_PAGE_SIZE - 1)) != 0) ||
85 ((mmio_sz & (MIN_PAGE_SIZE - 1)) != 0) ||
86 ((meta & (MIN_PAGE_SIZE - 1)) != 0) ||
87 ((meta_sz & (MIN_PAGE_SIZE - 1)) != 0) ||
88 (PROT_DOMAINS_MAX_MMIO_SZ < mmio_sz) ||
89 (LINEAR_ADDR_BOUND < (PROT_DOMAINS_META_LINEAR_BASE + meta_sz))) {
/* Refuse to initialize the same domain twice. */
93 if((dkd->flags & PROT_DOMAINS_FLAG_INITED) == PROT_DOMAINS_FLAG_INITED) {
98 dkd->mmio_sz = mmio_sz;
/* NOTE(review): the dkd->mmio and dkd->meta base-address assignments are
 * on lines missing from this view. */
100 dkd->meta_sz = meta_sz;
101 dkd->flags = PROT_DOMAINS_FLAG_INITED;
/* Presumably gated on a port-I/O-permission argument; the enclosing
 * if-statement is on a missing line — TODO confirm against full source. */
103 dkd->flags |= PROT_DOMAINS_FLAG_PIO;
/* Map the physical range [start_pa, end_pa) starting at linear address
 * start_la, filling leaf_pgtbl entries derived from 'template'.  All three
 * addresses must be MIN_PAGE_SIZE-aligned and the mapping must fit below
 * LINEAR_ADDR_BOUND. */
108 set_ptes(uintptr_t start_la, uintptr_t start_pa, uintptr_t end_pa,
/* Optional runtime sanity checks on the requested mapping. */
111 #ifdef DBG_PAGE_ALLOC
112 #warning Checking page allocations at runtime.
/* NOTE(review): the third clause duplicates the first (start_la alignment
 * is tested twice); the duplicate is redundant copy-paste. */
114 if(((start_la & (MIN_PAGE_SIZE - 1)) != 0) ||
115 ((start_pa & (MIN_PAGE_SIZE - 1)) != 0) ||
116 ((start_la & (MIN_PAGE_SIZE - 1)) != 0) ||
117 ((end_pa & (MIN_PAGE_SIZE - 1)) != 0) ||
118 (LINEAR_ADDR_BOUND <= (start_la + (end_pa - start_pa)))) {
/* One PTE per MIN_PAGE_SIZE page. */
123 while(start_pa < end_pa) {
/* PAE PTEs store the physical frame number, i.e. address >> 12. */
124 template.addr = start_pa >> 12;
126 leaf_pgtbl[start_la >> MIN_PAGE_SIZE_SHAMT] =
template;
/* Invalidate the stale TLB entry for this page when per-page
 * invalidation is configured; otherwise the caller flushes the whole
 * TLB via a CR3 reload (see prot_domains_switch). */
128 #ifdef X86_CONF_USE_INVLPG
129 __asm__(
"invlpg %0" ::
"m" (*(uint8_t *)start_la));
132 start_la += MIN_PAGE_SIZE;
133 start_pa += MIN_PAGE_SIZE;
/* Identity-map [start_pa, end_pa): linear address equals physical address.
 * Thin wrapper over set_ptes with start_la == start_pa. */
138 set_ptes_identity_map(uintptr_t start_pa, uintptr_t end_pa,
pte_t template)
140 set_ptes(start_pa, start_pa, end_pa,
template);
/* Update the page tables when switching from protection domain 'from_id'
 * to 'to_id': adjusts the mapping of the kernel-private data sections and
 * remaps/unmaps the per-domain MMIO and metadata windows.  Several interior
 * lines (PTE initializers, call arguments, closing braces) are missing from
 * this view; code is left byte-identical. */
144 prot_domains_switch(dom_id_t from_id, dom_id_t to_id,
145 interrupt_stack_t *intr_stk)
147 volatile dom_kern_data_t *from, *to;
149 from = prot_domains_kern_data + from_id;
150 to = prot_domains_kern_data + to_id;
/* Kernel-data permissions only change when entering or leaving the
 * kernel domain itself. */
152 if((from_id == DOM_ID_kern) ||
153 (to_id == DOM_ID_kern)) {
154 pte_t to_kern_data_pte = { .raw = 0 };
155 to_kern_data_pte.present = 1;
/* Kernel data is never executable. */
156 to_kern_data_pte.exec_disable = 1;
160 to_kern_data_pte.user_accessible = 1;
/* Only the kernel domain may write its own data section. */
161 if(to_id == DOM_ID_kern) {
162 to_kern_data_pte.writable = 1;
/* Remap [_sbss_kern, _ebss_syscall) with the permissions chosen above. */
165 set_ptes_identity_map((uintptr_t)&_sbss_kern_addr,
166 (uintptr_t)&_ebss_syscall_addr,
/* The region past the syscall data is hidden entirely from non-kernel
 * domains. */
169 if(to_id != DOM_ID_kern) {
170 to_kern_data_pte.user_accessible = 0;
171 to_kern_data_pte.writable = 0;
174 set_ptes_identity_map((uintptr_t)&_ebss_syscall_addr,
175 (uintptr_t)&_ebss_kern_addr,
/* Map the incoming domain's MMIO region into its linear window. */
179 if(to->mmio_sz != 0) {
182 pte.exec_disable = 1;
183 pte.user_accessible = 1;
188 set_ptes(PROT_DOMAINS_MMIO_LINEAR_BASE,
190 to->mmio + to->mmio_sz,
/* Unmap whatever tail of the outgoing domain's (larger) MMIO window the
 * new mapping did not overwrite. */
193 if(to->mmio_sz < from->mmio_sz) {
196 set_ptes_identity_map(PROT_DOMAINS_MMIO_LINEAR_BASE + to->mmio_sz,
197 PROT_DOMAINS_MMIO_LINEAR_BASE + from->mmio_sz,
/* Map the incoming domain's metadata region into its linear window. */
201 if(to->meta_sz != 0) {
204 pte.exec_disable = 1;
205 pte.user_accessible = 1;
208 set_ptes(PROT_DOMAINS_META_LINEAR_BASE,
210 to->meta + to->meta_sz,
213 if(to->meta_sz < from->meta_sz) {
/* FIXME(review): copy-paste bug — this is the METADATA unmap branch
 * (the guard above compares meta_sz, and the base is
 * PROT_DOMAINS_META_LINEAR_BASE), yet the offsets use to->mmio_sz and
 * from->mmio_sz.  Compare the parallel MMIO branch above; these should
 * be to->meta_sz / from->meta_sz. */
216 set_ptes_identity_map(PROT_DOMAINS_META_LINEAR_BASE + to->mmio_sz,
217 PROT_DOMAINS_META_LINEAR_BASE + from->mmio_sz,
/* Without per-page INVLPG, flush the entire (non-global) TLB by
 * reloading CR3 with its current value. */
221 #ifndef X86_CONF_USE_INVLPG
222 __asm__ __volatile__ (
"mov %%cr3, %%eax\n\t"
223 "mov %%eax, %%cr3\n\t" :::
"eax");
/* Populate the extra GDT descriptors this scheme needs: a user-privilege
 * copy of the flat data descriptor and an interrupt-privilege copy of the
 * exception-stack descriptor. */
230 prot_domains_gdt_init(
void)
232 gdt_copy_desc_change_dpl(GDT_IDX_DATA, GDT_IDX_DATA_FLAT, PRIV_LVL_USER);
233 gdt_copy_desc_change_dpl(GDT_IDX_STK_INT, GDT_IDX_STK_EXC, PRIV_LVL_INT);
/* One-time paging setup: wire root_pgtbl -> second_lvl_pgtbl ->
 * leaf_pgtbl, identity-map the kernel text/stack/data sections with
 * appropriate permissions, enable EFER.NXE and CR4.PAE, and load CR3.
 * Several interior lines (PTE field initializers, closing punctuation)
 * are missing from this view; code is left byte-identical. */
237 prot_domains_impl_init(
void)
/* Point the root (PDPT) entry at the second-level table. */
246 pte.addr = ((uint32_t)second_lvl_pgtbl) >> MIN_PAGE_SIZE_SHAMT;
251 pte.user_accessible = 1;
/* Point the first second-level entry at the single leaf table. */
252 pte.addr = ((uint32_t)leaf_pgtbl) >> MIN_PAGE_SIZE_SHAMT;
254 second_lvl_pgtbl[0] = pte;
/* Kernel text: identity-mapped, executable. */
259 set_ptes_identity_map((uintptr_t)&_stext_addr, (uintptr_t)&_etext_addr, pte);
/* Main stacks: identity-mapped, execute-disabled. */
264 pte.exec_disable = 1;
265 set_ptes_identity_map((uintptr_t)stacks_main,
266 (uintptr_t)stacks_main +
/* Data section: identity-mapped, execute-disabled. */
271 set_ptes_identity_map((uintptr_t)&_sdata_addr, (uintptr_t)&_edata_addr, pte);
/* WRMSR(IA32_EFER): set NXE so the exec_disable PTE bit takes effect.
 * edx:eax = 0:EFER_NXE. */
274 __asm__ __volatile__ (
"wrmsr" ::
"c" (MSR_EFER),
"a" (EFER_NXE),
"d" (0));
/* Set CR4.PAE (read-modify-write of CR4; the OR with CR4_PAE is on a
 * line missing from this view). */
277 __asm__ __volatile__ (
"mov %%cr4, %%eax\n\t"
279 "mov %%eax, %%cr4\n\t"
/* Load CR3 with the root PDPT to activate the new page tables. */
285 __asm__ __volatile__ (
"mov %0, %%cr3" ::
"r" (root_pgtbl));
/* Return fragment of a metadata-lookup accessor: yields the metadata base
 * recorded for the driver's domain.  The enclosing signature is on lines
 * missing from this view — presumably takes the driver descriptor 'drv'. */
291 return prot_domains_kern_data[drv->dom_id].meta;
/* Pull in the shared software-interrupt syscall dispatch implementation
 * (textual include of a .c file is this port's convention). */
297 #include "syscalls-int.c"
#define __attribute__(nothing)
Define __attribute__ to expand to nothing, since the IAR compiler does not support it.
Page table entry format for PAE mode page table.
Data associated with each protection domain that is owned by clients of that domain and used to ident...
uint64_t pcd
Disable caching.