Contiki 3.x
syscalls-int.c
/*
 * Copyright (C) 2015, Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "prot-domains.h"
#include "tss.h"
#include "helpers.h"
#include "stacks.h"
#include "idt.h"
#include "syscalls.h"
#include "gdt.h"
#include "gdt-layout.h"
#include "interrupt.h"

/**
 * Current protection domain. Not protected, since it is just a convenience
 * variable to avoid unneeded protection domain switches.
 */
dom_id_t cur_dom = DOM_ID_app;

/* defined in syscalls-int-asm.S */
void prot_domains_sysret_dispatcher(void);

/* Maximum depth of inter-domain call stack */
#define MAX_INTER_DOM_CALL_STK_SZ 4

/* Protected call stack for inter-domain system calls. The stack grows up. */
static volatile dom_id_t ATTR_BSS_KERN
  inter_dom_call_stk[MAX_INTER_DOM_CALL_STK_SZ];

/* Pointer to the next (free) slot in the inter-domain call stack */
static int ATTR_BSS_KERN inter_dom_call_stk_ptr;

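/* Example: immediately after prot_domains_launch_kernel_impl() has run,
 * inter_dom_call_stk holds { DOM_ID_app, DOM_ID_kern } and
 * inter_dom_call_stk_ptr is 2; each slot below the top records the caller
 * of the domain in the slot above it.
 */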
/*---------------------------------------------------------------------------*/
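/**
 * \brief Adjust the saved EFLAGS image on the interrupt stack so that
 *        interrupts are only enabled when returning to the application
 *        protection domain in its unprivileged, cooperative context.
 */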
static inline void __attribute__((always_inline))
update_eflags(dom_id_t from_id, dom_id_t to_id, interrupt_stack_t *intr_stk)
{
  if((to_id == DOM_ID_app) &&
     (DT_SEL_GET_RPL(intr_stk->cs) == PRIV_LVL_USER)) {
    /* Only enable interrupts in the application protection domain's
     * cooperative scheduling context.
     */
    intr_stk->eflags |= EFLAGS_IF;
  } else {
    intr_stk->eflags &= ~EFLAGS_IF;
  }
}
/*---------------------------------------------------------------------------*/
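/**
 * \brief Common tail for the system call and system call return dispatchers:
 *        record the new current domain, perform the low-level domain switch,
 *        restore write protection, and update the saved EFLAGS.
 */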
static inline void __attribute__((always_inline))
dispatcher_tail(dom_id_t from_id, dom_id_t to_id, interrupt_stack_t *intr_stk)
{
  cur_dom = to_id;

  prot_domains_switch(from_id, to_id, intr_stk);

  prot_domains_set_wp(true);

  update_eflags(from_id, to_id, intr_stk);
}
/*---------------------------------------------------------------------------*/
int main(void);
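/**
 * \brief Shared tail for dispatching a system call: mark the destination
 *        domain busy, redirect the interrupt return to the approved system
 *        call entrypoint, save the caller's return address in kernel-owned
 *        data, substitute the system call return stub on the caller's stack,
 *        and push the destination domain onto the inter-domain call stack.
 */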
static inline void __attribute__((always_inline))
syscall_dispatcher_tail(interrupt_stack_t *intr_stk,
                        dom_id_t to_id,
                        uint32_t syscall_eip)
{
  dom_id_t from_id;
  uint32_t tmp;
  volatile dom_kern_data_t ATTR_KERN_ADDR_SPACE *from_dkd, *to_dkd;

  uint32_t loc_call_stk_ptr;

  to_dkd = prot_domains_kern_data + to_id;

  /* This implementation of protection domains is non-reentrant. For example,
   * while dispatching a system call it takes the return address from the
   * stack of the caller domain and stores it in a single field in the kernel
   * data associated with that caller domain. That model does not permit
   * reentrancy.
   */
  KERN_READL(tmp, to_dkd->flags);
  if((tmp & PROT_DOMAINS_FLAG_BUSY) == PROT_DOMAINS_FLAG_BUSY) {
    halt();
  }
  tmp |= PROT_DOMAINS_FLAG_BUSY;
  KERN_WRITEL(to_dkd->flags, tmp);

  /* Update the interrupt stack so that the IRET instruction will return to
   * the system call entrypoint.
   */
  intr_stk->eip = syscall_eip;

  KERN_READL(loc_call_stk_ptr, inter_dom_call_stk_ptr);
  /* Look up the information for the caller */
  KERN_READL(from_id, inter_dom_call_stk[loc_call_stk_ptr - 1]);
  from_dkd = prot_domains_kern_data + from_id;

  /* Save the current return address from the unprivileged stack to a
   * protected location in the kernel-owned data structure. This enforces
   * return entrypoint control.
   */
  KERN_WRITEL(from_dkd->orig_ret_addr, *(uintptr_t *)intr_stk->esp);
  /* Update the unprivileged stack so that when the system call body is
   * complete, it will invoke the system call return stub.
   */
  *((uintptr_t *)intr_stk->esp) = (uintptr_t)prot_domains_sysret_stub;

  if(MAX_INTER_DOM_CALL_STK_SZ <= loc_call_stk_ptr) {
    halt();
  }
  KERN_WRITEL(inter_dom_call_stk[loc_call_stk_ptr], to_id);

  loc_call_stk_ptr++;
  KERN_WRITEL(inter_dom_call_stk_ptr, loc_call_stk_ptr);

  dispatcher_tail(from_id, to_id, intr_stk);
}
/*---------------------------------------------------------------------------*/
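/**
 * \brief C-level system call dispatcher, presumably invoked from the
 *        assembly stub registered on PROT_DOMAINS_SYSCALL_DISPATCH_INT.
 *        Validates the destination domain and the system call descriptor
 *        before disabling write protection and completing the dispatch in
 *        syscall_dispatcher_tail().
 */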
void __attribute__((fastcall))
prot_domains_syscall_dispatcher_impl(interrupt_stack_t *intr_stk,
                                     dom_id_t to_id,
                                     syscalls_entrypoint_t *syscall)
{
  uint32_t tmp;
  uint32_t syscall_eip;

  if(PROT_DOMAINS_ACTUAL_CNT <= to_id) {
    halt();
  }

  /* Get the approved entrypoint for the system call being invoked */

  if(!((((uintptr_t)syscalls_entrypoints) <= (uintptr_t)syscall) &&
       (((uintptr_t)syscall) < (uintptr_t)syscalls_entrypoints_end) &&
       (((((uintptr_t)syscall) - (uintptr_t)syscalls_entrypoints)
         % sizeof(syscalls_entrypoint_t)) == 0))) {
    /* Assert is not usable when switching protection domains */
    halt();
  }

  KERN_READL(tmp, syscall->doms);
  if((BIT(to_id) & tmp) == 0) {
    halt();
  }

  KERN_READL(syscall_eip, syscall->entrypoint);

  prot_domains_set_wp(false);

  syscall_dispatcher_tail(intr_stk, to_id, syscall_eip);
}
/*---------------------------------------------------------------------------*/
int main(void);
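/**
 * \brief Perform the initial dispatch into the kernel protection domain at
 *        boot: seed the inter-domain call stack with the application domain
 *        as the caller and transfer control to main() in the kernel domain.
 */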
void __attribute__((fastcall))
prot_domains_launch_kernel_impl(interrupt_stack_t *intr_stk)
{
  KERN_WRITEL(inter_dom_call_stk[0], DOM_ID_app);

  KERN_WRITEL(inter_dom_call_stk_ptr, 1);

  syscall_dispatcher_tail(intr_stk, DOM_ID_kern, (uint32_t)main);
}
/*---------------------------------------------------------------------------*/
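/**
 * \brief C-level system call return dispatcher: restore the return address
 *        saved for the caller domain, clear the busy flag of the domain
 *        being exited, and pop the inter-domain call stack.
 */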
void __attribute__((fastcall))
prot_domains_sysret_dispatcher_impl(interrupt_stack_t *intr_stk)
{
  dom_id_t from_id, to_id;
  uint32_t loc_call_stk_ptr;
  uint32_t flags;

  KERN_READL(loc_call_stk_ptr, inter_dom_call_stk_ptr);
  if(loc_call_stk_ptr <= 1) {
    halt();
  }

  KERN_READL(from_id, inter_dom_call_stk[loc_call_stk_ptr - 1]);
  KERN_READL(to_id, inter_dom_call_stk[loc_call_stk_ptr - 2]);

  KERN_READL(intr_stk->eip,
             prot_domains_kern_data[to_id].orig_ret_addr);

  prot_domains_set_wp(false);

  KERN_READL(flags, prot_domains_kern_data[from_id].flags);
  flags &= ~PROT_DOMAINS_FLAG_BUSY;
  KERN_WRITEL(prot_domains_kern_data[from_id].flags, flags);

  KERN_WRITEL(inter_dom_call_stk_ptr, loc_call_stk_ptr - 1);

  dispatcher_tail(from_id, to_id, intr_stk);
}
/*---------------------------------------------------------------------------*/
/**
 * \brief Look up the current protection domain.
 * \return Kernel data structure for the current protection domain.
 */
static volatile dom_kern_data_t ATTR_KERN_ADDR_SPACE *
get_current_domain(void)
{
  uint32_t loc_call_stk_ptr;
  dom_id_t id;
  KERN_READL(loc_call_stk_ptr, inter_dom_call_stk_ptr);
  KERN_READL(id, inter_dom_call_stk[loc_call_stk_ptr - 1]);
  return prot_domains_kern_data + id;
}
/*---------------------------------------------------------------------------*/
/**
 * \brief Check whether the protection domain is authorized to perform port
 *        I/O from the cooperative scheduling context.
 * \param dkd Protection domain to check
 * \return Result of the check as a Boolean value
 */
static bool
needs_port_io(volatile dom_kern_data_t ATTR_KERN_ADDR_SPACE *dkd)
{
  uint32_t dkd_flags;
  KERN_READL(dkd_flags, dkd->flags);
  return (dkd_flags & PROT_DOMAINS_FLAG_PIO) == PROT_DOMAINS_FLAG_PIO;
}
/*---------------------------------------------------------------------------*/
/* Mark the context parameter as volatile so that writes to it will not get
 * optimized out. This parameter is not handled like an ordinary function
 * parameter: it partially overlays the contents of the exception stack, so
 * updates to those locations affect the operation of the subsequent
 * interrupt return.
 */
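/**
 * \brief General protection fault handler. For a protection domain with
 *        PROT_DOMAINS_FLAG_PIO set, emulate the single-byte port I/O
 *        instruction (opcodes 0xEC-0xEF) that raised the fault and resume
 *        past it; any other fault halts the system.
 */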
static void
gp_fault_handler(volatile struct interrupt_context context)
{
  uint32_t cs_lim;
  uint8_t opcode;

  volatile dom_kern_data_t ATTR_KERN_ADDR_SPACE *dkd = get_current_domain();
  if(needs_port_io(dkd)) {
    __asm__ __volatile__ (
      "mov %%cs, %0\n\t"
      "lsl %0, %0\n\t"
      : "=r"(cs_lim));

    if(cs_lim < context.eip) {
      halt();
    }

    /* Load first byte of faulting instruction */
    __asm__ __volatile__ (
      "movb %%cs:%1, %0"
      : "=q"(opcode)
      : "m"(*(uint8_t *)context.eip));

    switch(opcode) {
    case 0xEC: /* inb */
      context.eax = (context.eax & ~0xFF) | inb((uint16_t)context.edx);
      break;
    case 0xED: /* inl */
      context.eax = inl((uint16_t)context.edx);
      break;
    case 0xEE: /* outb */
      outb((uint16_t)context.edx, (uint8_t)context.eax);
      break;
    case 0xEF: /* outl */
      outl((uint16_t)context.edx, context.eax);
      break;
    default:
      halt();
    }

    /* Skip the faulting port I/O instruction that was emulated. */
    context.eip++;
  } else {
    halt();
  }
}
/*---------------------------------------------------------------------------*/
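/**
 * \brief Initialize interrupt-based system call dispatching: set up the TSS,
 *        install the general protection fault handler, and register the
 *        system call and system call return dispatchers as interrupt gates
 *        that can be invoked from the user privilege level.
 */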
void
syscalls_int_init(void)
{
  tss_init();

  SET_EXCEPTION_HANDLER(13, 1, gp_fault_handler);

  /* Register system call dispatchers: */

  idt_set_intr_gate_desc(PROT_DOMAINS_SYSCALL_DISPATCH_INT,
                         (uint32_t)prot_domains_syscall_dispatcher,
                         GDT_SEL_CODE_EXC,
                         PRIV_LVL_USER);
  idt_set_intr_gate_desc(PROT_DOMAINS_SYSRET_DISPATCH_INT,
                         (uint32_t)prot_domains_sysret_dispatcher,
                         GDT_SEL_CODE_EXC,
                         PRIV_LVL_USER);
}
/*---------------------------------------------------------------------------*/