/* Contiki 3.x — cpu/x86/mm/multi-segment.h */
1 /*
2  * Copyright (C) 2015, Intel Corporation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  *
13  * 3. Neither the name of the copyright holder nor the names of its
14  * contributors may be used to endorse or promote products derived
15  * from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
20  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
21  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
28  * OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #ifndef CPU_X86_MM_MULTI_SEGMENT_H_
32 #define CPU_X86_MM_MULTI_SEGMENT_H_
33 
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include "helpers.h"
37 #include "ldt-layout.h"
38 
/*
 * GCC predefines __SEG_FS/__SEG_GS and accepts the __seg_fs/__seg_gs named
 * address-space qualifiers when it supports x86 segment-relative addressing.
 * Clang does not predefine these macros, but exposes the same functionality
 * through its x86 address_space attributes: 257 maps to the FS segment and
 * 256 to the GS segment.  Emulate the GCC interface here so the rest of the
 * code can test __SEG_FS/__SEG_GS uniformly.
 */
#ifdef __clang__
#define __SEG_FS
#define __seg_fs __attribute__((address_space(257)))
#define __SEG_GS
#define __seg_gs __attribute__((address_space(256)))
#endif

#ifdef __SEG_FS
/* Both MMIO and kernel data are reached through the FS segment. */
#define ATTR_MMIO_ADDR_SPACE __seg_fs
#define ATTR_KERN_ADDR_SPACE __seg_fs
#else
/* No compiler support for named address spaces: the qualifier expands to
 * nothing and accesses go through the inline-asm accessors defined below. */
#define ATTR_KERN_ADDR_SPACE
#endif
#ifdef __SEG_GS
/* Metadata is reached through the GS segment. */
#define ATTR_META_ADDR_SPACE __seg_gs
#endif
55 
/**
 * \brief          Register a multi-segment protection domain.
 * \param dkd      Kernel data structure for the domain, addressed through the
 *                 kernel data segment.
 * \param mmio     Base address of the domain's MMIO region.
 * \param mmio_sz  Size of the MMIO region, in bytes.
 * \param meta     Base address of the domain's metadata region.
 * \param meta_sz  Size of the metadata region, in bytes.
 */
void prot_domains_reg_multi_seg(volatile struct dom_kern_data ATTR_KERN_ADDR_SPACE *dkd,
                                uintptr_t mmio, size_t mmio_sz,
                                uintptr_t meta, size_t meta_sz);
/** Perform multi-segment-specific setup and launch the kernel domain. */
void multi_segment_launch_kernel(void);
60 
/**
 * Assembly fragment emitted at ISR entry to reload the segment registers
 * that the handler depends on.  \a exc is a literal 0 or 1: the assembler
 * ".if" directive includes the bracketed instructions only when it is
 * nonzero (i.e. for exception handlers, which may interrupt kernel code in
 * the middle of an MMIO access).
 */
#define MULTI_SEGMENT_ENTER_ISR(exc)                                          \
  "mov $" EXP_STRINGIFY(GDT_SEL_DATA) ", %%eax\n\t"                           \
  /* Refresh DS and ES in case the userspace code corrupted them. */          \
  "mov %%eax, %%ds\n\t"                                                       \
  "mov %%eax, %%es\n\t"                                                       \
  /* Refresh SEG_KERN. */                                                     \
  "mov $" EXP_STRINGIFY(LDT_SEL_KERN) ", %%eax\n\t"                           \
  "mov %%eax, %%" SEG_KERN "s\n\t"                                            \
  ".if " #exc "\n\t"                                                          \
  /* It is possible that a routine performing MMIO is being interrupted. */   \
  /* Thus, it is necessary to save and restore the MMIO segment register */   \
  /* (in a callee-saved register). */                                         \
  "mov %%" SEG_MMIO "s, %%ebp\n\t"                                            \
  "mov $" EXP_STRINGIFY(GDT_SEL_DATA_KERN_EXC) ", %%eax\n\t"                  \
  "mov %%eax, %%" SEG_KERN "s\n\t"                                            \
  ".endif\n\t"
/**
 * Counterpart to MULTI_SEGMENT_ENTER_ISR: for exception handlers (exc
 * nonzero), restore the MMIO segment selector that ENTER saved in EBP.
 */
#define MULTI_SEGMENT_LEAVE_ISR(exc)                                          \
  ".if " #exc "\n\t"                                                          \
  "mov %%ebp, %%" SEG_MMIO "s\n\t"                                            \
  ".endif\n\t"
81 
/**
 * The MMIO region is tightly bounded within a segment, so its base offset is
 * always 0.
 */
#define PROT_DOMAINS_MMIO(dcd) 0
/**
 * The metadata region is tightly bounded within a segment, so its base offset
 * is always 0.
 */
#define PROT_DOMAINS_META(dcd) 0

/* Single-letter segment register name stems ("f" -> %fs, "g" -> %gs) that
 * get string-pasted into the inline-assembly templates below (e.g.
 * "%%" SEG_KERN "s" expands to "%%fs").  Note that SEG_MMIO and SEG_KERN
 * both name FS; the ISR macros above swap which selector FS holds. */
#define SEG_MMIO "f" /**< For MMIO accesses, when enabled. */
#define SEG_KERN "f" /**< For kernel data accesses */
#define SEG_META "g" /**< For metadata accesses */
96 
/*
 * Segment-relative load/store primitives, used when the compiler cannot
 * emit segment-prefixed accesses itself.  "seg" is one of the SEG_* strings
 * above; dst and src are ordinary lvalues.  Each asm is volatile so the
 * access is neither elided nor reordered relative to other volatile
 * operations -- required for MMIO.  Byte variants use the "q" constraint
 * to force a register with a byte-addressable subregister (AL/BL/CL/DL).
 */
#define _SEG_READL(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movl %%" seg "s:%[src_], %[dst_]" : [dst_]"=r"(dst) : [src_]"m"(src))

#define _SEG_READW(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movw %%" seg "s:%[src_], %[dst_]" : [dst_]"=r"(dst) : [src_]"m"(src))

#define _SEG_READB(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movb %%" seg "s:%[src_], %[dst_]" : [dst_]"=q"(dst) : [src_]"m"(src))

#define _SEG_WRITEL(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movl %[src_], %%" seg "s:%[dst_]" \
  : [dst_]"=m"(dst) : [src_]"r"((uint32_t)(src)))

#define _SEG_WRITEW(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movw %[src_], %%" seg "s:%[dst_]" \
  : [dst_]"=m"(dst) : [src_]"r"((uint16_t)(src)))

#define _SEG_WRITEB(seg, dst, src) \
  __asm__ __volatile__ ( \
  "movb %[src_], %%" seg "s:%[dst_]" \
  : [dst_]"=m"(dst) : [src_]"q"((uint8_t)(src)))
123 
#ifndef __SEG_FS
/*
 * Fallback MMIO and kernel-data accessors, defined only when the compiler
 * lacks the __seg_fs named address space.
 * NOTE(review): when __SEG_FS *is* defined, these macro names must be
 * provided elsewhere (presumably as plain assignments through
 * ATTR_*_ADDR_SPACE-qualified pointers) -- confirm against prot-domains.h.
 */
#define MMIO_READL(dst, src) _SEG_READL(SEG_MMIO, dst, src)
#define MMIO_READW(dst, src) _SEG_READW(SEG_MMIO, dst, src)
#define MMIO_READB(dst, src) _SEG_READB(SEG_MMIO, dst, src)
#define MMIO_WRITEL(dst, src) _SEG_WRITEL(SEG_MMIO, dst, src)
#define MMIO_WRITEW(dst, src) _SEG_WRITEW(SEG_MMIO, dst, src)
#define MMIO_WRITEB(dst, src) _SEG_WRITEB(SEG_MMIO, dst, src)

#define KERN_READL(dst, src) _SEG_READL(SEG_KERN, dst, src)
#define KERN_READW(dst, src) _SEG_READW(SEG_KERN, dst, src)
#define KERN_READB(dst, src) _SEG_READB(SEG_KERN, dst, src)
#define KERN_WRITEL(dst, src) _SEG_WRITEL(SEG_KERN, dst, src)
#define KERN_WRITEW(dst, src) _SEG_WRITEW(SEG_KERN, dst, src)
#define KERN_WRITEB(dst, src) _SEG_WRITEB(SEG_KERN, dst, src)
#endif
139 
#ifndef __SEG_GS
/*
 * Fallback metadata accessors, defined only when the compiler lacks the
 * __seg_gs named address space.
 * NOTE(review): when __SEG_GS *is* defined, equivalent definitions must be
 * provided elsewhere -- confirm against prot-domains.h.
 */
#define META_READL(dst, src) _SEG_READL(SEG_META, dst, src)
#define META_READW(dst, src) _SEG_READW(SEG_META, dst, src)
#define META_READB(dst, src) _SEG_READB(SEG_META, dst, src)
#define META_WRITEL(dst, src) _SEG_WRITEL(SEG_META, dst, src)
#define META_WRITEW(dst, src) _SEG_WRITEW(SEG_META, dst, src)
#define META_WRITEB(dst, src) _SEG_WRITEB(SEG_META, dst, src)
#endif
148 
/**
 * Copy sz bytes out of the metadata region (addressed through the SEG_META
 * segment) into the default data segment.
 * \param dst Destination address in the default (ES-relative) data space.
 * \param src Source offset within the metadata segment.
 * \param sz  Number of bytes to copy.
 *
 * Wrapped in do/while(0) so the macro behaves as a single statement in
 * if/else bodies.  The "memory" clobber tells the compiler that the asm
 * reads and writes memory not named in the operand list, so cached values
 * are not carried across the copy.  Local names avoid the implementation-
 * reserved double-underscore prefix.
 */
#define MEMCPY_FROM_META(dst, src, sz)                                        \
  do {                                                                        \
    uintptr_t dst_ = (uintptr_t)(dst);                                        \
    uintptr_t src_ = (uintptr_t)(src);                                        \
    size_t sz_ = (size_t)(sz);                                                \
    __asm__ __volatile__ (                                                    \
      "rep movsb %%" SEG_META "s:(%%esi), %%es:(%%edi)\n\t"                   \
      : "+D"(dst_), "+S"(src_), "+c"(sz_)                                     \
      : : "memory");                                                          \
  } while(0)
158 
/**
 * Copy sz bytes from the default data segment into the metadata region.
 * Temporarily loads ES with the metadata selector (saved and restored
 * around the copy) so that "rep movsb" writes into the metadata segment.
 * \param dst Destination offset within the metadata segment.
 * \param src Source address in the default data space.
 * \param sz  Number of bytes to copy.
 *
 * Wrapped in do/while(0) so the macro behaves as a single statement in
 * if/else bodies.  The "memory" clobber tells the compiler that the asm
 * reads and writes memory not named in the operand list.  Local names
 * avoid the implementation-reserved double-underscore prefix.
 */
#define MEMCPY_TO_META(dst, src, sz)                                          \
  do {                                                                        \
    uintptr_t dst_ = (uintptr_t)(dst);                                        \
    uintptr_t src_ = (uintptr_t)(src);                                        \
    size_t sz_ = (size_t)(sz);                                                \
    __asm__ __volatile__ (                                                    \
      "push %%es\n\t"                                                         \
      "push %%" SEG_META "s\n\t"                                              \
      "pop %%es\n\t"                                                          \
      "rep movsb\n\t"                                                         \
      "pop %%es\n\t"                                                          \
      : "+D"(dst_), "+S"(src_), "+c"(sz_)                                     \
      : : "memory");                                                          \
  } while(0)
172 
/* _sbss_kern_addr and _sdata_addr mark the bases of the kernel and default
 * data sections; presumably linker-script symbols declared in a header not
 * visible here -- TODO confirm. */

/** Compute physical address from offset into kernel data space */
#define KERN_DATA_OFF_TO_PHYS_ADDR(x) \
  (((uintptr_t)&_sbss_kern_addr) + (uintptr_t)(x))
/** Compute physical address from offset into default data space */
#define DATA_OFF_TO_PHYS_ADDR(x) \
  (((uintptr_t)&_sdata_addr) + (uintptr_t)(x))
/** Compute kernel data offset from physical address in kernel data space */
#define PHYS_ADDR_TO_KERN_DATA_OFF(x) \
  (((uintptr_t)(x)) - (uintptr_t)&_sbss_kern_addr)
182 
/**
 * In multi-segment protection domain implementations, it is sufficient to just
 * compare incoming pointers against the frame pointer. All incoming pointers
 * are dereferenced in the main data segment, which only maps the stacks and
 * the shared data section. Since the shared data section is at a higher
 * address range than the stacks, the frame pointer check is sufficient.
 */
#define PROT_DOMAINS_CHECK_INCOMING_PTR PROT_DOMAINS_CHECK_INCOMING_PTR_EBP

/** Enable MMIO access for the current protection domain (loads the SEG_MMIO
 *  segment register used by the MMIO_* accessors above). */
void prot_domains_enable_mmio(void);
/** Disable MMIO access for the current protection domain. */
void prot_domains_disable_mmio(void);
194 
195 #endif /* CPU_X86_MM_MULTI_SEGMENT_H_ */