#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/lib.h>
#include <xen/bitops.h>
#include <asm/processor.h>

#define read_sreg(name)                                         \
({  unsigned int __sel;                                         \
    asm volatile ( "mov %%" STR(name) ",%0" : "=r" (__sel) );   \
    __sel;                                                      \
})
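
/*
 * Illustrative use of read_sreg() (not part of this header): the argument is
 * the bare segment register name, which STR() turns into the AT&T register
 * syntax.
 *
 *   unsigned int sel = read_sreg(ds);
 */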

static inline void wbinvd(void)
{
    asm volatile ( "wbinvd" ::: "memory" );
}

static inline void wbnoinvd(void)
{
    asm volatile ( "repe; wbinvd" : : : "memory" );
}

static inline void clflush(const void *p)
{
    asm volatile ( "clflush %0" :: "m" (*(const char *)p) );
}

static inline void clflushopt(const void *p)
{
    asm volatile ( "data16 clflush %0" :: "m" (*(const char *)p) );
}

static inline void clwb(const void *p)
{
#if defined(HAVE_AS_CLWB)
    asm volatile ( "clwb %0" :: "m" (*(const char *)p) );
#elif defined(HAVE_AS_XSAVEOPT)
    asm volatile ( "data16 xsaveopt %0" :: "m" (*(const char *)p) );
#else
    asm volatile ( ".byte 0x66, 0x0f, 0xae, 0x32"
                   :: "d" (p), "m" (*(const char *)p) );
#endif
}
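
/*
 * Illustrative cache-line writeback of a buffer (not part of this header);
 * buf, len and the 64-byte line size are hypothetical.  clwb()/clflushopt()
 * are weakly ordered, so the trailing wmb() (sfence) orders the flushes
 * against later stores.
 *
 *   for ( const char *p = buf; p < buf + len; p += 64 )
 *       clwb(p);
 *   wmb();
 */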

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#include <asm/x86_64/system.h>

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *   in general the primitive would be invalid without it, as *ptr is an
 *   output argument. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchg %b[x], %[ptr]"
                       : [x] "+q" (x), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       :: "memory" );
        break;
    case 2:
        asm volatile ( "xchg %w[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       :: "memory" );
        break;
    case 4:
        asm volatile ( "xchg %k[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       :: "memory" );
        break;
    case 8:
        asm volatile ( "xchg %q[x], %[ptr]"
                       : [x] "+r" (x), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       :: "memory" );
        break;
    }
    return x;
}
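
/*
 * Illustrative use of the xchg() wrapper above (not part of this header);
 * the variable names are hypothetical.  The exchange is atomic and acts as
 * a full memory barrier, since a locked operation is implied.
 *
 *   static unsigned long pending;
 *   unsigned long old = xchg(&pending, 0UL);   // atomically fetch and clear
 */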

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( "lock cmpxchg %b[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       : [new] "q" (new), "a" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( "lock cmpxchg %w[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    case 4:
        asm volatile ( "lock cmpxchg %k[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( "lock cmpxchg %q[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       : [new] "r" (new), "a" (old)
                       : "memory" );
        return prev;
    }
    return old;
}
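
/*
 * Illustrative compare-and-exchange loop (not part of this header), assuming
 * the usual typed cmpxchg() wrapper built on top of __cmpxchg() by the
 * included <asm/x86_64/system.h>; the counter name is hypothetical.
 *
 *   static unsigned long counter;
 *   unsigned long old, new;
 *
 *   do {
 *       old = ACCESS_ONCE(counter);
 *       new = old + 1;
 *   } while ( cmpxchg(&counter, old, new) != old );  // success iff RETURN == OLD
 */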

static always_inline unsigned long cmpxchg_local_(
    void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
    unsigned long prev = ~old;

    switch ( size )
    {
    case 1:
        asm volatile ( "cmpxchg %b[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr)
                       : [new] "q" (new), "a" (old) );
        break;
    case 2:
        asm volatile ( "cmpxchg %w[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    case 4:
        asm volatile ( "cmpxchg %k[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    case 8:
        asm volatile ( "cmpxchg %q[new], %[ptr]"
                       : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr)
                       : [new] "r" (new), "a" (old) );
        break;
    }

    return prev;
}
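
/*
 * Note on cmpxchg_local_() above: it deliberately omits the lock prefix, so
 * it is atomic only with respect to the current CPU (e.g. against code run
 * in interrupt context), not with respect to other processors.  The ~old
 * initialisation of prev makes an unsupported size read back as a failed
 * compare.
 */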

#define cmpxchgptr(ptr,o,n) ({                                          \
    const __typeof__(**(ptr)) *__o = (o);                               \
    __typeof__(**(ptr)) *__n = (n);                                     \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)__o,            \
                                   (unsigned long)__n,sizeof(*(ptr)))); \
})
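
/*
 * Illustrative use of cmpxchgptr() (not part of this header); the structure
 * and variable names are hypothetical.  The intermediate __o/__n variables
 * above give type checking of the old/new arguments against the pointed-to
 * pointer type.
 *
 *   static struct foo *head;
 *   struct foo *old = NULL, *new = &some_foo;
 *
 *   if ( cmpxchgptr(&head, old, new) == old )
 *       // new was installed as the head pointer
 */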

/*
 * Undefined symbol to cause link failure if a wrong size is used with
 * arch_fetch_and_add().
 */
extern unsigned long __bad_fetch_and_add_size(void);

static always_inline unsigned long __xadd(
    volatile void *ptr, unsigned long v, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "lock xadd %b[v], %[ptr]"
                       : [v] "+q" (v), [ptr] "+m" (*(volatile uint8_t *)ptr)
                       :: "memory");
        return v;
    case 2:
        asm volatile ( "lock xadd %w[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint16_t *)ptr)
                       :: "memory");
        return v;
    case 4:
        asm volatile ( "lock xadd %k[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint32_t *)ptr)
                       :: "memory");
        return v;
    case 8:
        asm volatile ( "lock xadd %q[v], %[ptr]"
                       : [v] "+r" (v), [ptr] "+m" (*(volatile uint64_t *)ptr)
                       :: "memory");
        return v;
    default:
        return __bad_fetch_and_add_size();
    }
}

/*
 * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr.  Returns
 * the previous value.
 *
 * This is a full memory barrier.
 */
#define arch_fetch_and_add(ptr, v) \
    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
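
/*
 * Illustrative use of arch_fetch_and_add() (not part of this header); the
 * counter name is hypothetical.  The returned value is the counter's value
 * from before the addition.
 *
 *   static unsigned int event_count;
 *   unsigned int prev = arch_fetch_and_add(&event_count, 1);
 */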

/*
 * Mandatory barriers, for enforced ordering of reads and writes, e.g. for use
 * with MMIO devices mapped with reduced cacheability.
 */
#define mb()            asm volatile ( "mfence" ::: "memory" )
#define rmb()           asm volatile ( "lfence" ::: "memory" )
#define wmb()           asm volatile ( "sfence" ::: "memory" )
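
/*
 * Illustrative use of a mandatory barrier on a write-combining MMIO mapping
 * (not part of this header); ring_slot and doorbell are hypothetical device
 * registers.  wmb() makes the payload write visible before the doorbell
 * write that triggers the device.
 *
 *   *(volatile uint32_t *)ring_slot = data;
 *   wmb();
 *   *(volatile uint32_t *)doorbell = 1;
 */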

/*
 * SMP barriers, for ordering of reads and writes between CPUs, most commonly
 * used with shared memory.
 *
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *  Loads may be reordered ahead of an unaliasing store.
 *
 * Refer to the vendor system programming manuals for further details.
 */
#define smp_mb()        mb()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)

#define smp_mb__before_atomic()    do { } while (0)
#define smp_mb__after_atomic()     do { } while (0)
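
/*
 * Illustrative producer/consumer pairing of the SMP barriers (not part of
 * this header); ready and payload are hypothetical shared variables.
 *
 *   producer:                    consumer:
 *     payload = value;             if ( ready )
 *     smp_wmb();                   {
 *     ready = 1;                       smp_rmb();
 *                                      consume(payload);
 *                                  }
 *
 * Per the ordering rules above, only loads may pass stores on x86, so both
 * smp_wmb() and smp_rmb() reduce to compiler barriers; a store->load
 * ordering requirement would need smp_mb() instead.
 */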

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *      bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                    unsigned long size)
{
    unsigned long mask;

    asm volatile ( "cmp %[size], %[index]; sbb %[mask], %[mask];"
                   : [mask] "=r" (mask)
                   : [size] "g" (size), [index] "r" (index) );

    return mask;
}

/* Override default implementation in nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
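
/*
 * Illustrative use of the mask (not part of this header); array, idx and nr
 * are hypothetical.  Clamping the index with the mask keeps a speculatively
 * mispredicted bounds check from reading out-of-bounds memory.
 *
 *   if ( idx < nr )
 *   {
 *       unsigned long mask = array_index_mask_nospec(idx, nr);
 *       val = array[idx & mask];
 *   }
 *
 * Callers normally go through array_index_nospec() in nospec.h, which wraps
 * this pattern.
 */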

#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )

/*
 * Used in the idle loop; sti takes effect only after the following
 * instruction, so no interrupt can be taken between the sti and the hlt.
 */
#define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shut down the processor */
#define halt()          asm volatile ( "hlt" : : : "memory" )

#define local_save_flags(x)                                      \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
})
#define local_irq_save(x)                                        \
({                                                               \
    local_save_flags(x);                                         \
    local_irq_disable();                                         \
})
#define local_irq_restore(x)                                     \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushfq\n\t"                                  \
                   "andq %0, (%%rsp)\n\t"                        \
                   "orq  %1, (%%rsp)\n\t"                        \
                   "popfq"                                       \
                   : : "i?r" ( ~X86_EFLAGS_IF ),                 \
                       "ri" ( (x) & X86_EFLAGS_IF ) );           \
})
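
/*
 * Illustrative interrupt-off critical section (not part of this header);
 * what goes inside is hypothetical.  local_irq_restore() only replays the
 * saved IF state, leaving the other EFLAGS bits as they currently are.
 *
 *   unsigned long flags;
 *
 *   local_irq_save(flags);
 *   // ... code that must not be interrupted on this CPU ...
 *   local_irq_restore(flags);
 */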

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    local_save_flags(flags);
    return !!(flags & X86_EFLAGS_IF);
}

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void init_idt_traps(void);
void load_system_tables(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif