/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

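/*
 * __nops(n) expands to an assembly string emitting 'n' NOP instructions
 * (for use inside other inline asm); nops(n) wraps it in its own asm
 * statement.
 */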
#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

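/* Event and interrupt wait hints: send-event, wait-for-event, wait-for-interrupt. */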
#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

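/*
 * Instruction and data barriers. dmb()/dsb() take the shareability
 * domain / access-type argument directly, e.g. dmb(ish) or dsb(st).
 */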
#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

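/*
 * Barriers encoded in the hint space so they behave as NOPs on CPUs
 * without the corresponding feature: PSB CSYNC (hint #17, profiling
 * buffer sync), TSB CSYNC (hint #18, trace buffer sync) and CSDB
 * (hint #20, consumption of speculative data barrier).
 */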
#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

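/*
 * pmr_sync(): when pseudo-NMI priority masking via ICC_PMR_EL1 is in use
 * and the gic_pmr_sync static key is enabled, a dsb(sy) is issued so the
 * PMR write is observed before later accesses; otherwise it is a no-op.
 */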
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
do {								\
	extern struct static_key_false gic_pmr_sync;		\
								\
	if (static_branch_unlikely(&gic_pmr_sync))		\
		dsb(sy);					\
} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif

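/*
 * mb()/rmb()/wmb() are full-system DSBs; the dma_*() variants are DMBs
 * limited to the outer shareable domain, intended for ordering against
 * DMA-coherent observers.
 */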
#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

#define tsb_csync()								\
	do {									\
		/*								\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need		\
		 * another TSB to ensure the trace is flushed. The barriers	\
		 * don't have to be strictly back to back, as long as the	\
		 * CPU is in trace prohibited state.				\
		 */								\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
			__tsb_csync();						\
		__tsb_csync();							\
	} while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

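/*
 * The mask is consumed by the generic array_index_nospec() helper in
 * <linux/nospec.h>, which (roughly) ANDs it with the index so that a
 * mispredicted bounds check cannot produce an out-of-bounds access:
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *		val = table[idx];
 *	}
 */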
/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
 */
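/*
 * The asm below builds an artificial address dependency on @val: EOR of
 * a register with itself yields zero, but the result still carries a
 * dependency on the counter value, and the dummy load from sp + 0 using
 * that result makes subsequent memory barriers order the counter read as
 * if it were a memory read.
 */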
#define arch_counter_enforce_ordering(val) do {			\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

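/* SMP barriers only need to cover the inner shareable domain. */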
#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

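/*
 * __smp_store_release() maps directly onto the STLRB/STLRH/STLR
 * store-release instructions, so no separate barrier is needed. The
 * union plus __unqual_scalar_typeof() strips qualifiers such as
 * volatile from the value before it is fed to the asm.
 */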
#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

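/*
 * __smp_load_acquire() maps onto the LDARB/LDARH/LDAR load-acquire
 * instructions and evaluates to the loaded value.
 */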
#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})

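/*
 * The smp_cond_load_*() loops defer to __cmpwait_relaxed(), letting the
 * CPU wait (via an exclusive load and WFE) for the variable to change
 * instead of spinning. A typical (illustrative) use is waiting on a
 * flag:
 *
 *	smp_cond_load_acquire(&flag, VAL != 0);
 */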
#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */