/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KCSAN access checks and modifiers. These can be used to explicitly check
 * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE	(1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND	(1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC	(1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT	(1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED	(1 << 4) /* Access is a scoped access. */

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
 * even in compilation units that selectively disable KCSAN, but must use KCSAN
 * to validate access to an address. Never use these in header files!
 */
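/*
 * For example, a compilation unit that itself disables KCSAN instrumentation
 * can still explicitly validate a racy access (a sketch; shared_foo and the
 * surrounding code are illustrative):
 *
 *	__kcsan_check_read(&shared_foo, sizeof(shared_foo));
 *	foo = shared_foo;
 */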
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
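 *
 * A minimal sketch of the intended pairing (the access in between is
 * illustrative):
 *
 * .. code-block:: c
 *
 *	kcsan_disable_current();
 *	... = shared_foo; // not checked by KCSAN in this context
 *	kcsan_enable_current();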
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
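 *
 * A minimal sketch (the access in between is illustrative):
 *
 * .. code-block:: c
 *
 *	kcsan_nestable_atomic_begin();
 *	shared_foo++; // may race with other accesses, but considered atomic
 *	kcsan_nestable_atomic_end();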
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
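 *
 * For example, a sketch treating one following plain access as atomic
 * (shared_foo is illustrative):
 *
 * .. code-block:: c
 *
 *	kcsan_atomic_next(1);
 *	... = shared_foo; // this one access is treated as atomic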
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
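 *
 * A sketch of intended use (flags and the mask value are illustrative); see
 * also ASSERT_EXCLUSIVE_BITS() below, which builds on this mechanism:
 *
 * .. code-block:: c
 *
 *	kcsan_set_access_mask(0x000000ff); // only low-byte changes reported
 *	kcsan_check_read(&flags, sizeof(flags));
 *	kcsan_set_access_mask(0); // clear the mask again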
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);

/* Scoped access information. */
struct kcsan_scoped_access {
	struct list_head list;
	/* Access information. */
	const volatile void *ptr;
	size_t size;
	int type;
	/* Location where scoped access was set up. */
	unsigned long ip;
};
/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped                                                 \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
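 *
 * A sketch of manual pairing (shared_foo is illustrative); alternatively,
 * declaring @sa with __kcsan_cleanup_scoped ends the access automatically:
 *
 * .. code-block:: c
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&shared_foo, sizeof(shared_foo),
 *				  KCSAN_ACCESS_SCOPED, &sa);
 *	... // shared_foo is re-checked on every call into the KCSAN runtime
 *	kcsan_end_scoped_access(&sa);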
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void kcsan_disable_current(void)		{ }
static inline void kcsan_enable_current(void)		{ }
static inline void kcsan_enable_current_nowarn(void)	{ }
static inline void kcsan_nestable_atomic_begin(void)	{ }
static inline void kcsan_nestable_atomic_end(void)	{ }
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size)                                    \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size)                                      \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)		do { } while (0)
#define kcsan_check_atomic_write(...)		do { } while (0)
#define kcsan_check_atomic_read_write(...)	do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)                                     \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)                                    \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size)                               \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: If a clear scope exists in which no concurrent writes are expected,
 * ASSERT_EXCLUSIVE_WRITER_SCOPED() performs more thorough checking.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _)                 \
		__kcsan_cleanup_scoped;                                        \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)         \
		__maybe_unused = kcsan_begin_scoped_access(                    \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),     \
			&__kcsan_scoped_name(id, _))

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. If a clear scope exists in which no concurrent accesses are expected,
 *    ASSERT_EXCLUSIVE_ACCESS_SCOPED() performs more thorough checking.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)


/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
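 *
 * A sketch, continuing the ASSERT_EXCLUSIVE_ACCESS() example above (the
 * cleanup helpers remain illustrative):
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		do_some_cleanup(obj);   // entire cleanup covered by the assert
 *		release_for_reuse(obj);
 *	}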
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, the above can be written simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example where this may be used: certain bits of @var may only be
 * modified when holding the appropriate lock, but other bits may still be
 * modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
	do {                                                                   \
		kcsan_set_access_mask(mask);                                   \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
		kcsan_set_access_mask(0);                                      \
		kcsan_atomic_next(1);                                          \
	} while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */