// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"
static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
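/*
 * Example: with MODULE_PARAM_PREFIX set to "kcsan.", the parameters above can
 * be given on the kernel command line, e.g. "kcsan.early_enable=1
 * kcsan.udelay_task=100", and the writable ones can also be adjusted at
 * runtime via /sys/module/kcsan/parameters/<name>.
 */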
bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};
/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to size that exceeds a
 *	   slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 *	1. excessive contention between watchpoint checks and setup;
 *	2. larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
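/*
 * For illustration: with KCSAN_CHECK_ADJACENT=1 (NUM_SLOTS=3) and slot=9,
 * SLOT_IDX yields the indices {10, 11, 9} while SLOT_IDX_FAST yields
 * {9, 10, 11} -- the same set, just visited in a different order, which is
 * why the fast-path may use the cheaper variant.
 */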
/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}
static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}
/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}
/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}
/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}
static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}
/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}
/*
 * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
 * for more details.
 *
 * The open-coded version here is using only safe primitives for all contexts
 * where we can have KCSAN instrumentation. In particular, we cannot use
 * prandom_u32() directly, as its tracepoint could cause recursion.
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
	const u32 res = prandom_u32_state(state);

	put_cpu_var(kcsan_rand_state);
	return (u32)(((u64) res * ep_ro) >> 32);
}
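/*
 * Worked example: the multiply-shift maps a uniform 32-bit value into
 * [0, ep_ro) without a division. E.g. for ep_ro == 8 and res == 0xC0000000
 * (3/4 of the 32-bit range), ((u64)res * 8) >> 32 == 6, i.e. 3/4 of the way
 * into [0, 8).
 */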
static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);

	this_cpu_write(kcsan_skip, skip_count);
}
static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}
/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
			       0;
	udelay(delay);
}
void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}
/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by
	 * kcsan_is_enabled() check, as otherwise we might erroneously
	 * trigger reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}
static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}
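/*
 * Example scenario (sketch): if T0's plain access to x is selected for
 * watching, kcsan_setup_watchpoint() records the current value of x and stalls
 * T0 in delay_access(). If T1 then performs a conflicting instrumented access
 * to x while the watchpoint is live, T1's check_access() finds and consumes
 * the watchpoint and reports via kcsan_found_watchpoint(); when T0 resumes, it
 * observes the consumed watchpoint (or a changed value) and reports as well.
 */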
static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}
/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();
	prandom_seed_full_state(&kcsan_rand_state);

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}
}
/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);
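/*
 * Usage sketch: callers pair these to suppress KCSAN in the current context,
 * e.g. around an intentionally racy diagnostic read (obj->stat being a
 * hypothetical field whose races are tolerated):
 *
 *	kcsan_disable_current();
 *	val = obj->stat;
 *	kcsan_enable_current();
 */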
void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);
void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
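/*
 * Usage sketch: an access mask restricts the value-change comparison to
 * selected bits, e.g. to assert that only the current context modifies a flag
 * bit while races on the remaining bits are tolerated (FLAG_BIT being a
 * hypothetical mask):
 *
 *	kcsan_set_access_mask(FLAG_BIT);
 *	__kcsan_check_access(&obj->flags, sizeof(obj->flags), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 *
 * This is the pattern used by ASSERT_EXCLUSIVE_BITS() in
 * include/linux/kcsan-checks.h.
 */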
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoids requiring list_empty()
		 * check in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
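/*
 * Usage sketch: a scoped access is re-checked on every other instrumented
 * access in this context until it is ended, which is how the
 * ASSERT_EXCLUSIVE_*_SCOPED() macros in include/linux/kcsan-checks.h extend an
 * assertion over a whole scope (obj being a hypothetical object):
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_ASSERT, &sa);
 *	... critical section touching obj ...
 *	kcsan_end_scoped_access(&sa);
 */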
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);
/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */
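/*
 * For illustration: for a plain 4-byte load such as "int v = *p;", an
 * instrumenting compiler emits roughly "__tsan_read4(p);" before the load, and
 * "__tsan_write4(p);" before a plain 4-byte store, so every plain access
 * funnels into check_access().
 */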
#define DEFINE_TSAN_READ_WRITE(size) \
	void __tsan_read##size(void *ptr); \
	void __tsan_read##size(void *ptr) \
	{ \
		check_access(ptr, size, 0); \
	} \
	EXPORT_SYMBOL(__tsan_read##size); \
	void __tsan_unaligned_read##size(void *ptr) \
		__alias(__tsan_read##size); \
	EXPORT_SYMBOL(__tsan_unaligned_read##size); \
	void __tsan_write##size(void *ptr); \
	void __tsan_write##size(void *ptr) \
	{ \
		check_access(ptr, size, KCSAN_ACCESS_WRITE); \
	} \
	EXPORT_SYMBOL(__tsan_write##size); \
	void __tsan_unaligned_write##size(void *ptr) \
		__alias(__tsan_write##size); \
	EXPORT_SYMBOL(__tsan_unaligned_write##size); \
	void __tsan_read_write##size(void *ptr); \
	void __tsan_read_write##size(void *ptr) \
	{ \
		check_access(ptr, size, \
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
	} \
	EXPORT_SYMBOL(__tsan_read_write##size); \
	void __tsan_unaligned_read_write##size(void *ptr) \
		__alias(__tsan_read_write##size); \
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);
/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
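/*
 * For illustration: with a compiler that distinguishes volatile accesses, an
 * aligned 4-byte "*(volatile int *)p" read arrives here as
 * __tsan_volatile_read4(p) and is treated as atomic, which matches what
 * callers of READ_ONCE()/WRITE_ONCE() expect.
 */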
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
	void __tsan_volatile_read##size(void *ptr); \
	void __tsan_volatile_read##size(void *ptr) \
	{ \
		const bool is_atomic = size <= sizeof(long long) && \
				       IS_ALIGNED((unsigned long)ptr, size); \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
			return; \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
	} \
	EXPORT_SYMBOL(__tsan_volatile_read##size); \
	void __tsan_unaligned_volatile_read##size(void *ptr) \
		__alias(__tsan_volatile_read##size); \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
	void __tsan_volatile_write##size(void *ptr); \
	void __tsan_volatile_write##size(void *ptr) \
	{ \
		const bool is_atomic = size <= sizeof(long long) && \
				       IS_ALIGNED((unsigned long)ptr, size); \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
			return; \
		check_access(ptr, size, \
			     KCSAN_ACCESS_WRITE | \
			     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
	} \
	EXPORT_SYMBOL(__tsan_volatile_write##size); \
	void __tsan_unaligned_volatile_write##size(void *ptr) \
		__alias(__tsan_volatile_write##size); \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);
/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */
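/*
 * For illustration: a builtin such as __atomic_fetch_add(&x, 1,
 * __ATOMIC_RELAXED) on a 32-bit x is rewritten by the instrumenting compiler
 * into a call to __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_RELAXED), which
 * checks the access and then performs the addition via the underlying builtin.
 */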
#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
	{ \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
		} \
		return __atomic_load_n(ptr, memorder); \
	} \
	EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{ \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
			check_access(ptr, bits / BITS_PER_BYTE, \
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
		} \
		__atomic_store_n(ptr, v, memorder); \
	} \
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)
#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{ \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
			check_access(ptr, bits / BITS_PER_BYTE, \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC); \
		} \
		return __atomic_##op##suffix(ptr, v, memorder); \
	} \
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							      u##bits val, int mo, int fail_mo) \
	{ \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
			check_access(ptr, bits / BITS_PER_BYTE, \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC); \
		} \
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	} \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{ \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
			check_access(ptr, bits / BITS_PER_BYTE, \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC); \
		} \
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
		return exp; \
	} \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
#define DEFINE_TSAN_ATOMIC_OPS(bits) \
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);