// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since
 * KCSAN's reports are obtained via the console, that is the output we verify.
 * Each test case checks for the presence (or absence) of generated reports.
 * Relies on the 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */
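
/*
 * For orientation, a captured report looks roughly like the following
 * (illustrative sketch only; addresses, task and CPU numbers vary per run):
 *
 *	BUG: KCSAN: data-race in test_kernel_read / test_kernel_write
 *
 *	write to 0xffffffff99fe1d20 of 8 bytes by task 512 on cpu 0:
 *	 ...
 *	read to 0xffffffff99fe1d20 of 8 bytes by task 513 on cpu 1:
 *	 ...
 *
 * probe_console() below extracts only the title line and the two
 * "... bytes by ..." lines, which is what report_matches() checks against.
 */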
#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif
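
/*
 * For example: with compiler support for compound read-write instrumentation,
 * __KCSAN_ACCESS_RW(0) evaluates to (KCSAN_ACCESS_COMPOUND |
 * KCSAN_ACCESS_WRITE); without such support, __KCSAN_ACCESS_RW(0) is just 0,
 * i.e. a plain read.
 */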
/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[3][512];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
/* Setup test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
	kcsan_disable_current();

	/*
	 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
	 * least one race is reported.
	 */
	end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

	/* Signal start; release potential initialization of shared data. */
	smp_store_release(&access_kernels[0], func1);
	smp_store_release(&access_kernels[1], func2);
}
/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}
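
/*
 * Sketch of the checking loop that every test case below builds out of these
 * two helpers (see e.g. test_basic() for the real thing):
 *
 *	begin_test_checks(test_kernel_write, test_kernel_read);
 *	do {
 *		match_expect |= report_matches(&expect);
 *	} while (!end_test_checks(match_expect));
 *	KUNIT_EXPECT_TRUE(test, match_expect);
 */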
/*
 * Probe for console output: checks if a race was reported, and obtains
 * observed lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KCSAN report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}
/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;    /* Function pointer to expected function of top frame. */
		void *addr;  /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type;    /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
/* Check observed report matches information in @r. */
__no_kcsan
static bool report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;
	int i;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexicographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match, remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				((ty & KCSAN_ACCESS_WRITE) ?
					 "assert no accesses" :
					 "assert no writes") :
				((ty & KCSAN_ACCESS_WRITE) ?
					 ((ty & KCSAN_ACCESS_COMPOUND) ?
						  "read-write" :
						  "write") :
					 "read");
		const char *const access_type_aux =
			(ty & KCSAN_ACCESS_ATOMIC) ?
				" (marked)" :
				((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
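
/*
 * Illustrative sketch of what report_matches() generates for test_basic()
 * below (where <addr> is the runtime address of test_var):
 *
 *	expect[0]: "BUG: KCSAN: data-race in test_kernel_read / test_kernel_write"
 *	expect[1]: "write to 0x<addr> of 8 bytes"
 *	expect[2]: "read to 0x<addr> of 8 bytes"
 *
 * Each expected line must appear as a substring of the corresponding observed
 * line, with the two access lines accepted in either order.
 */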
/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }
static noinline void test_kernel_read(void) { sink_value(test_var); }

static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

static noinline void test_kernel_data_race(void) { data_race(test_var++); }

static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}
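
/*
 * Note: races with the ASSERT_EXCLUSIVE_* accesses above are reported with an
 * "assert: race" title rather than "data-race" (see is_assert in
 * report_matches()), since KCSAN_ACCESS_ASSERT is set for these accesses.
 */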
#define TEST_CHANGE_BITS 0xff00ff00

static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid race of unknown origin for this test, just pretend they
		 * are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}
static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}
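
/*
 * For illustration: test_kernel_change_bits() only ever flips the bits in
 * TEST_CHANGE_BITS (0xff00ff00). Asserting exclusivity of exactly those bits
 * therefore races with it (report expected), whereas asserting exclusivity of
 * the complementary bits does not (no report); see
 * test_assert_exclusive_bits_change() and
 * test_assert_exclusive_bits_nochange() below.
 */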
/* To check that scoped assertions do trigger anywhere in scope. */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}
static noinline void test_kernel_rmw_array(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(test_array); ++i)
		test_array[i]++;
}
static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}
static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}

static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}

static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}
/* ===== Test cases ===== */

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	static const struct expect_report never = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write, test_kernel_read);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect);
	KUNIT_EXPECT_FALSE(test, match_never);
}
/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			/* NULL will match any address. */
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
		},
	};
	static const struct expect_report never = {
		.access = {
			{ test_kernel_rmw_array, NULL, 0, 0 },
			{ test_kernel_rmw_array, NULL, 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
	do {
		match_expect |= report_matches(&expect);
		match_never |= report_matches(&never);
	} while (!end_test_checks(false));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
	KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}
/*
 * Test that the rules where the KCSAN_REPORT_VALUE_CHANGE_ONLY option should
 * never apply work.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ NULL },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}
/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write, test_kernel_write);
	do {
		sink_value(READ_ONCE(test_var)); /* induce value-change */
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}
/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
/*
 * Test that data races where only one write is larger than word-size are
 * always reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test that a race between an atomic and a plain access results in reports. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Test that atomic RMWs generate a correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report never = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
	KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_data_race, test_kernel_data_race);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	const struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	const struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}
__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
	const struct expect_report expect_start = {
		.access = {
			{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	const struct expect_report expect_anywhere = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect_start = false;
	bool match_expect_anywhere = false;

	begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
	do {
		match_expect_start |= report_matches(&expect_start);
		match_expect_anywhere |= report_matches(&expect_anywhere);
	} while (!end_test_checks(match_expect_start && match_expect_anywhere));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
}
__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{
	const struct expect_report expect_start1 = {
		.access = {
			{ test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	const struct expect_report expect_start2 = {
		.access = { expect_start1.access[0], expect_start1.access[0] },
	};
	const struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
	end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
	do {
		match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_start && match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_TRUE(test, match_expect_inscope);
}
/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused about jiffies' declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
/*
 * Test that atomic builtins work and required instrumentation functions exist.
 * We also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		tmp = 20L;
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		/*
		 * Each __atomic_fetch_* returns the old value; after the nand,
		 * test_var == ~(241 & 0xf) == -2.
		 */
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var);

		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
/*
 * Each test case is run with different numbers of threads. Until KUnit
 * supports passing arguments for each test case, we encode #threads in the
 * test case name (read by get_num_threads()). [The '-' was chosen as a
 * stylistic preference to separate test name and #threads.]
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (range 2-5), and then stress the system with larger counts.
 */
#define KCSAN_KUNIT_CASE(test_name)                                 \
	{ .run_case = test_name, .name = #test_name "-02" },       \
	{ .run_case = test_name, .name = #test_name "-03" },       \
	{ .run_case = test_name, .name = #test_name "-04" },       \
	{ .run_case = test_name, .name = #test_name "-05" },       \
	{ .run_case = test_name, .name = #test_name "-08" },       \
	{ .run_case = test_name, .name = #test_name "-16" }
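
/*
 * For example, KCSAN_KUNIT_CASE(test_basic) expands to six kunit_case entries
 * named "test_basic-02" through "test_basic-16", so test_basic is run with 2,
 * 3, 4, 5, 8 and 16 access threads respectively.
 */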
static struct kunit_case kcsan_test_cases[] = {
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	{},
};
/* ===== End test cases ===== */

/* Get number of threads encoded in test name. */
static bool __no_kcsan
get_num_threads(const char *test, int *nthreads)
{
	int len = strlen(test);

	if (WARN_ON(len < 3))
		return false;

	*nthreads = test[len - 1] - '0';
	*nthreads += (test[len - 2] - '0') * 10;

	if (WARN_ON(*nthreads < 0))
		return false;

	return true;
}
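
/*
 * Example: for a test named "test_basic-08", get_num_threads() decodes the
 * trailing two digits into *nthreads == 8 (('8' - '0') + ('0' - '0') * 10).
 */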
/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{
	static atomic_t cnt = ATOMIC_INIT(0);
	unsigned int idx;
	void (*func)(void);

	idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
	/* Acquire potential initialization. */
	func = smp_load_acquire(&access_kernels[idx]);
	if (func)
		func();
}
/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{
	struct timer_list timer;
	unsigned int cnt = 0;
	unsigned int idx;
	void (*func)(void);

	timer_setup_on_stack(&timer, access_thread_timer, 0);
	do {
		might_sleep();

		if (!timer_pending(&timer))
			mod_timer(&timer, jiffies + 1);
		else {
			/* Iterate through all kernels. */
			idx = cnt++ % ARRAY_SIZE(access_kernels);
			/* Acquire potential initialization. */
			func = smp_load_acquire(&access_kernels[idx]);
			if (func)
				func();
		}
	} while (!torture_must_stop());
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	torture_kthread_stopping("access_thread");
	return 0;
}
__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	if (!get_num_threads(test->name, &nthreads))
		goto err;

	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
		/*
		 * Without any preemption, keep 2 CPUs free for other tasks, one
		 * of which is the main test case function checking for
		 * completion or failure.
		 */
		const int min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0;
		const int min_required_cpus = 2 + min_unused_cpus;

		if (num_online_cpus() < min_required_cpus) {
			pr_err("%s: too few online CPUs (%u < %d) for test",
			       test->name, num_online_cpus(), min_required_cpus);
			goto err;
		} else if (nthreads > num_online_cpus() - min_unused_cpus) {
			nthreads = num_online_cpus() - min_unused_cpus;
			pr_warn("%s: limiting number of threads to %d\n",
				test->name, nthreads);
		}
	}

	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *),
			  GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL,
					   threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	torture_init_end();
	return -EINVAL;
}
__no_kcsan
static void test_exit(struct kunit *test)
{
	struct task_struct **stop_thread;
	int i;

	if (torture_cleanup_begin())
		return;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
		WRITE_ONCE(access_kernels[i], NULL);

	if (threads) {
		for (stop_thread = threads; *stop_thread; stop_thread++)
			torture_stop_kthread(access_thread, *stop_thread);

		kfree(threads);
		threads = NULL;
	}

	torture_cleanup_end();
}
static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan-test",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kcsan_test_suites[] = { &kcsan_test_suite, NULL };
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}
/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kcsan_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * does not work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kcsan_test_suites);
}

static void kcsan_test_exit(void)
{
	__kunit_test_suites_exit(kcsan_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall(kcsan_test_init);
module_exit(kcsan_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");