// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for KMSAN.
 * Each test case checks the presence (or absence) of generated reports.
 * Relies on 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Copyright (C) 2021-2022, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <kunit/test.h>
#include "kmsan.h"

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

static DEFINE_PER_CPU(int, per_cpu_var);

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	bool available;
	bool ignore; /* Stop console output collection. */
	char header[256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;

	if (observed.ignore)
		return;
	spin_lock_irqsave(&observed.lock, flags);

	if (strnstr(buf, "BUG: KMSAN: ", len)) {
		/*
		 * This line starts a KMSAN report related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.header, buf,
			min(len + 1, sizeof(observed.header)));
		WRITE_ONCE(observed.available, true);
		observed.ignore = true;
	}
	spin_unlock_irqrestore(&observed.lock, flags);
}
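
/*
 * Note: probe_console() is never called directly by the tests. It is attached
 * to the 'console' tracepoint via register_trace_console() in
 * kmsan_suite_init() below, so every line printed to the kernel log passes
 * through it while the suite runs.
 */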

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.available);
}

/* Reset observed.available, so that the test can trigger another report. */
static void report_reset(void)
{
	unsigned long flags;

	spin_lock_irqsave(&observed.lock, flags);
	WRITE_ONCE(observed.available, false);
	observed.ignore = false;
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Information we expect in a report. */
struct expect_report {
	const char *error_type; /* Error type. */
	/*
	 * Kernel symbol from the error header, or NULL if no report is
	 * expected.
	 */
	const char *symbol;
};

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	typeof(observed.header) expected_header;
	unsigned long flags;
	bool ret = false;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available() || !r->symbol)
		return (!report_available() && !r->symbol);

	/* Generate expected report contents. */
	cur = expected_header;
	end = &expected_header[sizeof(expected_header) - 1];

	cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);
	scnprintf(cur, end - cur, " in %s", r->symbol);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expected_header, '+');
	if (cur)
		*cur = '\0';

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.header, expected_header);
out:
	spin_unlock_irqrestore(&observed.lock, flags);

	return ret;
}
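
/*
 * Example of how the matching above works: for a test that declares
 * EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmalloc"), the expected
 * header is "BUG: KMSAN: uninit-value in test_uninit_kmalloc" (anything from
 * a '+' onwards is dropped), and any captured console line containing that
 * prefix, typically one ending in "test_uninit_kmalloc+0x.../0x...", is
 * accepted as a match.
 */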

/* ===== Test cases ===== */

/* Prevent replacing branch with select in LLVM. */
static noinline void check_true(char *arg)
{
	pr_info("%s is true\n", arg);
}

static noinline void check_false(char *arg)
{
	pr_info("%s is false\n", arg);
}

#define USE(x)                           \
	do {                             \
		if (x)                   \
			check_true(#x);  \
		else                     \
			check_false(#x); \
	} while (0)

#define EXPECTATION_ETYPE_FN(e, reason, fn) \
	struct expect_report e = {          \
		.error_type = reason,       \
		.symbol = fn,               \
	}

#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
#define EXPECTATION_UNINIT_VALUE_FN(e, fn) \
	EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
#define EXPECTATION_USE_AFTER_FREE(e) \
	EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)
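
/*
 * Roughly, EXPECTATION_UNINIT_VALUE(expect); inside a test function expands to
 *
 *	struct expect_report expect = {
 *		.error_type = "uninit-value",
 *		.symbol = __func__,
 *	};
 *
 * whereas EXPECTATION_NO_REPORT(expect); leaves both fields NULL, which makes
 * report_matches() succeed only if no report was captured at all.
 */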

/* Test case: ensure that kmalloc() returns uninitialized memory. */
static void test_uninit_kmalloc(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	int *ptr;

	kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
	ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
 */
static void test_init_kmalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int *ptr;

	kunit_info(test, "initialized kmalloc test (no reports)\n");
	ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
	memset(ptr, 0, sizeof(*ptr));
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that kzalloc() returns initialized memory. */
static void test_init_kzalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int *ptr;

	kunit_info(test, "initialized kzalloc test (no reports)\n");
	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	USE(*ptr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables are uninitialized by default. */
static void test_uninit_stack_var(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	volatile int cond;

	kunit_info(test, "uninitialized stack variable (UMR report)\n");
	USE(cond);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables with initializers are initialized. */
static void test_init_stack_var(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	volatile int cond = 1;

	kunit_info(test, "initialized stack variable (no reports)\n");
	USE(cond);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static noinline void two_param_fn_2(int arg1, int arg2)
{
	USE(arg1);
	USE(arg2);
}

static noinline void one_param_fn(int arg)
{
	two_param_fn_2(arg, arg);
	USE(arg);
}

static noinline void two_param_fn(int arg1, int arg2)
{
	int init = 0;

	one_param_fn(init);
	USE(arg1);
	USE(arg2);
}

static void test_params(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
	/*
	 * With eager param/retval checking enabled, KMSAN will report an error
	 * before the call to two_param_fn().
	 */
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
#else
	EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
#endif
	volatile int uninit, init = 1;

	kunit_info(test,
		   "uninit passed through a function parameter (UMR report)\n");
	two_param_fn(uninit, init);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static int signed_sum3(int a, int b, int c)
{
	return a + b + c;
}

/*
 * Test case: ensure that uninitialized values are tracked through function
 * arguments.
 */
static void test_uninit_multiple_params(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	volatile char b = 3, c;
	volatile int a;

	kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
	USE(signed_sum3(a, b, c));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Helper function to make an array uninitialized. */
static noinline void do_uninit_local_array(char *array, int start, int stop)
{
	volatile char uninit;

	for (int i = start; i < stop; i++)
		array[i] = uninit;
}

/*
 * Test case: ensure kmsan_check_memory() reports an error when checking
 * uninitialized memory.
 */
static void test_uninit_kmsan_check_memory(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
	volatile char local_array[8];

	kunit_info(
		test,
		"kmsan_check_memory() called on uninit local (UMR report)\n");
	do_uninit_local_array((char *)local_array, 5, 7);

	kmsan_check_memory((char *)local_array, 8);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: check that a virtual memory range created with vmap() from
 * initialized pages is still considered as initialized.
 */
static void test_init_kmsan_vmap_vunmap(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	const int npages = 2;
	struct page **pages;
	void *vbuf;

	kunit_info(test, "pages initialized via vmap (no reports)\n");

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	for (int i = 0; i < npages; i++)
		pages[i] = alloc_page(GFP_KERNEL);
	vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	memset(vbuf, 0xfe, npages * PAGE_SIZE);
	for (int i = 0; i < npages; i++)
		kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);

	if (vbuf)
		vunmap(vbuf);
	for (int i = 0; i < npages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memset() can initialize a buffer allocated via
 * vmalloc().
 */
static void test_init_vmalloc(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	int npages = 8;
	char *buf;

	kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
	buf = vmalloc(PAGE_SIZE * npages);
	buf[0] = 1;
	memset(buf, 0xfe, PAGE_SIZE * npages);
	USE(buf[0]);
	for (int i = 0; i < npages; i++)
		kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
	vfree(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that use-after-free reporting works. */
static void test_uaf(struct kunit *test)
{
	EXPECTATION_USE_AFTER_FREE(expect);
	volatile int value;
	volatile int *var;

	kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
	var = kmalloc(80, GFP_KERNEL);
	var[3] = 0xfeedface;
	kfree((int *)var);
	/* Copy the invalid value before checking it. */
	value = var[3];
	USE(value);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that uninitialized values are propagated through per-CPU
 * memory.
 */
static void test_percpu_propagate(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE(expect);
	volatile int uninit, check;

	kunit_info(test,
		   "uninit local stored to per_cpu memory (UMR report)\n");

	this_cpu_write(per_cpu_var, uninit);
	check = this_cpu_read(per_cpu_var);
	USE(check);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that passing uninitialized values to printk() leads to an
 * error report.
 */
static void test_printk(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
	/*
	 * With eager param/retval checking enabled, KMSAN will report an error
	 * before the call to pr_info().
	 */
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
#else
	EXPECTATION_UNINIT_VALUE_FN(expect, "number");
#endif
	volatile int uninit;

	kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
	pr_info("%px contains %d\n", &uninit, uninit);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Prevent the compiler from inlining a memcpy() call. */
static noinline void *memcpy_noinline(volatile void *dst,
				      const volatile void *src, size_t size)
{
	return memcpy((void *)dst, (const void *)src, size);
}

/* Test case: ensure that memcpy() correctly copies initialized values. */
static void test_init_memcpy(struct kunit *test)
{
	EXPECTATION_NO_REPORT(expect);
	volatile long long src;
	volatile long long dst = 0;

	src = 1;
	kunit_info(
		test,
		"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
	memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
	kmsan_check_memory((void *)&dst, sizeof(dst));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and `dst`.
 */
static void test_memcpy_aligned_to_aligned(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
	volatile int uninit_src;
	volatile int dst = 0;

	kunit_info(
		test,
		"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
	memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
	kmsan_check_memory((void *)&dst, sizeof(dst));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and unaligned `dst`.
 *
 * Copying an aligned 4-byte value to an unaligned location touches two aligned
 * 4-byte values. This test case checks that KMSAN correctly reports an error on
 * both of them.
 */
static void test_memcpy_aligned_to_unaligned(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
	volatile int uninit_src;
	volatile char dst[8] = { 0 };

	kunit_info(
		test,
		"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
	kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));
	memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
			sizeof(uninit_src));
	kmsan_check_memory((void *)dst, 4);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	report_reset();
	kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that origin slots do not accidentally get overwritten with
 * zeroes during memcpy().
 *
 * Previously, when copying memory from an aligned buffer to an unaligned one,
 * if there were zero origins corresponding to zero shadow values in the source
 * buffer, they could have ended up being copied to nonzero shadow values in the
 * destination buffer:
 *
 *  memcpy(0xffff888080a00000, 0xffff888080900002, 8)
 *
 *  src (0xffff888080900002): ..xx .... xx..
 *  src origins:              o111 0000 o222
 *  dst (0xffff888080a00000): xx.. ..xx
 *  dst origins:              o111 0000
 *                        (or 0000 o222)
 *
 * (Here '.' stands for an initialized byte, and 'x' for an uninitialized one.)
 *
 * Ensure that this does not happen anymore, and that for both destination
 * bytes the origin is nonzero (i.e. KMSAN reports an error).
 */
static void test_memcpy_initialized_gap(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap");
	volatile char uninit_src[12];
	volatile char dst[8] = { 0 };

	kunit_info(
		test,
		"unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");

	/* Initialize all of uninit_src except bytes 2-3 and 8-9. */
	uninit_src[0] = 42;
	uninit_src[1] = 42;
	uninit_src[4] = 42;
	uninit_src[5] = 42;
	uninit_src[6] = 42;
	uninit_src[7] = 42;
	uninit_src[10] = 42;
	uninit_src[11] = 42;
	memcpy_noinline((void *)&dst[0], (void *)&uninit_src[2], 8);

	kmsan_check_memory((void *)&dst[0], 4);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	report_reset();
	kmsan_check_memory((void *)&dst[2], 4);
	KUNIT_EXPECT_FALSE(test, report_matches(&expect));
	report_reset();
	kmsan_check_memory((void *)&dst[4], 4);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Generate test cases for memset16(), memset32(), memset64(). */
#define DEFINE_TEST_MEMSETXX(size)                                   \
	static void test_memset##size(struct kunit *test)            \
	{                                                            \
		EXPECTATION_NO_REPORT(expect);                       \
		volatile uint##size##_t uninit;                      \
								     \
		kunit_info(test,                                     \
			   "memset" #size "() should initialize memory\n"); \
		memset##size((uint##size##_t *)&uninit, 0, 1);       \
		kmsan_check_memory((void *)&uninit, sizeof(uninit)); \
		KUNIT_EXPECT_TRUE(test, report_matches(&expect));    \
	}

DEFINE_TEST_MEMSETXX(16)
DEFINE_TEST_MEMSETXX(32)
DEFINE_TEST_MEMSETXX(64)
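
/*
 * For instance, DEFINE_TEST_MEMSETXX(32) above expands (roughly) to:
 *
 *	static void test_memset32(struct kunit *test)
 *	{
 *		EXPECTATION_NO_REPORT(expect);
 *		volatile uint32_t uninit;
 *
 *		kunit_info(test, "memset32() should initialize memory\n");
 *		memset32((uint32_t *)&uninit, 0, 1);
 *		kmsan_check_memory((void *)&uninit, sizeof(uninit));
 *		KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 *	}
 */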

static noinline void fibonacci(int *array, int size, int start)
{
	if (start < 2 || (start == size))
		return;
	array[start] = array[start - 1] + array[start - 2];
	fibonacci(array, size, start + 1);
}

static void test_long_origin_chain(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain");
	/* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
	volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
	int last = ARRAY_SIZE(accum) - 1;

	kunit_info(
		test,
		"origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n");
	/*
	 * We do not set accum[1] to 0, so the uninitializedness will be carried
	 * over to accum[2..last].
	 */
	accum[0] = 1;
	fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
	kmsan_check_memory((void *)&accum[last], sizeof(int));
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that saving/restoring/printing stacks to/from stackdepot
 * does not trigger errors.
 *
 * KMSAN uses stackdepot to store origin stack traces, which is why we do not
 * instrument lib/stackdepot.c. Yet it must properly mark its outputs as
 * initialized because other kernel features (e.g. netdev tracker) may also
 * access stackdepot from instrumented code.
 */
static void test_stackdepot_roundtrip(struct kunit *test)
{
	unsigned long src_entries[16], *dst_entries;
	unsigned int src_nentries, dst_nentries;
	EXPECTATION_NO_REPORT(expect);
	depot_stack_handle_t handle;

	kunit_info(test, "testing stackdepot roundtrip (no reports)\n");

	src_nentries =
		stack_trace_save(src_entries, ARRAY_SIZE(src_entries), 1);
	handle = stack_depot_save(src_entries, src_nentries, GFP_KERNEL);
	stack_depot_print(handle);
	dst_nentries = stack_depot_fetch(handle, &dst_entries);
	KUNIT_EXPECT_TRUE(test, src_nentries == dst_nentries);

	kmsan_check_memory((void *)dst_entries,
			   sizeof(*dst_entries) * dst_nentries);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmsan_unpoison_memory() and the instrumentation work
 * the same.
 */
static void test_unpoison_memory(struct kunit *test)
{
	EXPECTATION_UNINIT_VALUE_FN(expect, "test_unpoison_memory");
	volatile char a[4], b[4];

	kunit_info(
		test,
		"unpoisoning via the instrumentation vs. kmsan_unpoison_memory() (2 UMR reports)\n");

	/* Initialize a[0] and check a[1]--a[3]. */
	a[0] = 0;
	kmsan_check_memory((char *)&a[1], 3);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	report_reset();

	/* Initialize b[0] and check b[1]--b[3]. */
	kmsan_unpoison_memory((char *)&b[0], 1);
	kmsan_check_memory((char *)&b[1], 3);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_copy_from_kernel_nofault(struct kunit *test)
{
	long ret;
	char buf[4], src[4];
	size_t size = sizeof(buf);

	EXPECTATION_UNINIT_VALUE_FN(expect, "copy_from_kernel_nofault");
	kunit_info(
		test,
		"testing copy_from_kernel_nofault with uninitialized memory\n");
	ret = copy_from_kernel_nofault((char *)&buf[0], (char *)&src[0], size);
	USE(ret);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static struct kunit_case kmsan_test_cases[] = {
	KUNIT_CASE(test_uninit_kmalloc),
	KUNIT_CASE(test_init_kmalloc),
	KUNIT_CASE(test_init_kzalloc),
	KUNIT_CASE(test_uninit_stack_var),
	KUNIT_CASE(test_init_stack_var),
	KUNIT_CASE(test_params),
	KUNIT_CASE(test_uninit_multiple_params),
	KUNIT_CASE(test_uninit_kmsan_check_memory),
	KUNIT_CASE(test_init_kmsan_vmap_vunmap),
	KUNIT_CASE(test_init_vmalloc),
	KUNIT_CASE(test_uaf),
	KUNIT_CASE(test_percpu_propagate),
	KUNIT_CASE(test_printk),
	KUNIT_CASE(test_init_memcpy),
	KUNIT_CASE(test_memcpy_aligned_to_aligned),
	KUNIT_CASE(test_memcpy_aligned_to_unaligned),
	KUNIT_CASE(test_memcpy_initialized_gap),
	KUNIT_CASE(test_memset16),
	KUNIT_CASE(test_memset32),
	KUNIT_CASE(test_memset64),
	KUNIT_CASE(test_long_origin_chain),
	KUNIT_CASE(test_stackdepot_roundtrip),
	KUNIT_CASE(test_unpoison_memory),
	KUNIT_CASE(test_copy_from_kernel_nofault),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;

	spin_lock_irqsave(&observed.lock, flags);
	observed.header[0] = '\0';
	observed.ignore = false;
	observed.available = false;
	spin_unlock_irqrestore(&observed.lock, flags);

	return 0;
}

static void test_exit(struct kunit *test)
{
}

static int orig_panic_on_kmsan;

static int kmsan_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	orig_panic_on_kmsan = panic_on_kmsan;
	panic_on_kmsan = 0;
	return 0;
}

static void kmsan_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
	panic_on_kmsan = orig_panic_on_kmsan;
}

static struct kunit_suite kmsan_test_suite = {
	.name = "kmsan",
	.test_cases = kmsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kmsan_suite_init,
	.suite_exit = kmsan_suite_exit,
};
kunit_test_suites(&kmsan_test_suite);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");