// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan: test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include "kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
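/*
 * With the tag-based modes, an access that stays within the granule holding
 * the end of an object shares the object's tag and is not detectable, so the
 * tests below add OOB_TAG_OFF to push such accesses one full granule past the
 * object. Generic KASAN has byte-granular redzones, so no offset is needed.
 */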
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;
	bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}
static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	register_trace_console(probe_console, NULL);
	return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}
/*
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)
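/*
 * Typical usage, as in the tests below: wrap a single buggy access, e.g.
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 *
 * which fails the KUnit test unless the access produces a KASAN report.
 */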
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		break;	/* No compiler instrumentation. */		\
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
		break;	/* Should always be instrumented! */		\
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
		kunit_skip((test), "Test requires checked mem*()");	\
} while (0)
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}
static void kmalloc_track_caller_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	/*
	 * Check that KASAN detects out-of-bounds access for object allocated via
	 * kmalloc_track_caller().
	 */
	ptr = kmalloc_track_caller(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');

	kfree(ptr);

	/*
	 * Check that KASAN detects out-of-bounds access for object allocated via
	 * kmalloc_node_track_caller().
	 */
	ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');

	kfree(ptr);
}
/*
 * Check that KASAN detects an out-of-bounds access for a big object allocated
 * via kmalloc(). But not as big as to trigger the page_alloc fallback.
 */
static void kmalloc_big_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

	kfree(ptr);
}
/*
 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
 * that does not fit into the largest slab cache and therefore is allocated via
 * the page_alloc fallback.
 */

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}
static void kmalloc_large_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_large_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));

	kfree(ptr);
}
static void page_alloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}
static void page_alloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}
static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_large_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_large_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}
/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/* RELOC_HIDE to prevent gcc from warning about short alloc */
	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);

	kfree(ptr1);
	kfree(ptr2);
}
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);

	kfree(ptr1);
}
/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */
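/*
 * For example, kmalloc_oob_memset_2() below calls memset(ptr + size - 1, 0, 2):
 * the write starts on the last in-bounds byte and ends one byte past the
 * allocation.
 */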
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
	kfree(ptr);
}
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 4;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
	kfree(ptr);
}
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 8;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
	kfree(ptr);
}
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 16;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
	kfree(ptr);
}
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}
static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
	int *i_unsafe = unsafe;

	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
}
static void kasan_atomics(struct kunit *test)
{
	void *a1, *a2;

	/*
	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
	 * that the following 16 bytes will make up the redzone.
	 */
	a1 = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

	/* Use atomics to access the redzone. */
	kasan_atomics_helper(test, a1 + 48, a2);

	kfree(a1);
	kfree(a2);
}
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}
/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
/*
 * The two tests below check that Generic KASAN prints auxiliary stack traces
 * for RCU callbacks and workqueues. The reports need to be inspected manually.
 *
 * These tests are still enabled for other KASAN modes to make sure that all
 * modes report bad accesses in tested scenarios.
 */

static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	((volatile struct kasan_rcu_info *)fp)->i;
}
static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}

static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 10;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 10;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
static void kmem_cache_rcu_uaf(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}
	*p = 1;

	rcu_read_lock();

	/* Free the object - this will internally schedule an RCU callback. */
	kmem_cache_free(cache, p);

	/*
	 * We should still be allowed to access the object at this point because
	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
	 * critical section since before the kmem_cache_free().
	 */
	READ_ONCE(*p);

	rcu_read_unlock();

	/*
	 * Wait for the RCU callback to execute; after this, the object should
	 * have actually been freed from KASAN's perspective.
	 */
	rcu_barrier();

	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));

	kmem_cache_destroy(cache);
}
static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}
static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Allocate one element to prevent mempool from freeing elements to the
	 * underlying allocator and instead make it add them to the element
	 * list when the tests trigger double-free and invalid-free bugs.
	 * This allows testing KASAN annotations in add_element().
	 */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
{
	struct kmem_cache *cache;
	int pool_size = 4;
	int ret;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_slab_pool(pool, pool_size, cache);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Do not allocate one preallocated element, as we skip the double-free
	 * and invalid-free tests for slab mempool for simplicity.
	 */

	return cache;
}
static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_page_pool(pool, pool_size, order);
	KUNIT_ASSERT_EQ(test, ret, 0);

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	OPTIMIZER_HIDE_VAR(elem);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[size])[0]);
	else
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);

	mempool_free(elem, pool);
}
static void mempool_kmalloc_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_kmalloc_large_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_slab_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}

/*
 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
 * allocations have no redzones, and thus the out-of-bounds detection is not
 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
 * the tag-based KASAN modes, the neighboring allocation might have the same
 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
 */
static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
{
	char *elem, *ptr;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	ptr = page ? page_address((struct page *)elem) : elem;
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void mempool_kmalloc_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_kmalloc_large_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_slab_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}
static void mempool_page_alloc_uaf(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	void *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_uaf_helper(test, &pool, true);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
}
static void mempool_kmalloc_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_kmalloc_large_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_page_alloc_double_free(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	void *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));

	mempool_free(elem, pool);
}
static void mempool_kmalloc_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
static void mempool_kmalloc_large_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/*
 * Skip the invalid-free test for page mempool. The invalid-free detection only
 * works for compound pages and mempool preallocates all page elements without
 * the __GFP_COMP flag.
 */

static char global_array[10];
static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
	if (nr < 7)
		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				xor_unlock_is_negative_byte(1 << nr, addr));
}
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}
static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}
static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}
static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}
/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!kasan_vmalloc_enabled())
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}
/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff.*/
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}
/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}
/*
 * Check that Rust performing a use-after-free using `unsafe` is detected.
 * This is a smoke test to make sure that Rust is being sanitized properly.
 */
static void rust_uaf(struct kunit *test)
{
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
}
static void copy_to_kernel_nofault_oob(struct kunit *test)
{
	char *ptr;
	char buf[128];
	size_t size = sizeof(buf);

	/*
	 * This test currently fails with the HW_TAGS mode. The reason is
	 * unknown and needs to be investigated.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	OPTIMIZER_HIDE_VAR(ptr);

	/*
	 * We test copy_to_kernel_nofault() to detect corrupted memory that is
	 * being written into the kernel. In contrast,
	 * copy_from_kernel_nofault() is primarily used in kernel helper
	 * functions where the source address might be random or uninitialized.
	 * Applying KASAN instrumentation to copy_from_kernel_nofault() could
	 * lead to false positives. By focusing KASAN checks only on
	 * copy_to_kernel_nofault(), we ensure that only valid memory is
	 * written to the kernel, minimizing the risk of kernel corruption
	 * while avoiding false positives in the reverse case.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test,
		copy_to_kernel_nofault(&buf[0], ptr, size));
	KUNIT_EXPECT_KASAN_FAIL(test,
		copy_to_kernel_nofault(ptr, &buf[0], size));

	kfree(ptr);
}
static void copy_user_test_oob(struct kunit *test)
{
	char *kmem;
	char __user *usermem;
	unsigned long useraddr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	int __maybe_unused unused;

	kmem = kunit_kmalloc(test, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);

	useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
					PROT_READ | PROT_WRITE | PROT_EXEC,
					MAP_ANONYMOUS | MAP_PRIVATE, 0);
	KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
		"Could not create userspace mm");
	KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
		"Failed to allocate user memory");

	OPTIMIZER_HIDE_VAR(size);
	usermem = (char __user *)useraddr;

	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = copy_from_user(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = copy_to_user(usermem, kmem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = __copy_from_user(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = __copy_to_user(usermem, kmem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = __copy_to_user_inatomic(usermem, kmem, size + 1));

	/*
	 * Prepare a long string in usermem to avoid the strncpy_from_user test
	 * bailing out on '\0' before it reaches out-of-bounds.
	 */
	memset(kmem, 'a', size);
	KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);

	KUNIT_EXPECT_KASAN_FAIL(test,
		unused = strncpy_from_user(kmem, usermem, size + 1));
}
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_track_caller_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE_SLOW(kasan_atomics),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	KUNIT_CASE(copy_to_kernel_nofault_oob),
	KUNIT_CASE(rust_uaf),
	KUNIT_CASE(copy_user_test_oob),
	{}
};
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);
MODULE_LICENSE("GPL");