// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since KFENCE's
 * reports are obtained via the console, this is the output we should verify.
 * Each test case checks for the presence (or absence) of generated reports,
 * relying on the 'console' tracepoint to capture reports as they appear in
 * the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h" /* For kfence_sample_interval and __kfence_pool. */

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

#define KFENCE_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)
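
/*
 * Example use (sketch; mirrors test_init_on_free() and test_gfpzero() below):
 *
 *   KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
 *   KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
 *
 * If the condition does not hold, the current test is skipped rather than
 * failed.
 */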

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KFENCE report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
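
/*
 * For illustration only (not verified verbatim here): a captured report
 * starts with a title line such as
 *
 *   BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x...
 *
 * stored in observed.lines[0], followed by the access-information line, e.g.
 *
 *   Out-of-bounds read at 0x...
 *
 * stored in observed.lines[1]. report_matches() below reconstructs the same
 * two prefixes from a struct expect_report and compares against these.
 */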

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is the access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
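
/*
 * Typical usage in the test cases below (sketch): fill in a struct
 * expect_report with the expected error type, the test function, and the
 * faulting address, trigger the bad access, and then check
 * KUNIT_EXPECT_TRUE(test, report_matches(&expect)).
 */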

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
	flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	/* Just to get ->align, so no need to pass in the real caller. */
	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);

	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and on which side of the object
 * the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,	/* KFENCE, any side. */
	ALLOCATE_LEFT,	/* KFENCE, left side of page. */
	ALLOCATE_RIGHT,	/* KFENCE, right side of page. */
	ALLOCATE_NONE,	/* No KFENCE allocation. */
};
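
/*
 * Clarifying note (based on the PAGE_ALIGNED() checks in test_alloc() below):
 * ALLOCATE_LEFT yields a page-aligned object, i.e. the guard page sits
 * immediately to its left; ALLOCATE_RIGHT yields a non-page-aligned object
 * placed towards the end of its page, next to the right guard page.
 */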

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[type][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_use_after_free_read_nofault(struct kunit *test)
{
	const size_t size = 32;
	char *addr;
	char dst;
	int ret;

	setup_test_cache(test, size, 0, NULL);
	addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(addr);

	/* Use after free with *_nofault(). */
	ret = copy_from_kernel_nofault(&dst, addr, 1);
	KUNIT_EXPECT_EQ(test, ret, -EFAULT);
	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
						     0, (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	/* Skip if we think it'd take too long. */
	KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	i = 0;
	do {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (kthread_should_stop() || (i++ == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	} while (1);

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test that SLAB_TYPESAFE_BY_RCU caches work. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed the earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up two tests per test case: one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
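
/*
 * For example, KFENCE_KUNIT_CASE(test_out_of_bounds_read) expands to two
 * kunit_case entries:
 *
 *   { .run_case = test_out_of_bounds_read, .name = "test_out_of_bounds_read" },
 *   { .run_case = test_out_of_bounds_read, .name = "test_out_of_bounds_read-memcache" }
 *
 * test_init() below sets up the test_cache for the "-memcache" variant.
 */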

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_use_after_free_read_nofault),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static int kfence_suite_init(struct kunit_suite *suite)
{
	register_trace_console(probe_console, NULL);
	return 0;
}

static void kfence_suite_exit(struct kunit_suite *suite)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kfence_suite_init,
	.suite_exit = kfence_suite_exit,
};

kunit_test_suites(&kfence_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");
MODULE_DESCRIPTION("kfence unit test suite");