// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */
#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"
/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond)) {                                        \
			WRITE_ONCE(kfence_enabled, false);                     \
			disabled_by_warn = true;                               \
		}                                                              \
		__cond;                                                        \
	})
/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."
static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	/* Using 0 to indicate KFENCE is disabled. */
	if (!num && READ_ONCE(kfence_enabled)) {
		pr_info("disabled\n");
		WRITE_ONCE(kfence_enabled, false);
	}

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();
	return 0;
}
static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}
static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
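
/*
 * With MODULE_PARAM_PREFIX "kfence." the parameter appears as
 * /sys/module/kfence/parameters/sample_interval: writing 0 at runtime disables
 * KFENCE, and writing a non-zero value (in milliseconds) re-enables it, unless
 * it was previously disabled by a warning. E.g.:
 *	echo 100 > /sys/module/kfence/parameters/sample_interval
 */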

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* Allocation burst count: number of excess KFENCE allocations per sample. */
static unsigned int kfence_burst __read_mostly;
module_param_named(burst, kfence_burst, uint, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata *kfence_metadata __read_mostly;

/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */
static struct kfence_metadata *kfence_metadata_init __read_mostly;

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM 2
#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
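
/*
 * Worked example: with CONFIG_KFENCE_NUM_OBJECTS == 255, ALLOC_COVERED_ORDER is
 * const_ilog2(255) + 2 == 9, so the filter has 512 counters, and each stack
 * trace hash increments ALLOC_COVERED_HNUM == 2 of them.
 */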

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED] = "currently allocated",
	[KFENCE_COUNTER_ALLOCS] = "total allocations",
	[KFENCE_COUNTER_FREES] = "total frees",
	[KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
	[KFENCE_COUNTER_BUGS] = "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED] = "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */
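
/*
 * E.g. with the default skip_covered_thresh of 75 and CONFIG_KFENCE_NUM_OBJECTS
 * == 255, covered allocations are skipped once more than 191 objects are
 * currently allocated.
 */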
static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}
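
/*
 * Hash at most UNIQUE_ALLOC_STACK_DEPTH entries of the stack trace; frames from
 * IRQ entry code are cropped by filter_irq_stacks() first, so an allocation
 * site hashes identically regardless of the interrupt context it was reached
 * from.
 */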
static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
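
/*
 * Pool layout: pages 0 and 1 are guard pages; object i is backed by page
 * 2 * i + 2, followed by its guard page at 2 * i + 3. metadata_to_pageaddr()
 * inverts a metadata pointer into that page address, e.g. kfence_metadata[0]
 * maps to __kfence_pool + 2 * PAGE_SIZE.
 */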
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
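
/*
 * An object in KFENCE_OBJECT_RCU_FREEING is still considered allocated:
 * kfence_guarded_free() has not run yet, so accesses to it are not (yet)
 * use-after-frees.
 */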
static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
{
	enum kfence_object_state state = READ_ONCE(meta->state);

	return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;

	lockdep_assert_held(&meta->lock);

	/* Stack has been saved when calling rcu, skip. */
	if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
		goto out;

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}

	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

out:
	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

#ifdef CONFIG_KMSAN
#define check_canary_attributes noinline __no_kmsan_checks
#else
#define check_canary_attributes inline
#endif

/* Check canary byte at @addr. */
static check_canary_attributes bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

static inline void set_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * The canary may be written to part of the object memory, but it does
	 * not affect it. The user should initialize the object before using it.
	 */
	for (; addr < meta->addr; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;

	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
}
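
/*
 * check_canary() compares whole u64 words where possible and falls back to
 * byte-wise check_canary_byte() around the object boundary and once a
 * mismatching word is found, so that corruption reports pinpoint the exact
 * corrupted bytes.
 */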
static check_canary_attributes void
check_canary(const struct kfence_metadata *meta)
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr = pageaddr;

	/*
	 * We'll iterate over each canary byte per-side until a corrupted byte
	 * is found. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
			break;
	}

	/*
	 * If the canary is corrupted within a u64 word, or the canary memory
	 * cannot be completely covered by multiple consecutive u64 words, it
	 * needs to be checked byte by byte.
	 */
	for (; addr < meta->addr; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			return;
	}
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {

			for (; addr - pageaddr < PAGE_SIZE; addr++) {
				if (!check_canary_byte((u8 *)addr))
					return;
			}
		}
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;
	const bool random_right_allocate = get_random_u32_below(2);
	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * get_random_u32_below() will always return zero. We still benefit
	 * from enabling KFENCE as early as possible, even when the RNG is not
	 * yet available, as this will allow KFENCE to detect bugs due to
	 * earlier allocations. The only downside is that the out-of-bounds
	 * accesses detected are deterministic for such allocations.
	 */
	if (random_right_allocate) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}
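	/*
	 * Right-aligned objects end next to the following guard page, so
	 * out-of-bounds accesses past the object fault immediately;
	 * left-aligned objects (the default address) catch accesses before
	 * the object the same way via the preceding guard page.
	 */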
	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;

	/* Memory initialization. */
	set_canary(meta);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * canary bytes.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);

	if (random_fault)
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	check_canary(meta);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return (unsigned long)__kfence_pool;

	addr = (unsigned long)__kfence_pool;
	pages = virt_to_page(__kfence_pool);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
				 MEMCG_DATA_OBJEXTS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata_init[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto reset_slab;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * Make kfence_metadata visible only when initialization is successful.
	 * Otherwise, if the initialization fails and kfence_metadata is freed,
	 * it may cause UAF in kfence_shutdown_cache().
	 */
	smp_store_release(&kfence_metadata, kfence_metadata_init);
	return 0;

reset_slab:
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;
#ifdef CONFIG_MEMCG
		slab->obj_exts = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}

	return addr;
}

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr) {
		/*
		 * The pool is live and will never be deallocated from this point on.
		 * Ignore the pool object from the kmemleak phys object tree, as it would
		 * otherwise overlap with allocations returned by kfence_alloc(), which
		 * are registered with kmemleak through the slab post-alloc hook.
		 */
		kmemleak_ignore_phys(__pa(__kfence_pool));
		return true;
	}

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;

	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
	kfence_metadata_init = NULL;

	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is
 * used to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations objects_sops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int kfence_debugfs_init(void)
{
	struct dentry *kfence_dir;

	if (!READ_ONCE(kfence_enabled))
		return 0;

	kfence_dir = debugfs_create_dir("kfence", NULL);
	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);

	return 0;
}

late_initcall(kfence_debugfs_init);
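
/*
 * The files above appear as /sys/kernel/debug/kfence/stats (world-readable
 * counters) and /sys/kernel/debug/kfence/objects (root-only per-object state
 * and stack traces).
 */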

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		if (kfence_obj_allocated(meta))
			check_canary(meta);
	}
}

static int kfence_check_canary_callback(struct notifier_block *nb,
					unsigned long reason, void *arg)
{
	kfence_check_all_canary();
	return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
	.notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, -kfence_burst);
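	/*
	 * The gate admits atomic_inc_return() values <= 1 in __kfence_alloc():
	 * starting at -kfence_burst therefore allows kfence_burst extra
	 * allocations (i.e. kfence_burst + 1 in total) per sample period.
	 */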
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}

/* === Public interface ===================================================== */

void __init kfence_alloc_pool_and_metadata(void)
{
	if (!kfence_sample_interval)
		return;

	/*
	 * If the pool has already been initialized by arch, there is no need to
	 * re-allocate the memory pool.
	 */
	if (!__kfence_pool)
		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool) {
		pr_err("failed to allocate pool\n");
		return;
	}

	/* The memory allocated by memblock has been zeroed out. */
	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
	if (!kfence_metadata_init) {
		pr_err("failed to allocate metadata\n");
		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
		__kfence_pool = NULL;
	}
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = get_random_u32();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
	unsigned long addr = (unsigned long)__kfence_pool;
	unsigned long free_size = KFENCE_POOL_SIZE;
	int err = -ENOMEM;

#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
				   NULL);
	if (!pages)
		return -ENOMEM;

	__kfence_pool = page_to_virt(pages);
	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
				   NULL);
	if (pages)
		kfence_metadata_init = page_to_virt(pages);
#else
	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}

	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;

	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
#endif

	if (!kfence_metadata_init)
		goto free_pool;

	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
	addr = kfence_init_pool();
	if (!addr) {
		kfence_init_enable();
		kfence_debugfs_init();
		return 0;
	}

	pr_err("%s failed\n", __func__);
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
	err = -EBUSY;

#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
			  nr_pages_meta);
free_pool:
	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
			  free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
free_pool:
	free_pages_exact((void *)addr, free_size);
#endif

	kfence_metadata_init = NULL;
	__kfence_pool = NULL;
	return err;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("re-enabled\n");
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	/* Pairs with release in kfence_init_pool(). */
	if (!smp_load_acquire(&kfence_metadata))
		return;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && kfence_obj_allocated(meta);
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;
	int allocation_gate;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations for this slab, if KFENCE has been disabled for
	 * this slab.
	 */
	if (s->flags & SLAB_SKIP_KFENCE)
		return NULL;

	allocation_gate = atomic_inc_return(&kfence_allocation_gate);
	if (allocation_gate > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->obj_exts.objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 * Save the stack trace here so that reports show where the user freed
	 * the object.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
		unsigned long flags;

		raw_spin_lock_irqsave(&meta->lock, flags);
		metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	} else {
		kfence_guarded_free(addr, meta, false);
	}
}
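
/*
 * Fault handling: addresses in odd pages of the pool are guard pages, so a
 * fault there is reported as an out-of-bounds access relative to the nearer
 * neighboring object; a fault on an even (object) page means the page was
 * protected, i.e. the object was already freed, and is reported as a
 * use-after-free.
 */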
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && kfence_obj_allocated(meta)) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && kfence_obj_allocated(meta)) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}