// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below always inlined so compiler could
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

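/*
 * Generic KASAN shadow encoding: every KASAN_GRANULE_SIZE (8) bytes of memory
 * map to one shadow byte. A shadow value of 0 means the whole granule is
 * accessible, a value of 1..7 means only the first N bytes are accessible,
 * and negative values mark the granule as poisoned (redzones, freed memory).
 * The helpers below implement the corresponding checks for the access sizes
 * the compiler emits.
 */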
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

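/*
 * For other access sizes, the shadow range covering the access is scanned
 * for a nonzero byte: an unaligned prefix and the tail are checked byte by
 * byte, the aligned middle one word (u64) at a time. The return value is
 * the address of the first nonzero shadow byte, or 0 if all bytes are zero.
 */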
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

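/*
 * Core check used by both the outline __asan_load/store hooks and
 * kasan_check_range(): bail out while the shadow is not initialized, ignore
 * zero-sized accesses, and report wrap-around, addresses without shadow
 * metadata, and poisoned shadow. Returns true if the access is considered
 * valid.
 */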
static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

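/*
 * Shrinking or destroying a cache flushes its objects out of the KASAN
 * quarantine first; otherwise quarantined (delayed-free) objects would keep
 * slab pages of the cache alive.
 */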
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

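/*
 * For each instrumented global variable the compiler emits a constructor
 * call to __asan_register_globals() with an array of struct kasan_global
 * descriptors: the variable itself is unpoisoned and the redzone placed
 * after it is poisoned with KASAN_GLOBAL_REDZONE.
 */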
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

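/*
 * Outline instrumentation: the compiler inserts a call before every memory
 * access of the given size. Kernel builds typically link against the
 * *_noabort variants, which here are plain aliases since KASAN reports do
 * not abort execution.
 */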
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

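/*
 * Layout assumed for an instrumented alloca() allocation:
 *
 *   [ left redzone ][ object, rounded up to granules ][ padding + right redzone ]
 *
 * with both redzones KASAN_ALLOCA_REDZONE_SIZE bytes and the object start
 * aligned to KASAN_ALLOCA_REDZONE_SIZE.
 */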
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

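/*
 * 0x00 unpoisons a range; the nonzero values above match the markers the
 * compiler uses for stack frame redzones and locals that have gone out of
 * scope.
 */
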
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

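/*
 * optimal_redzone() example: a 100-byte object falls into the "<= 512 - 64"
 * bucket and gets a 64-byte redzone, while a 3000-byte object gets 128 bytes.
 */
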
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
	 * that thus have per-object metadata. Currently, this flag is used in
	 * slab_ksize() to account for per-object metadata when calculating the
	 * size of the accessible memory within the object. Additionally, we use
	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN | SLAB_NO_MERGE;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. It is from a kmalloc cache which enables the debug option
	 *    to store original size.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    slub_debug_orig_size(cache)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
							cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

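/*
 * Resulting per-object layout: alloc meta (when it fits) lives in the redzone
 * past the object; free meta lives inside the object when that is safe,
 * otherwise in the redzone; free_meta_offset == KASAN_NO_FREE_META means no
 * free meta is stored at all.
 */
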
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	if (!kasan_arch_is_ready())
		return;

	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			  sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

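/*
 * The two most recent auxiliary stacks are kept per object and shown in
 * reports; callers such as call_rcu() and the workqueue code use them to
 * record where an object was handed off for asynchronous processing.
 */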
void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}