// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define __KASAN_INTERNAL

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
               (ptr >= (unsigned long)&__softirqentry_text_start &&
                ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}
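
/*
 * save_stack() below records the current call stack, trims it to the IRQ
 * entry point via filter_irq_stacks(), and stores it in the stack depot,
 * which deduplicates traces and returns a compact handle.
 */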

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries - 1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);
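
/*
 * KASAN replaces the architecture's memset/memmove/memcpy with the checked
 * wrappers below: the source and destination ranges are validated against
 * shadow memory before the uninstrumented __mem*() implementations do the
 * actual work.
 */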

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}
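
/*
 * Unpoisoning marks a region accessible again. When 'size' is not a multiple
 * of KASAN_SHADOW_SCALE_SIZE, the shadow byte covering the last, partially
 * used granule records how many of its bytes are valid: with the generic
 * mode's 8-byte granules, for example, unpoisoning 13 bytes writes 0 to the
 * first shadow byte and 5 (13 & KASAN_SHADOW_MASK) to the second.
 */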

void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}
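
/*
 * Page allocator hooks: on allocation every page of the order-N block gets
 * the same tag (randomly chosen for software tag-based KASAN) and the whole
 * range is unpoisoned; on free the range is poisoned again so that stale
 * accesses are reported.
 */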

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
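
/*
 * kasan_cache_create() below lays out an instrumented slab object as
 * [object][alloc meta][free meta, when needed][redzone padding] and grows
 * *size accordingly. With the table above (generic KASAN), for example,
 * a 100-byte object gets a 64-byte redzone and a 4 KiB object a 256-byte one.
 */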

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}
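
/*
 * A freshly allocated slab page starts out fully poisoned; individual
 * objects are unpoisoned as the slab allocator hands them out.
 */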

void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}
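
/*
 * Called for each object when a slab is created: zeroes the allocation
 * metadata and, for software tag-based KASAN, embeds the preassigned tag
 * into the pointer that the slab allocator will store.
 */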

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
        else
                return tag != (u8)shadow_byte;
}
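
/*
 * __kasan_slab_free() reports invalid and double frees, poisons the object,
 * and for generic KASAN optionally defers the actual freeing by putting the
 * object into the quarantine. A true return value tells the slab allocator
 * not to free the object itself (it was either reported as an invalid free
 * or placed in the quarantine).
 */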

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}
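
/*
 * __kasan_kmalloc() unpoisons [object, object + size) and poisons the rest
 * of the slab object, rounded up to the shadow granule, as a right redzone;
 * the allocation stack is recorded in the object's alloc metadata for
 * SLAB_KASAN caches. For tag-based KASAN the returned pointer carries the
 * newly assigned tag.
 */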

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
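
/*
 * For kmalloc() allocations served directly by the page allocator there is
 * no slab metadata: the requested size is unpoisoned and whatever remains of
 * the compound page becomes the redzone.
 */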

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                    KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}
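
/*
 * Shadow for module space is allocated on demand: kasan_module_alloc() maps
 * size >> KASAN_SHADOW_SCALE_SHIFT bytes of shadow, rounded up to a page.
 * With the generic mode's 8-byte granules, for example, a 1 MiB module
 * mapping needs 128 KiB of shadow.
 */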

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(), the first one is
         * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
         * pud_bad(): if a pud is bad, it is bad because it is huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}
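
/*
 * Memory hotplug support: when a memory block goes online, shadow covering
 * the new range (one shadow byte per KASAN_SHADOW_SCALE_SIZE bytes of
 * memory) is vmalloc'ed; when onlining is cancelled or the block goes
 * offline again, shadow that was mapped here (and not by early boot
 * kasan_init()) is freed.
 */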

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If shadow is mapped already then it must have been mapped
                 * during boot. This could happen if we are onlining
                 * previously offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow.
                 * A non-NULL result of find_vm_area() tells us that we are
                 * dealing with the latter case.
                 *
                 * Currently it is not possible to free shadow mapped during
                 * boot by kasan_init(): the code to do that has not been
                 * written yet, so we simply leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif