// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

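/*
 * save_stack() leans on two properties of the helpers above:
 * filter_irq_stacks() truncates the trace at the IRQ entry point, so
 * interrupt frames don't make otherwise-identical allocation stacks
 * distinct, and stack_depot_save() deduplicates traces, returning a
 * compact handle that is stored instead of a full stack copy.
 */
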
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
                return NULL;

        return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
            !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
            !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}

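/*
 * Worked example (a sketch, assuming the generic KASAN mapping where
 * kasan_mem_to_shadow(addr) is (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET and one shadow byte covers an 8-byte granule):
 * poisoning 32 bytes at an aligned address writes 32 >> 3 == 4 shadow
 * bytes, each set to 'value'.
 */
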
void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

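/*
 * Worked example (generic KASAN, 8-byte granules): unpoisoning 13 bytes
 * clears the first shadow byte to 0 (whole granule addressable) and
 * stores 13 & KASAN_SHADOW_MASK == 5 in the second, meaning only the
 * first 5 bytes of that granule are addressable.
 */
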
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;

        kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

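/*
 * Worked example: a 40-byte object satisfies 40 <= 64 - 16, so it gets a
 * 16-byte redzone; a 100-byte object fails 100 <= 128 - 32 but satisfies
 * 100 <= 512 - 64, so it gets a 64-byte redzone.
 */
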
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

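/*
 * Resulting layout (a sketch; actual offsets depend on the cache):
 *
 *   | object data | kasan_alloc_meta | kasan_free_meta (optional) | redzone |
 *
 * alloc_meta_offset and free_meta_offset record where the metadata lives
 * relative to the object start; the padding computed from optimal_redzone()
 * absorbs whatever space remains after the metadata.
 */
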
size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

static void kasan_set_free_info(struct kmem_cache *cache,
                void *object, u8 tag)
{
        struct kasan_alloc_meta *alloc_meta;
        u8 idx = 0;

        alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
        idx = alloc_meta->free_track_idx;
        alloc_meta->free_pointer_tag[idx] = tag;
        alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

        set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on object indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

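/*
 * Example outcomes of assign_tag() (a sketch): for a plain cache, an
 * object is tagged KASAN_TAG_KERNEL while the slab is being initialized
 * and gets a fresh random_tag() on each allocation; for a cache with a
 * constructor or SLAB_TYPESAFE_BY_RCU, the tag chosen at slab creation
 * is reused for every allocation of that object.
 */
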
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

        /* else CONFIG_KASAN_SW_TAGS: */
        if ((u8)shadow_byte == KASAN_TAG_INVALID)
                return true;
        if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
                return true;

        return false;
}

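/*
 * Shadow byte values assumed by the checks above: for generic KASAN,
 * 0 means the whole granule is addressable, 1..KASAN_SHADOW_SCALE_SIZE-1
 * mean only that many leading bytes are, and negative values are poison
 * markers (e.g. KASAN_KMALLOC_FREE); for SW tags, the shadow byte holds
 * the expected pointer tag.
 */
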
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        kasan_set_free_info(cache, object, tag);

        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

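/*
 * Worked example (a sketch, 8-byte granules): a 10-byte request in a
 * 32-byte cache unpoisons shadow for bytes [0, 10), with the partial
 * granule encoding catching accesses at bytes 10..15, and poisons
 * [16, 32) (redzone_start == round_up(object + 10, 8)) with
 * KASAN_KMALLOC_REDZONE, so an access at offset 20 is reported.
 */
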
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

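/*
 * Worked example for the sizing in kasan_module_alloc() (assuming an
 * 8-byte granule, i.e. KASAN_SHADOW_SCALE_SHIFT == 3): a 1 MiB module
 * mapping needs (1 << 20) >> 3 == 128 KiB of shadow, which round_up()
 * then pads to whole pages before __vmalloc_node_range() maps it.
 */
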
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
extern bool report_enabled(void);

bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
        unsigned long flags = user_access_save();
        bool ret = false;

        if (likely(report_enabled())) {
                __kasan_report(addr, size, is_write, ip);
                ret = true;
        }

        user_access_restore(flags);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(), the first one is
         * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
         * pud_bad(), if pud is bad then it's bad because it's huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, then it must have been
                 * mapped during boot. This can happen if we are onlining
                 * previously offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free shadow.
                 * Non-NULL result of the find_vm_area() will tell us if
                 * that was the second case.
                 *
                 * Currently it's not possible to free shadow mapped
                 * during boot by kasan_init(). It's because the code
                 * to do that hasn't been written yet. So we'll just
                 * leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
{
        unsigned long page;
        pte_t pte;

        if (likely(!pte_none(*ptep)))
                return 0;

        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

        spin_lock(&init_mm.page_table_lock);
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
                free_page(page);
        return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
        unsigned long shadow_start, shadow_end;
        int ret;

        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;

        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
        shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);

        ret = apply_to_page_range(&init_mm, shadow_start,
                                  shadow_end - shadow_start,
                                  kasan_populate_vmalloc_pte, NULL);
        if (ret)
                return ret;

        flush_cache_vmap(shadow_start, shadow_end);

        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
         *   CPU#0                              CPU#1
         * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
         *                                      p[99] = 1;
         *
         * With compiler instrumentation, that ends up looking like this:
         *
         *   CPU#0                              CPU#1
         * // vmalloc() allocates memory
         * // let a = area->addr
         * // we reach kasan_populate_vmalloc
         * // and call kasan_unpoison_shadow:
         * STORE shadow(a), unpoison_val
         * ...
         * STORE shadow(a+99), unpoison_val     x = LOAD p
         * // rest of vmalloc process           <data dependency>
         * STORE p, a                           LOAD shadow(x+99)
         *
         * If there is no barrier between the end of unpoisoning the shadow
         * and the store of the result to p, the stores could be committed
         * in a different order by CPU#0, and CPU#1 could erroneously observe
         * poison in the shadow.
         *
         * We need some sort of barrier between the stores.
         *
         * In the vmalloc() case, this is provided by a smp_wmb() in
         * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
         * get_vm_area() and friends, the caller gets shadow allocated but
         * doesn't have any pages mapped into the virtual address space that
         * has been reserved. Mapping those pages in will involve taking and
         * releasing a page-table lock, which will provide the barrier.
         */

        return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        kasan_unpoison_shadow(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
{
        unsigned long page;

        page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

        spin_lock(&init_mm.page_table_lock);

        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
        }
        spin_unlock(&init_mm.page_table_lock);

        return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end)
{
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
        unsigned long size;

        region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        free_region_start = ALIGN(free_region_start,
                                  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (start != region_start &&
            free_region_start < region_start)
                region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        free_region_end = ALIGN_DOWN(free_region_end,
                                     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (end != region_end &&
            free_region_end > region_end)
                region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);

        if (shadow_end > shadow_start) {
                size = shadow_end - shadow_start;
                apply_to_existing_page_range(&init_mm,
                                             (unsigned long)shadow_start,
                                             size, kasan_depopulate_vmalloc_pte,
                                             NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
}
#endif

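/*
 * Worked example for the alignment in kasan_release_vmalloc() (a sketch,
 * assuming 4 KiB pages and 8-byte granules): one shadow page covers
 * 4096 * 8 == 32 KiB of vmalloc address space, so region_start is aligned
 * up and region_end aligned down to 32 KiB boundaries, and only shadow
 * pages lying wholly inside the free region reach
 * kasan_depopulate_vmalloc_pte().
 */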