/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		*shadow = size & KASAN_SHADOW_MASK;
	}
}
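/*
 * Example (illustrative): with KASAN_SHADOW_SCALE_SIZE == 8, a call to
 * kasan_unpoison_shadow(p, 13) clears the shadow byte of the first
 * 8-byte granule of 'p' to 0 (fully accessible) and writes 5 (13 & 7)
 * into the shadow byte of the second granule, meaning only its first
 * five bytes may be touched; the trailing three bytes stay poisoned.
 */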
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}
/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}
/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;

	kasan_unpoison_shadow(sp, size);
}
/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;

		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
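/*
 * Example (illustrative): if the granule's shadow byte is 5, bytes at
 * offsets 0..4 within the granule are valid. An access at offset 4
 * (4 >= 5 is false) passes, while one at offset 5 (5 >= 5) is reported.
 * Negative shadow values (poison) always compare as poisoned here.
 */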
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}

	return false;
}
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
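/*
 * Example (illustrative): for a compile-time-constant access such as
 * reading a u32, the compiler emits __asan_load4(), whose inlined
 * check reduces to memory_is_poisoned_2_4_8(addr, 4): one shadow byte
 * if the access stays inside a granule, two if it straddles one.
 */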
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}
static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}
void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}
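/*
 * Example (illustrative): every kernel memset()/memmove()/memcpy() is
 * routed through these interceptors. A memset() that runs past the end
 * of a slab object first triggers a KASAN report via
 * check_memory_region(); the underlying __memset() is still performed
 * afterwards, since KASAN reports rather than aborts.
 */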
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}
#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}
void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;

	return rz;
}
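/*
 * Example (illustrative): optimal_redzone(100) walks the chain as
 * 100 > 48, 100 > 96, 100 <= 448, so a 100-byte object gets a 64-byte
 * redzone; redzones grow roughly with object size to keep the relative
 * memory overhead bounded.
 */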
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			slab_flags_t *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
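/*
 * Resulting object layout (illustrative): each slab object is extended
 * to [object | kasan_alloc_meta | (optional) kasan_free_meta | redzone],
 * with the total clamped to KMALLOC_MAX_SIZE. The free meta is stored
 * out-of-line like this only when the object itself cannot safely hold
 * it (RCU caches, constructors, or objects smaller than the meta).
 */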
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}
static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}
void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return true;
}
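/*
 * Example (illustrative): instead of returning the object to the
 * allocator immediately, the free is deferred through the quarantine.
 * The memory stays poisoned as KASAN_KMALLOC_FREE, so a dangling
 * pointer dereferenced before the quarantine is drained produces a
 * use-after-free report that includes the recorded free stack.
 */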
bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
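/*
 * Example (illustrative): kmalloc(100, ...) served from the kmalloc-128
 * cache unpoisons bytes 0..99 (the shadow byte of the 13th granule
 * becomes 4, covering bytes 96..99) and poisons bytes 104..127 as
 * KASAN_KMALLOC_REDZONE, so a read at offset 100 or beyond is reported
 * as out-of-bounds.
 */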
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}
void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				    KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}
void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
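/*
 * Example (illustrative): DEFINE_ASAN_LOAD_STORE(4) expands to
 * __asan_load4()/__asan_store4() plus their _noabort aliases; the
 * compiler inserts calls to these around every instrumented 4-byte
 * memory access.
 */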
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);
/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);
/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
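/*
 * Resulting layout around an alloca()ed object (illustrative):
 *
 *	[left redzone][object, size rounded up][padding][right redzone]
 *
 * Both redzones are KASAN_ALLOCA_REDZONE_SIZE bytes, poisoned as
 * KASAN_ALLOCA_LEFT and KASAN_ALLOCA_RIGHT respectively, so both
 * underflows and overflows of the variable-size object are caught.
 */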
/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);
/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
#ifdef CONFIG_MEMORY_HOTPLUG
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_OFFLINE:
		vfree((void *)shadow_start);
	}

	return NOTIFY_OK;
}
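/*
 * Example (illustrative, assuming KASAN_SHADOW_SCALE_SHIFT == 3):
 * onlining a 128 MB memory block requires vmalloc()ing 16 MB of shadow
 * at the corresponding kasan_mem_to_shadow() address; offlining the
 * block frees that shadow again.
 */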
static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif