// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */
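
/*
 * Editor's note (illustrative, not from the original file): with generic
 * KASAN, kasan_mem_to_shadow() maps an address to its shadow byte roughly as
 *
 *      shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *                      + KASAN_SHADOW_OFFSET;
 *
 * so one shadow byte describes an 8-byte granule of kernel memory. The
 * helpers below only read shadow bytes; poisoning happens elsewhere.
 */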

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
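
/*
 * Editor's note (illustrative): a shadow value of 0 means the whole 8-byte
 * granule is accessible, a value of 1..7 means only the first N bytes are,
 * and a negative value is a poison marker for the whole granule. For
 * example, with a shadow value of 5, a 1-byte access at offset 4 within the
 * granule passes (4 >= 5 is false) while one at offset 5 is reported.
 */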

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

        /*
         * The access crosses an 8-byte (shadow granule) boundary. Such an
         * access maps into 2 shadow bytes, so we need to check them both.
         */
        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}
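
/*
 * Editor's note (illustrative): the boundary test above fires when the first
 * and last byte of the access land in different granules. E.g. a 4-byte
 * access at an address ending in ...6 has its last byte at offset
 * (6 + 3) & 7 == 1, and 1 < 3, so both shadow bytes are checked.
 */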

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                                size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}
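
/*
 * Editor's note: fixed-size accesses are instrumented by the compiler with
 * __asan_load1/2/4/8/16() and __asan_store1/2/4/8/16(), so size is a
 * compile-time constant on those paths and __builtin_constant_p() lets each
 * hook collapse to the minimal specialized check above. Variable-sized
 * accesses go through __asan_loadN()/__asan_storeN() and end up in
 * memory_is_poisoned_n().
 */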

static __always_inline bool check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return true;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return false;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return true;

        kasan_report(addr, size, write, ret_ip);
        return false;
}

bool check_memory_region(unsigned long addr, size_t size, bool write,
                                unsigned long ret_ip)
{
        return check_memory_region_inline(addr, size, write, ret_ip);
}
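
/*
 * Editor's note: besides the compiler-emitted __asan_* hooks below, explicit
 * annotations such as kasan_check_read()/kasan_check_write() (in common.c)
 * also funnel into check_memory_region().
 */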

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        if (!__kmem_cache_empty(cache))
                quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
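
/*
 * Editor's note (illustrative sketch): DEFINE_ASAN_LOAD_STORE(4) above
 * expands to, roughly,
 *
 *      void __asan_load4(unsigned long addr)
 *      {
 *              check_memory_region_inline(addr, 4, false, _RET_IP_);
 *      }
 *      EXPORT_SYMBOL(__asan_load4);
 *      ...
 *
 * and for an instrumented statement like "dst->flags = src->flags;" on
 * 32-bit fields the compiler inserts calls to __asan_load4() and
 * __asan_store4() right before the actual accesses.
 */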

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

        WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
                              size - rounded_down_size);
        kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_LEFT);
        kasan_poison_shadow(right_redzone,
                        padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
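
/*
 * Editor's note (worked example, assuming KASAN_ALLOCA_REDZONE_SIZE is 32 as
 * in kasan.h): for an alloca() of size 20 at an aligned addr,
 * rounded_up_size = 24, rounded_down_size = 16 and padding_size = 32 - 24 = 8.
 * The 32 bytes below addr become the left redzone, the granule at addr + 16
 * gets a partial shadow value of 4 (bytes 16..19 stay accessible), and
 * [addr + 24, addr + 64) is poisoned as the right redzone, keeping the whole
 * frame 32-byte aligned.
 */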

/* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
        if (unlikely(!stack_top || stack_top > stack_bottom))
                return;

        kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)                                    \
        void __asan_set_shadow_##byte(const void *addr, size_t size)   \
        {                                                               \
                __memset((void *)addr, 0x##byte, size);                 \
        }                                                               \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)
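
/*
 * Editor's note: the byte values below follow the generic KASAN shadow
 * encoding from kasan.h: 00 unpoisons, 0xf1/0xf2/0xf3 mark the left, middle
 * and right stack frame redzones, 0xf8 marks locals whose scope has ended,
 * and 0xf5 is another compiler-defined stack poison marker.
 */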

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);