/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 *
 * Some code borrowed from https://github.com/xairy/linux by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>	/* __vmalloc_node_range(), vfree() */
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"
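
/*
 * KASan tracks the state of kernel memory in a shadow region: each shadow
 * byte covers KASAN_SHADOW_SCALE_SIZE (8) bytes of memory.  A shadow value
 * of 0 means the whole granule is accessible, 1..7 means only the first N
 * bytes are accessible, and negative values encode the kind of poisoning
 * (redzone, freed object, freed page, ...).  kasan_mem_to_shadow() performs
 * the translation, conceptually:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 */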

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}
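
/*
 * Marks 'size' bytes starting at 'address' as accessible.  When 'size' is
 * not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte is set to
 * the number of accessible bytes in the trailing, partially covered granule.
 */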
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
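
/*
 * The 2-, 4- and 8-byte checks load two shadow bytes at once.  If either is
 * nonzero, the last byte of the access is validated with
 * memory_is_poisoned_1(); the first shadow byte only needs a separate check
 * when the access crosses a granule boundary.
 */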
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;
		s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;

		if (unlikely(shadow_first_bytes))
			return true;

		if (likely(!last_byte))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
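
/*
 * bytes_is_zero() returns the address of the first nonzero byte in the
 * range, or 0 if the whole range is zero.  memory_is_zero() is the
 * word-at-a-time variant used to scan larger shadow ranges quickly.
 */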
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
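
/*
 * Generic check for an N-byte access: the shadow for the whole range must
 * be zero, except that the last shadow byte may legitimately describe a
 * partially accessible granule covering the end of the access.
 */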
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
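
/*
 * Common slow path for all instrumented accesses: constant-size accesses
 * are dispatched to the specialized checks above, addresses below the
 * shadow-covered range are reported as user-memory accesses, and any
 * poisoned access is reported.
 */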
static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	struct kasan_access_info info;

	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		info.access_addr = (void *)addr;
		info.access_size = size;
		info.is_write = write;
		info.ip = _RET_IP_;
		kasan_report_user_access(&info);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);
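
/*
 * The mem*() implementations themselves are not instrumented, so KASan
 * interposes its own wrappers that check the source and destination ranges
 * before deferring to the architecture's __mem*() routines.
 */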
#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}
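
/*
 * Page allocator hooks: freshly allocated pages are unpoisoned and freed
 * pages are poisoned with KASAN_FREE_PAGE, so that use of a freed page is
 * caught.  Highmem pages have no shadow and are skipped.
 */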
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
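
/*
 * Slab hooks: a new slab page is poisoned wholesale; individual objects are
 * unpoisoned around object setup and poisoned again on free.
 */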
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
	kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
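
/*
 * kasan_kmalloc() implements the redzone layout for slab objects: the
 * 'size' bytes actually requested are unpoisoned, while the slack between
 * the requested size and the cache's object_size (rounded up to granules)
 * is poisoned as KASAN_KMALLOC_REDZONE to catch out-of-bounds accesses.
 */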
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size);
	else
		kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}
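
/*
 * Module space is not covered by the shadow that is mapped at boot, so the
 * shadow for a module's region is vmalloc'ed on demand at the address the
 * fixed mem-to-shadow translation dictates and freed along with the module.
 */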
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));
	return ret ? 0 : -ENOMEM;
}

void kasan_module_free(void *addr)
{
	vfree(kasan_mem_to_shadow(addr));
}
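
/*
 * Globals instrumentation: the compiler emits a kasan_global descriptor for
 * each instrumented global and calls __asan_register_globals() at load
 * time; the redzone that follows each global is poisoned here.
 */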
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
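
/*
 * These are the entry points that compiler instrumentation actually calls:
 * roughly, a 4-byte load of 'p' becomes __asan_load4((unsigned long)p)
 * followed by the real access.  The *_noabort variants expected by newer
 * compilers are plain aliases, since the kernel reports errors without
 * aborting either way.
 */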
#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASan doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif