mm/kmemcheck.c
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}
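
/*
 * For illustration only (this helper is not part of this file): given
 * the page[i].shadow pointers set up above, the fault handler can map
 * any faulting address to its shadow byte. A minimal sketch, assuming
 * a lowmem address backed by a valid struct page; the real lookup
 * lives under arch/x86/mm/kmemcheck/:
 */
static void *shadow_lookup_sketch(unsigned long address)
{
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;

	/* One shadow byte per data byte, at the same offset in the page. */
	return page->shadow + (address & (PAGE_SIZE - 1));
}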
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}
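
/*
 * Hypothetical usage sketch (the real callers are the page and slab
 * allocators): the shadow must be torn down with the same order it was
 * allocated with, before the data pages themselves are freed. Here
 * the data pages opt out of the generic page-alloc hook via
 * __GFP_NOTRACK so the shadow can be managed by hand:
 */
static void shadow_pairing_sketch(int node)
{
	int order = 2;
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL | __GFP_NOTRACK, order);
	if (!page)
		return;

	kmemcheck_alloc_shadow(page, order, GFP_KERNEL, node);
	kmemcheck_mark_uninitialized_pages(page, 1 << order);

	/* ... every access now faults and is checked against the shadow ... */

	kmemcheck_free_shadow(page, order);	/* same order as above */
	__free_pages(page, order);
}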
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || (gfpflags & __GFP_NOTRACK)) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}
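
/*
 * For reference, the mark_*() calls above boil down to tagging each
 * shadow byte with a per-byte state, which the fault handler later
 * checks on every read. A simplified sketch of the idea (the real enum
 * and helpers live under arch/x86/mm/kmemcheck/; this version ignores
 * objects that span a page boundary, which the real code handles):
 */
enum shadow_state_sketch {
	SHADOW_SKETCH_UNALLOCATED,	/* never handed out */
	SHADOW_SKETCH_UNINITIALIZED,	/* reading this is reported */
	SHADOW_SKETCH_INITIALIZED,	/* written at least once; reads OK */
	SHADOW_SKETCH_FREED,		/* reading this is a use-after-free */
};

static void mark_shadow_sketch(void *address, unsigned int n, u8 state)
{
	void *shadow = shadow_lookup_sketch((unsigned long) address);

	if (shadow)
		memset(shadow, state, n);
}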
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}
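
/*
 * Hypothetical illustration of the SLAB_DESTROY_BY_RCU exclusion
 * above: for such caches, a reader under rcu_read_lock() may
 * legitimately dereference an object after another CPU has freed it,
 * so marking the memory "freed" here would report a false
 * use-after-free. Types and names below are made up for the sketch:
 */
struct example_obj {
	int key;
};

static bool example_rcu_reader(struct example_obj __rcu **slot, int key)
{
	struct example_obj *obj;
	bool match;

	rcu_read_lock();
	obj = rcu_dereference(*slot);
	/* The object may be freed (or even reused) at this point... */
	match = obj && obj->key == key;	/* ...yet this read is legal here. */
	rcu_read_unlock();

	return match;
}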
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track __GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
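
/*
 * Illustrative only (not part of this file): how different GFP flags
 * land in the hook above when kmemcheck is active. Hypothetical
 * driver-style snippet:
 */
static void gfp_tracking_sketch(void)
{
	/* Tracked; shadow starts "uninitialized", first reads are reported. */
	struct page *p1 = alloc_pages(GFP_KERNEL, 0);

	/* Tracked, but pre-zeroed, so the shadow starts "initialized". */
	struct page *p2 = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	/* Opted out: no shadow is allocated, accesses never fault. */
	struct page *p3 = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);

	if (p3)
		__free_pages(p3, 0);
	if (p2)
		__free_pages(p2, 0);
	if (p1)
		__free_pages(p1, 0);
}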