/* [linux-rt-nao.git] include/linux/kmemcheck.h */
#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>
/*
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_define_bitfield(flags, {
 *			int x:8, y:8;
 *		});
 *	};
 *
 * Now the "flags" member may be used to refer to the bitfield (and things
 * like &x.flags is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	if (a)
 *		kmemcheck_annotate_bitfield(a->flags);
 *
 * Note: We provide the same definitions for both kmemcheck and non-
 * kmemcheck kernels. This makes it harder to introduce accidental errors.
 */
/*
 * Wrap a group of bitfields in an anonymous union so that the whole group
 * is also addressable through the single named member "name" — which is
 * what kmemcheck_annotate_bitfield() marks as initialized in one shot.
 * The dummy zero-length array is a sibling member, NOT inside the union;
 * it only exists to pacify gcc (see comment below).
 */
#define kmemcheck_define_bitfield(name, fields...)		\
	union {							\
		struct fields name;				\
		struct fields;					\
	};							\
								\
	/*							\
	 * Erk. Due to gcc bug, we'll get a "error:		\
	 * flexible array member in otherwise empty		\
	 * struct" without this.				\
	 */							\
	int kmemcheck_dummy_##name##_[0];
46 #ifdef CONFIG_KMEMCHECK
47 extern int kmemcheck_enabled;
49 void kmemcheck_init(void);
51 /* The slab-related functions. */
52 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
53 void kmemcheck_free_shadow(struct page *page, int order);
54 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
55 size_t size);
56 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
58 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
59 gfp_t gfpflags);
61 void kmemcheck_show_pages(struct page *p, unsigned int n);
62 void kmemcheck_hide_pages(struct page *p, unsigned int n);
64 bool kmemcheck_page_is_tracked(struct page *p);
66 void kmemcheck_mark_unallocated(void *address, unsigned int n);
67 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
68 void kmemcheck_mark_initialized(void *address, unsigned int n);
69 void kmemcheck_mark_freed(void *address, unsigned int n);
71 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
72 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
73 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
75 int kmemcheck_show_addr(unsigned long address);
76 int kmemcheck_hide_addr(unsigned long address);
78 #define kmemcheck_annotate_bitfield(field) \
79 do { \
80 kmemcheck_mark_initialized(&(field), sizeof(field)); \
81 } while (0)
82 #else
83 #define kmemcheck_enabled 0
85 static inline void kmemcheck_init(void)
89 static inline void
90 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
94 static inline void
95 kmemcheck_free_shadow(struct page *page, int order)
99 static inline void
100 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
101 size_t size)
105 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
106 size_t size)
110 static inline void kmemcheck_pagealloc_alloc(struct page *p,
111 unsigned int order, gfp_t gfpflags)
115 static inline bool kmemcheck_page_is_tracked(struct page *p)
117 return false;
120 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
124 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
128 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
132 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
136 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
137 unsigned int n)
141 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
142 unsigned int n)
146 static inline void kmemcheck_mark_initialized_pages(struct page *p,
147 unsigned int n)
151 #define kmemcheck_annotate_bitfield(field) do { } while (0)
152 #endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */