#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H
#include <linux/types.h>
#include <linux/tracepoint.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *  GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */
#define show_gfp_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
        {(unsigned long)GFP_USER, "GFP_USER"}, \
        {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
        {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
        {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
        {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
        {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
        {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
        {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
        {(unsigned long)__GFP_IO, "GFP_IO"}, \
        {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
        {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
        {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
        {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
        {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
        {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
        {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
        {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
        {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
        {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
        {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
        {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
        ) : "GFP_NOWAIT"
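
/*
 * Example (illustrative): at the time this header was written, GFP_KERNEL
 * was the composite (__GFP_WAIT | __GFP_IO | __GFP_FS), so with the
 * ordering above an allocation using (GFP_KERNEL | __GFP_ZERO) is printed
 * as "GFP_KERNEL|GFP_ZERO" rather than as its individual low-level bits.
 * A flags value of zero falls through to the bare "GFP_NOWAIT" string.
 */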

TRACE_EVENT(kmalloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);
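
/*
 * Example (illustrative, names approximate): an allocator fires this event
 * from its kmalloc() path with arguments matching TP_PROTO above, roughly:
 *
 *      trace_kmalloc(_RET_IP_, ret, size, cache_object_size, gfp_flags);
 *
 * and a resulting line in the trace buffer looks like:
 *
 *      kmalloc: call_site=<ip> ptr=<addr> bytes_req=96 bytes_alloc=128 gfp_flags=GFP_KERNEL
 */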

TRACE_EVENT(kmem_cache_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);

TRACE_EVENT(kmalloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
                __field( int, node )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
                __entry->node = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

TRACE_EVENT(kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
                __field( int, node )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
                __entry->node = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

TRACE_EVENT(kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

TRACE_EVENT(kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
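
/*
 * Usage sketch (illustrative): exactly one compilation unit instantiates
 * these tracepoints by defining CREATE_TRACE_POINTS before including this
 * header:
 *
 *      #define CREATE_TRACE_POINTS
 *      #include <trace/events/kmem.h>
 *
 * Everyone else just includes the header and calls the generated
 * trace_kmalloc(), trace_kmem_cache_alloc(), trace_kfree(), etc. hooks;
 * the events can then be switched on at run time through the kernel's
 * tracing interface under events/kmem/.
 */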