/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
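
/*
 * The events declared below are exposed under events/kmem/ in tracefs.
 * Illustrative usage, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */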
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);
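
/*
 * kmalloc and kmem_cache_alloc share the kmem_alloc class above, so both
 * events record the caller, the object pointer, the requested and actually
 * allocated sizes, and the GFP flags (decoded by show_gfp_flags()).
 */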
DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
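
/*
 * Sketch of how an allocator emits this event (illustrative only; the real
 * call sites live in the slab allocators under mm/). "ret" stands for the
 * object just returned and "cache_size" for the slab's object size:
 *
 *	trace_kmalloc(_RET_IP_, ret, size, cache_size, gfp_flags);
 */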
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);
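
/*
 * NUMA-aware variant of kmem_alloc: kmalloc_node and kmem_cache_alloc_node
 * additionally record the node the allocation was requested from.
 */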
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);
DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
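
/*
 * The free events carry only the caller and the object pointer; matching a
 * kfree/kmem_cache_free against the corresponding allocation event by ptr
 * is a common way to look for leaks with these tracepoints.
 */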
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);
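
/*
 * Like mm_page_free above, the remaining page allocator events store the pfn
 * rather than the struct page pointer; the page is recovered at print time
 * with pfn_to_page().
 */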
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=%lu order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
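
/*
 * mm_page_alloc also fires when the allocation fails: page is NULL then, the
 * stored pfn is -1UL, and the printk arguments fall back to NULL / 0.
 */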
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);
TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);
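
/*
 * mm_page_alloc_zone_locked and mm_page_pcpu_drain bracket the per-cpu page
 * lists: the former traces pages taken from the buddy lists under the zone
 * lock (percpu_refill=1 when order is 0), the latter traces pages handed
 * back to the buddy allocator when a per-cpu list is drained.
 */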
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);
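
/*
 * mm_page_alloc_extfrag fires when an allocation has to fall back to a
 * pageblock of a different migratetype. "fragmenting" is derived as
 * fallback_order < pageblock_order (the fallback split a pageblock), and
 * "change_ownership" records whether the pageblock's migratetype now
 * matches the allocating type.
 */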
/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif
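
/*
 * mm_ptr_to_hash() reuses ptr_to_hashval(), the same hashing %p printing
 * uses, so the rss_stat event can identify an mm without exposing the raw
 * kernel pointer.
 */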
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__entry->member,
		__entry->size)
	);

#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>