/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
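
/*
 * kmem_alloc is the event class shared by the kmalloc and kmem_cache_alloc
 * tracepoints: each event records the call site, the returned object pointer,
 * the requested and actually allocated sizes, and the GFP flags.
 *
 * A minimal consumption sketch (assuming tracefs is mounted at
 * /sys/kernel/tracing; older setups use /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */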

DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( gfp_t, gfp_flags )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = gfp_flags;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
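
/*
 * kmem_alloc_node mirrors kmem_alloc but additionally records the NUMA node
 * requested by the _node allocator variants.
 */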

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( gfp_t, gfp_flags )
		__field( int, node )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = gfp_flags;
		__entry->node = node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
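
/*
 * kmem_free covers kfree() and kmem_cache_free(): only the call site and the
 * pointer being freed are recorded.
 */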

DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		(void *)__entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
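
/*
 * Page allocator events: mm_page_free fires as pages are returned to the
 * buddy allocator; mm_page_free_batched covers order-0 pages freed through
 * the batched freeing path, so the order is hard-coded to 0 in the output.
 * Only the pfn is stored; the struct page pointer is recomputed at print time.
 */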

TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->order = order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
	),

	TP_printk("page=%p pfn=%lu order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);
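
/*
 * mm_page_alloc fires per allocation attempt from the page allocator.
 * page may be NULL when the allocation fails; the pfn is then recorded
 * as -1UL.
 */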

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( gfp_t, gfp_flags )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->gfp_flags = gfp_flags;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
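
/*
 * mm_page is the event class behind mm_page_alloc_zone_locked; the
 * percpu_refill value in the output is inferred from order == 0.
 */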

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);
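
/*
 * mm_page_alloc_extfrag reports an allocation that fell back to a different
 * migratetype. change_ownership is 1 when the pageblock's migratetype now
 * matches the allocation's; fragmenting is 1 when the fallback order is
 * below pageblock_order.
 */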

TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( int, alloc_order )
		__field( int, fallback_order )
		__field( int, alloc_migratetype )
		__field( int, fallback_migratetype )
		__field( int, change_ownership )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->alloc_order = alloc_order;
		__entry->fallback_order = fallback_order;
		__entry->alloc_migratetype = alloc_migratetype;
		__entry->fallback_migratetype = fallback_migratetype;
		__entry->change_ownership = (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif
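
/*
 * rss_stat reports updates to a mm's RSS counters. mm_id is a hash of the
 * mm_struct pointer so the mm can be tracked without exposing a kernel
 * address; size is the counter value converted from pages to bytes.
 */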

TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__entry->member,
		__entry->size)
	);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>