/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen

#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H

#include <linux/tracepoint.h>
#include <asm/paravirt_types.h>
#include <asm/xen/trace_types.h>

struct multicall_entry;

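/*
 * Multicall batching: xen_mc_batch and xen_mc_issue record entry into and
 * issue of a multicall batch along with the active paravirt lazy mode
 * (MMU, CPU or NONE).
 */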
DECLARE_EVENT_CLASS(xen_mc__batch,
    TP_PROTO(enum paravirt_lazy_mode mode),
    TP_ARGS(mode),
    TP_STRUCT__entry(
        __field(enum paravirt_lazy_mode, mode)
    ),
    TP_fast_assign(__entry->mode = mode),
    TP_printk("start batch LAZY_%s",
              (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
              (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
);

#define DEFINE_XEN_MC_BATCH(name)                       \
    DEFINE_EVENT(xen_mc__batch, name,                   \
        TP_PROTO(enum paravirt_lazy_mode mode),         \
        TP_ARGS(mode))

DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);

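/*
 * Usage sketch (assumption, not defined in this header): callers fire these
 * events through the generated trace_*() wrappers; the multicall code does
 * roughly:
 *
 *	trace_xen_mc_batch(paravirt_get_lazy_mode());
 */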
TRACE_DEFINE_SIZEOF(ulong);

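/*
 * One xen_mc_entry event is emitted per multicall element: it records the
 * hypercall op and up to six arguments (unused argument slots are zeroed).
 */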
TRACE_EVENT(xen_mc_entry,
    TP_PROTO(struct multicall_entry *mc, unsigned nargs),
    TP_ARGS(mc, nargs),
    TP_STRUCT__entry(
        __field(unsigned int, op)
        __field(unsigned int, nargs)
        __array(unsigned long, args, 6)
    ),
    TP_fast_assign(__entry->op = mc->op;
                   __entry->nargs = nargs;
                   memcpy(__entry->args, mc->args, sizeof(ulong) * nargs);
                   memset(__entry->args + nargs, 0, sizeof(ulong) * (6 - nargs));
    ),
    TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
              __entry->op, xen_hypercall_name(__entry->op),
              __entry->args[0], __entry->args[1], __entry->args[2],
              __entry->args[3], __entry->args[4], __entry->args[5])
);

TRACE_EVENT(xen_mc_entry_alloc,
    TP_PROTO(size_t args),
    TP_ARGS(args),
    TP_STRUCT__entry(
        __field(size_t, args)
    ),
    TP_fast_assign(__entry->args = args),
    TP_printk("alloc entry %zu arg bytes", __entry->args)
);

TRACE_EVENT(xen_mc_callback,
    TP_PROTO(xen_mc_callback_fn_t fn, void *data),
    TP_ARGS(fn, data),
    TP_STRUCT__entry(
        /*
         * Use field_struct to avoid is_signed_type()
         * comparison of a function pointer.
         */
        __field_struct(xen_mc_callback_fn_t, fn)
        __field(void *, data)
    ),
    TP_fast_assign(__entry->fn = fn;
                   __entry->data = data),
    TP_printk("callback %ps, data %p",
              __entry->fn, __entry->data)
);

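/*
 * Flush bookkeeping: why a pending batch was flushed, and how many
 * hypercalls, argument bytes and callbacks were flushed.
 */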
TRACE_EVENT(xen_mc_flush_reason,
    TP_PROTO(enum xen_mc_flush_reason reason),
    TP_ARGS(reason),
    TP_STRUCT__entry(
        __field(enum xen_mc_flush_reason, reason)
    ),
    TP_fast_assign(__entry->reason = reason),
    TP_printk("flush reason %s",
              (__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
              (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
              (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
              (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
);

TRACE_EVENT(xen_mc_flush,
    TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
    TP_ARGS(mcidx, argidx, cbidx),
    TP_STRUCT__entry(
        __field(unsigned, mcidx)
        __field(unsigned, argidx)
        __field(unsigned, cbidx)
    ),
    TP_fast_assign(__entry->mcidx = mcidx;
                   __entry->argidx = argidx;
                   __entry->cbidx = cbidx),
    TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
              __entry->mcidx, __entry->argidx, __entry->cbidx)
);

TRACE_EVENT(xen_mc_extend_args,
    TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
    TP_ARGS(op, args, res),
    TP_STRUCT__entry(
        __field(unsigned int, op)
        __field(size_t, args)
        __field(enum xen_mc_extend_args, res)
    ),
    TP_fast_assign(__entry->op = op;
                   __entry->args = args;
                   __entry->res = res),
    TP_printk("extending op %u%s by %zu bytes res %s",
              __entry->op, xen_hypercall_name(__entry->op),
              __entry->args,
              __entry->res == XEN_MC_XE_OK ? "OK" :
              __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
              __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
);

TRACE_DEFINE_SIZEOF(pteval_t);

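/* MMU: paravirt page-table update tracepoints. */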
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
    TP_PROTO(pte_t *ptep, pte_t pteval),
    TP_ARGS(ptep, pteval),
    TP_STRUCT__entry(
        __field(pte_t *, ptep)
        __field(pteval_t, pteval)
    ),
    TP_fast_assign(__entry->ptep = ptep;
                   __entry->pteval = pteval.pte),
    TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
              __entry->ptep,
              (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
              (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);

#define DEFINE_XEN_MMU_SET_PTE(name)                    \
    DEFINE_EVENT(xen_mmu__set_pte, name,                \
        TP_PROTO(pte_t *ptep, pte_t pteval),            \
        TP_ARGS(ptep, pteval))

DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);

TRACE_DEFINE_SIZEOF(pmdval_t);

TRACE_EVENT(xen_mmu_set_pmd,
    TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
    TP_ARGS(pmdp, pmdval),
    TP_STRUCT__entry(
        __field(pmd_t *, pmdp)
        __field(pmdval_t, pmdval)
    ),
    TP_fast_assign(__entry->pmdp = pmdp;
                   __entry->pmdval = pmdval.pmd),
    TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
              __entry->pmdp,
              (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
              (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
);

#ifdef CONFIG_X86_PAE
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);

TRACE_EVENT(xen_mmu_pte_clear,
    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
    TP_ARGS(mm, addr, ptep),
    TP_STRUCT__entry(
        __field(struct mm_struct *, mm)
        __field(unsigned long, addr)
        __field(pte_t *, ptep)
    ),
    TP_fast_assign(__entry->mm = mm;
                   __entry->addr = addr;
                   __entry->ptep = ptep),
    TP_printk("mm %p addr %lx ptep %p",
              __entry->mm, __entry->addr, __entry->ptep)
);

TRACE_EVENT(xen_mmu_pmd_clear,
    TP_PROTO(pmd_t *pmdp),
    TP_ARGS(pmdp),
    TP_STRUCT__entry(
        __field(pmd_t *, pmdp)
    ),
    TP_fast_assign(__entry->pmdp = pmdp),
    TP_printk("pmdp %p", __entry->pmdp)
);
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS >= 4

TRACE_DEFINE_SIZEOF(pudval_t);

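/*
 * With four or more page-table levels the PUD and P4D updates are traced
 * separately; the #else variant below handles the folded case.
 */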
TRACE_EVENT(xen_mmu_set_pud,
    TP_PROTO(pud_t *pudp, pud_t pudval),
    TP_ARGS(pudp, pudval),
    TP_STRUCT__entry(
        __field(pud_t *, pudp)
        __field(pudval_t, pudval)
    ),
    TP_fast_assign(__entry->pudp = pudp;
                   __entry->pudval = native_pud_val(pudval)),
    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
              __entry->pudp,
              (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
              (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);

TRACE_DEFINE_SIZEOF(p4dval_t);

TRACE_EVENT(xen_mmu_set_p4d,
    TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
    TP_ARGS(p4dp, user_p4dp, p4dval),
    TP_STRUCT__entry(
        __field(p4d_t *, p4dp)
        __field(p4d_t *, user_p4dp)
        __field(p4dval_t, p4dval)
    ),
    TP_fast_assign(__entry->p4dp = p4dp;
                   __entry->user_p4dp = user_p4dp;
                   __entry->p4dval = p4d_val(p4dval)),
    TP_printk("p4dp %p user_p4dp %p p4dval %0*llx (raw %0*llx)",
              __entry->p4dp, __entry->user_p4dp,
              (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
              (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
);
#else

TRACE_EVENT(xen_mmu_set_pud,
    TP_PROTO(pud_t *pudp, pud_t pudval),
    TP_ARGS(pudp, pudval),
    TP_STRUCT__entry(
        __field(pud_t *, pudp)
        __field(pudval_t, pudval)
    ),
    TP_fast_assign(__entry->pudp = pudp;
                   __entry->pudval = native_pud_val(pudval)),
    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
              __entry->pudp,
              (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
              (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
    TP_PROTO(struct mm_struct *mm, unsigned long addr,
             pte_t *ptep, pte_t pteval),
    TP_ARGS(mm, addr, ptep, pteval),
    TP_STRUCT__entry(
        __field(struct mm_struct *, mm)
        __field(unsigned long, addr)
        __field(pte_t *, ptep)
        __field(pteval_t, pteval)
    ),
    TP_fast_assign(__entry->mm = mm;
                   __entry->addr = addr;
                   __entry->ptep = ptep;
                   __entry->pteval = pteval.pte),
    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
              __entry->mm, __entry->addr, __entry->ptep,
              (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
              (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);

#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name)                   \
    DEFINE_EVENT(xen_mmu_ptep_modify_prot, name,                \
        TP_PROTO(struct mm_struct *mm, unsigned long addr,      \
                 pte_t *ptep, pte_t pteval),                    \
        TP_ARGS(mm, addr, ptep, pteval))

DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);

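/* Allocation and release of page-table pages, noting level and pinned state. */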
TRACE_EVENT(xen_mmu_alloc_ptpage,
    TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
    TP_ARGS(mm, pfn, level, pinned),
    TP_STRUCT__entry(
        __field(struct mm_struct *, mm)
        __field(unsigned long, pfn)
        __field(unsigned, level)
        __field(bool, pinned)
    ),
    TP_fast_assign(__entry->mm = mm;
                   __entry->pfn = pfn;
                   __entry->level = level;
                   __entry->pinned = pinned),
    TP_printk("mm %p pfn %lx level %d %spinned",
              __entry->mm, __entry->pfn, __entry->level,
              __entry->pinned ? "" : "un")
);

TRACE_EVENT(xen_mmu_release_ptpage,
    TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
    TP_ARGS(pfn, level, pinned),
    TP_STRUCT__entry(
        __field(unsigned long, pfn)
        __field(unsigned, level)
        __field(bool, pinned)
    ),
    TP_fast_assign(__entry->pfn = pfn;
                   __entry->level = level;
                   __entry->pinned = pinned),
    TP_printk("pfn %lx level %d %spinned",
              __entry->pfn, __entry->level,
              __entry->pinned ? "" : "un")
);

DECLARE_EVENT_CLASS(xen_mmu_pgd,
    TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
    TP_ARGS(mm, pgd),
    TP_STRUCT__entry(
        __field(struct mm_struct *, mm)
        __field(pgd_t *, pgd)
    ),
    TP_fast_assign(__entry->mm = mm;
                   __entry->pgd = pgd),
    TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
);

#define DEFINE_XEN_MMU_PGD_EVENT(name)                  \
    DEFINE_EVENT(xen_mmu_pgd, name,                     \
        TP_PROTO(struct mm_struct *mm, pgd_t *pgd),     \
        TP_ARGS(mm, pgd))

DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);

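/* TLB flush and CR3 write tracepoints. */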
TRACE_EVENT(xen_mmu_flush_tlb_one_user,
    TP_PROTO(unsigned long addr),
    TP_ARGS(addr),
    TP_STRUCT__entry(
        __field(unsigned long, addr)
    ),
    TP_fast_assign(__entry->addr = addr),
    TP_printk("addr %lx", __entry->addr)
);

TRACE_EVENT(xen_mmu_flush_tlb_others,
    TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
             unsigned long addr, unsigned long end),
    TP_ARGS(cpus, mm, addr, end),
    TP_STRUCT__entry(
        __field(unsigned, ncpus)
        __field(struct mm_struct *, mm)
        __field(unsigned long, addr)
        __field(unsigned long, end)
    ),
    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
                   __entry->mm = mm;
                   __entry->addr = addr;
                   __entry->end = end),
    TP_printk("ncpus %d mm %p addr %lx, end %lx",
              __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
);

TRACE_EVENT(xen_mmu_write_cr3,
    TP_PROTO(bool kernel, unsigned long cr3),
    TP_ARGS(kernel, cr3),
    TP_STRUCT__entry(
        __field(bool, kernel)
        __field(unsigned long, cr3)
    ),
    TP_fast_assign(__entry->kernel = kernel;
                   __entry->cr3 = cr3),
    TP_printk("%s cr3 %lx",
              __entry->kernel ? "kernel" : "user", __entry->cr3)
);

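/* CPU: descriptor table (LDT/IDT/GDT) update tracepoints. */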
TRACE_EVENT(xen_cpu_write_ldt_entry,
    TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
    TP_ARGS(dt, entrynum, desc),
    TP_STRUCT__entry(
        __field(struct desc_struct *, dt)
        __field(int, entrynum)
        __field(u64, desc)
    ),
    TP_fast_assign(__entry->dt = dt;
                   __entry->entrynum = entrynum;
                   __entry->desc = desc;
    ),
    TP_printk("dt %p entrynum %d entry %016llx",
              __entry->dt, __entry->entrynum,
              (unsigned long long)__entry->desc)
);

TRACE_EVENT(xen_cpu_write_idt_entry,
    TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
    TP_ARGS(dt, entrynum, ent),
    TP_STRUCT__entry(
        __field(gate_desc *, dt)
        __field(int, entrynum)
    ),
    TP_fast_assign(__entry->dt = dt;
                   __entry->entrynum = entrynum;
    ),
    TP_printk("dt %p entrynum %d",
              __entry->dt, __entry->entrynum)
);

TRACE_EVENT(xen_cpu_load_idt,
    TP_PROTO(const struct desc_ptr *desc),
    TP_ARGS(desc),
    TP_STRUCT__entry(
        __field(unsigned long, addr)
    ),
    TP_fast_assign(__entry->addr = desc->address),
    TP_printk("addr %lx", __entry->addr)
);

TRACE_EVENT(xen_cpu_write_gdt_entry,
    TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
    TP_ARGS(dt, entrynum, desc, type),
    TP_STRUCT__entry(
        __field(u64, desc)
        __field(struct desc_struct *, dt)
        __field(int, entrynum)
        __field(int, type)
    ),
    TP_fast_assign(__entry->dt = dt;
                   __entry->entrynum = entrynum;
                   __entry->desc = *(u64 *)desc;
                   __entry->type = type;
    ),
    TP_printk("dt %p entrynum %d type %d desc %016llx",
              __entry->dt, __entry->entrynum, __entry->type,
              (unsigned long long)__entry->desc)
);

TRACE_EVENT(xen_cpu_set_ldt,
    TP_PROTO(const void *addr, unsigned entries),
    TP_ARGS(addr, entries),
    TP_STRUCT__entry(
        __field(const void *, addr)
        __field(unsigned, entries)
    ),
    TP_fast_assign(__entry->addr = addr;
                   __entry->entries = entries),
    TP_printk("addr %p entries %u",
              __entry->addr, __entry->entries)
);

#endif /* _TRACE_XEN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>