include/trace/events/xen.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen

#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H

#include <linux/tracepoint.h>
#include <asm/paravirt_types.h>
#include <asm/xen/trace_types.h>

struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
            TP_PROTO(enum paravirt_lazy_mode mode),
            TP_ARGS(mode),
            TP_STRUCT__entry(
                    __field(enum paravirt_lazy_mode, mode)
                    ),
            TP_fast_assign(__entry->mode = mode),
            TP_printk("start batch LAZY_%s",
                      (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
                      (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
        );

#define DEFINE_XEN_MC_BATCH(name)                       \
        DEFINE_EVENT(xen_mc__batch, name,               \
                     TP_PROTO(enum paravirt_lazy_mode mode),    \
                     TP_ARGS(mode))

DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);
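/*
 * The event class above defines the record layout and format string once;
 * each DEFINE_XEN_MC_BATCH() instance generates a tracepoint that reuses
 * it, callable as trace_xen_mc_batch() / trace_xen_mc_issue().  These are
 * typically hooked into the multicall batching helpers, e.g. (sketch):
 *
 *      trace_xen_mc_batch(paravirt_get_lazy_mode());
 */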
TRACE_DEFINE_SIZEOF(ulong);

TRACE_EVENT(xen_mc_entry,
            TP_PROTO(struct multicall_entry *mc, unsigned nargs),
            TP_ARGS(mc, nargs),
            TP_STRUCT__entry(
                    __field(unsigned int, op)
                    __field(unsigned int, nargs)
                    __array(unsigned long, args, 6)
                    ),
            TP_fast_assign(__entry->op = mc->op;
                           __entry->nargs = nargs;
                           memcpy(__entry->args, mc->args, sizeof(ulong) * nargs);
                           memset(__entry->args + nargs, 0, sizeof(ulong) * (6 - nargs));
                    ),
            TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
                      __entry->op, xen_hypercall_name(__entry->op),
                      __entry->args[0], __entry->args[1], __entry->args[2],
                      __entry->args[3], __entry->args[4], __entry->args[5])
        );
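/*
 * Only the first 'nargs' argument slots are meaningful; the remainder are
 * zeroed so the fixed six-element dump above stays deterministic.  The
 * "%u%s" pair prints the raw hypercall number immediately followed by its
 * symbolic name as returned by xen_hypercall_name().
 */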
TRACE_EVENT(xen_mc_entry_alloc,
            TP_PROTO(size_t args),
            TP_ARGS(args),
            TP_STRUCT__entry(
                    __field(size_t, args)
                    ),
            TP_fast_assign(__entry->args = args),
            TP_printk("alloc entry %zu arg bytes", __entry->args)
        );
TRACE_EVENT(xen_mc_callback,
            TP_PROTO(xen_mc_callback_fn_t fn, void *data),
            TP_ARGS(fn, data),
            TP_STRUCT__entry(
                    __field(xen_mc_callback_fn_t, fn)
                    __field(void *, data)
                    ),
            TP_fast_assign(
                    __entry->fn = fn;
                    __entry->data = data;
                    ),
            TP_printk("callback %pf, data %p",
                      __entry->fn, __entry->data)
        );
TRACE_EVENT(xen_mc_flush_reason,
            TP_PROTO(enum xen_mc_flush_reason reason),
            TP_ARGS(reason),
            TP_STRUCT__entry(
                    __field(enum xen_mc_flush_reason, reason)
                    ),
            TP_fast_assign(__entry->reason = reason),
            TP_printk("flush reason %s",
                      (__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
                      (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
                      (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
                      (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
        );
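/*
 * enum xen_mc_flush_reason, like the other multicall helper types used in
 * this file (xen_mc_callback_fn_t, enum xen_mc_extend_args), comes from
 * <asm/xen/trace_types.h>, included at the top of this header.
 */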
TRACE_EVENT(xen_mc_flush,
            TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
            TP_ARGS(mcidx, argidx, cbidx),
            TP_STRUCT__entry(
                    __field(unsigned, mcidx)
                    __field(unsigned, argidx)
                    __field(unsigned, cbidx)
                    ),
            TP_fast_assign(__entry->mcidx = mcidx;
                           __entry->argidx = argidx;
                           __entry->cbidx = cbidx),
            TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
                      __entry->mcidx, __entry->argidx, __entry->cbidx)
        );
TRACE_EVENT(xen_mc_extend_args,
            TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
            TP_ARGS(op, args, res),
            TP_STRUCT__entry(
                    __field(unsigned int, op)
                    __field(size_t, args)
                    __field(enum xen_mc_extend_args, res)
                    ),
            TP_fast_assign(__entry->op = op;
                           __entry->args = args;
                           __entry->res = res),
            TP_printk("extending op %u%s by %zu bytes res %s",
                      __entry->op, xen_hypercall_name(__entry->op),
                      __entry->args,
                      __entry->res == XEN_MC_XE_OK ? "OK" :
                      __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
                      __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
        );
TRACE_DEFINE_SIZEOF(pteval_t);

/* mmu */
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
            TP_PROTO(pte_t *ptep, pte_t pteval),
            TP_ARGS(ptep, pteval),
            TP_STRUCT__entry(
                    __field(pte_t *, ptep)
                    __field(pteval_t, pteval)
                    ),
            TP_fast_assign(__entry->ptep = ptep;
                           __entry->pteval = pteval.pte),
            TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
                      __entry->ptep,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
        );

#define DEFINE_XEN_MMU_SET_PTE(name)                            \
        DEFINE_EVENT(xen_mmu__set_pte, name,                    \
                     TP_PROTO(pte_t *ptep, pte_t pteval),       \
                     TP_ARGS(ptep, pteval))

DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
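/*
 * The PTE-style events print the value twice: once passed back through
 * pte_val(native_make_pte(...)), i.e. after any paravirt decoding (under a
 * Xen PV guest this is typically the machine-frame-to-pfn translated view),
 * and once as the raw bits that were stored ("raw %0*llx").  The '*' width
 * argument pads the output to the full pteval_t width of the running
 * configuration.
 */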
TRACE_EVENT(xen_mmu_set_domain_pte,
            TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
            TP_ARGS(ptep, pteval, domid),
            TP_STRUCT__entry(
                    __field(pte_t *, ptep)
                    __field(pteval_t, pteval)
                    __field(unsigned, domid)
                    ),
            TP_fast_assign(__entry->ptep = ptep;
                           __entry->pteval = pteval.pte;
                           __entry->domid = domid),
            TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
                      __entry->ptep,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
                      __entry->domid)
        );
TRACE_EVENT(xen_mmu_set_pte_at,
            TP_PROTO(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pteval),
            TP_ARGS(mm, addr, ptep, pteval),
            TP_STRUCT__entry(
                    __field(struct mm_struct *, mm)
                    __field(unsigned long, addr)
                    __field(pte_t *, ptep)
                    __field(pteval_t, pteval)
                    ),
            TP_fast_assign(__entry->mm = mm;
                           __entry->addr = addr;
                           __entry->ptep = ptep;
                           __entry->pteval = pteval.pte),
            TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
                      __entry->mm, __entry->addr, __entry->ptep,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
        );
TRACE_EVENT(xen_mmu_pte_clear,
            TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
            TP_ARGS(mm, addr, ptep),
            TP_STRUCT__entry(
                    __field(struct mm_struct *, mm)
                    __field(unsigned long, addr)
                    __field(pte_t *, ptep)
                    ),
            TP_fast_assign(__entry->mm = mm;
                           __entry->addr = addr;
                           __entry->ptep = ptep),
            TP_printk("mm %p addr %lx ptep %p",
                      __entry->mm, __entry->addr, __entry->ptep)
        );
TRACE_DEFINE_SIZEOF(pmdval_t);

TRACE_EVENT(xen_mmu_set_pmd,
            TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
            TP_ARGS(pmdp, pmdval),
            TP_STRUCT__entry(
                    __field(pmd_t *, pmdp)
                    __field(pmdval_t, pmdval)
                    ),
            TP_fast_assign(__entry->pmdp = pmdp;
                           __entry->pmdval = pmdval.pmd),
            TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
                      __entry->pmdp,
                      (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
                      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
        );
TRACE_EVENT(xen_mmu_pmd_clear,
            TP_PROTO(pmd_t *pmdp),
            TP_ARGS(pmdp),
            TP_STRUCT__entry(
                    __field(pmd_t *, pmdp)
                    ),
            TP_fast_assign(__entry->pmdp = pmdp),
            TP_printk("pmdp %p", __entry->pmdp)
        );
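/*
 * The PUD/P4D events below come in two flavours depending on how many
 * page-table levels the kernel is built with: with CONFIG_PGTABLE_LEVELS
 * >= 4 the PUD is a real table level and a separate P4D event exists,
 * while in the fallback branch the PUD is folded into the PGD, which is
 * why that variant decodes the value with pgd_val()/native_make_pgd().
 */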
#if CONFIG_PGTABLE_LEVELS >= 4

TRACE_DEFINE_SIZEOF(pudval_t);

TRACE_EVENT(xen_mmu_set_pud,
            TP_PROTO(pud_t *pudp, pud_t pudval),
            TP_ARGS(pudp, pudval),
            TP_STRUCT__entry(
                    __field(pud_t *, pudp)
                    __field(pudval_t, pudval)
                    ),
            TP_fast_assign(__entry->pudp = pudp;
                           __entry->pudval = native_pud_val(pudval)),
            TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
                      __entry->pudp,
                      (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
                      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
        );

TRACE_DEFINE_SIZEOF(p4dval_t);

TRACE_EVENT(xen_mmu_set_p4d,
            TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
            TP_ARGS(p4dp, user_p4dp, p4dval),
            TP_STRUCT__entry(
                    __field(p4d_t *, p4dp)
                    __field(p4d_t *, user_p4dp)
                    __field(p4dval_t, p4dval)
                    ),
            TP_fast_assign(__entry->p4dp = p4dp;
                           __entry->user_p4dp = user_p4dp;
                           __entry->p4dval = p4d_val(p4dval)),
            TP_printk("p4dp %p user_p4dp %p p4dval %0*llx (raw %0*llx)",
                      __entry->p4dp, __entry->user_p4dp,
                      (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
                      (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
        );

TRACE_EVENT(xen_mmu_pud_clear,
            TP_PROTO(pud_t *pudp),
            TP_ARGS(pudp),
            TP_STRUCT__entry(
                    __field(pud_t *, pudp)
                    ),
            TP_fast_assign(__entry->pudp = pudp),
            TP_printk("pudp %p", __entry->pudp)
        );
#else

TRACE_EVENT(xen_mmu_set_pud,
            TP_PROTO(pud_t *pudp, pud_t pudval),
            TP_ARGS(pudp, pudval),
            TP_STRUCT__entry(
                    __field(pud_t *, pudp)
                    __field(pudval_t, pudval)
                    ),
            TP_fast_assign(__entry->pudp = pudp;
                           __entry->pudval = native_pud_val(pudval)),
            TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
                      __entry->pudp,
                      (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
                      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
        );

#endif
TRACE_EVENT(xen_mmu_pgd_clear,
            TP_PROTO(pgd_t *pgdp),
            TP_ARGS(pgdp),
            TP_STRUCT__entry(
                    __field(pgd_t *, pgdp)
                    ),
            TP_fast_assign(__entry->pgdp = pgdp),
            TP_printk("pgdp %p", __entry->pgdp)
        );
DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
            TP_PROTO(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pteval),
            TP_ARGS(mm, addr, ptep, pteval),
            TP_STRUCT__entry(
                    __field(struct mm_struct *, mm)
                    __field(unsigned long, addr)
                    __field(pte_t *, ptep)
                    __field(pteval_t, pteval)
                    ),
            TP_fast_assign(__entry->mm = mm;
                           __entry->addr = addr;
                           __entry->ptep = ptep;
                           __entry->pteval = pteval.pte),
            TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
                      __entry->mm, __entry->addr, __entry->ptep,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
        );

#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name)                           \
        DEFINE_EVENT(xen_mmu_ptep_modify_prot, name,                    \
                     TP_PROTO(struct mm_struct *mm, unsigned long addr, \
                              pte_t *ptep, pte_t pteval),               \
                     TP_ARGS(mm, addr, ptep, pteval))

DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
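/*
 * The _start/_commit pair brackets Xen's implementation of the
 * ptep_modify_prot_start()/ptep_modify_prot_commit() paravirt hooks, i.e.
 * a read-modify-write of a PTE's protection bits, so a matching pair of
 * these events should normally show up in the trace for each such update.
 */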
TRACE_EVENT(xen_mmu_alloc_ptpage,
            TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
            TP_ARGS(mm, pfn, level, pinned),
            TP_STRUCT__entry(
                    __field(struct mm_struct *, mm)
                    __field(unsigned long, pfn)
                    __field(unsigned, level)
                    __field(bool, pinned)
                    ),
            TP_fast_assign(__entry->mm = mm;
                           __entry->pfn = pfn;
                           __entry->level = level;
                           __entry->pinned = pinned),
            TP_printk("mm %p pfn %lx level %d %spinned",
                      __entry->mm, __entry->pfn, __entry->level,
                      __entry->pinned ? "" : "un")
        );
TRACE_EVENT(xen_mmu_release_ptpage,
            TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
            TP_ARGS(pfn, level, pinned),
            TP_STRUCT__entry(
                    __field(unsigned long, pfn)
                    __field(unsigned, level)
                    __field(bool, pinned)
                    ),
            TP_fast_assign(__entry->pfn = pfn;
                           __entry->level = level;
                           __entry->pinned = pinned),
            TP_printk("pfn %lx level %d %spinned",
                      __entry->pfn, __entry->level,
                      __entry->pinned ? "" : "un")
        );
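/*
 * For the ptpage events, 'level' identifies which page-table level the
 * allocated/released page backs (the exact encoding is up to the caller),
 * and 'pinned' records whether the page belonged to a pinned page table,
 * i.e. one under active hypervisor validation, at the time.
 */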
DECLARE_EVENT_CLASS(xen_mmu_pgd,
            TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
            TP_ARGS(mm, pgd),
            TP_STRUCT__entry(
                    __field(struct mm_struct *, mm)
                    __field(pgd_t *, pgd)
                    ),
            TP_fast_assign(__entry->mm = mm;
                           __entry->pgd = pgd),
            TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
        );

#define DEFINE_XEN_MMU_PGD_EVENT(name)                          \
        DEFINE_EVENT(xen_mmu_pgd, name,                         \
                     TP_PROTO(struct mm_struct *mm, pgd_t *pgd),\
                     TP_ARGS(mm, pgd))

DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
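/*
 * pgd_pin/pgd_unpin mark the points where a complete page-table tree is
 * handed to, or taken back from, Xen's validation ("pinning"); broadly, a
 * pinned tree is write-protected in the guest and updated through
 * (possibly batched) hypercalls, which is what many of the events above
 * end up tracing.
 */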
TRACE_EVENT(xen_mmu_flush_tlb_all,
            TP_PROTO(int x),
            TP_ARGS(x),
            TP_STRUCT__entry(__array(char, x, 0)),
            TP_fast_assign((void)x),
            TP_printk("%s", "")
        );

TRACE_EVENT(xen_mmu_flush_tlb,
            TP_PROTO(int x),
            TP_ARGS(x),
            TP_STRUCT__entry(__array(char, x, 0)),
            TP_fast_assign((void)x),
            TP_printk("%s", "")
        );
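/*
 * The two TLB-flush events above carry no payload at all: the dummy
 * 'int x' argument, the zero-length __array() and the empty format string
 * exist only because the TRACE_EVENT() machinery expects a prototype, an
 * entry layout and a printk format.  Only the fact that the flush happened
 * is recorded.
 */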
TRACE_EVENT(xen_mmu_flush_tlb_single,
            TP_PROTO(unsigned long addr),
            TP_ARGS(addr),
            TP_STRUCT__entry(
                    __field(unsigned long, addr)
                    ),
            TP_fast_assign(__entry->addr = addr),
            TP_printk("addr %lx", __entry->addr)
        );
TRACE_EVENT(xen_mmu_flush_tlb_others,
            TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
                     unsigned long addr, unsigned long end),
            TP_ARGS(cpus, mm, addr, end),
            TP_STRUCT__entry(
                    __field(unsigned, ncpus)
                    __field(struct mm_struct *, mm)
                    __field(unsigned long, addr)
                    __field(unsigned long, end)
                    ),
            TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
                           __entry->mm = mm;
                           __entry->addr = addr;
                           __entry->end = end),
            TP_printk("ncpus %d mm %p addr %lx, end %lx",
                      __entry->ncpus, __entry->mm, __entry->addr, __entry->end)
        );
TRACE_EVENT(xen_mmu_write_cr3,
            TP_PROTO(bool kernel, unsigned long cr3),
            TP_ARGS(kernel, cr3),
            TP_STRUCT__entry(
                    __field(bool, kernel)
                    __field(unsigned long, cr3)
                    ),
            TP_fast_assign(__entry->kernel = kernel;
                           __entry->cr3 = cr3),
            TP_printk("%s cr3 %lx",
                      __entry->kernel ? "kernel" : "user", __entry->cr3)
        );
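/*
 * 'kernel' distinguishes writes to the kernel and user CR3: 64-bit PV
 * guests maintain separate kernel and user page-table roots, both of which
 * are switched via the hypervisor.
 */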
/* CPU */
TRACE_EVENT(xen_cpu_write_ldt_entry,
            TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
            TP_ARGS(dt, entrynum, desc),
            TP_STRUCT__entry(
                    __field(struct desc_struct *, dt)
                    __field(int, entrynum)
                    __field(u64, desc)
                    ),
            TP_fast_assign(__entry->dt = dt;
                           __entry->entrynum = entrynum;
                           __entry->desc = desc;
                    ),
            TP_printk("dt %p entrynum %d entry %016llx",
                      __entry->dt, __entry->entrynum,
                      (unsigned long long)__entry->desc)
        );
TRACE_EVENT(xen_cpu_write_idt_entry,
            TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
            TP_ARGS(dt, entrynum, ent),
            TP_STRUCT__entry(
                    __field(gate_desc *, dt)
                    __field(int, entrynum)
                    ),
            TP_fast_assign(__entry->dt = dt;
                           __entry->entrynum = entrynum;
                    ),
            TP_printk("dt %p entrynum %d",
                      __entry->dt, __entry->entrynum)
        );
TRACE_EVENT(xen_cpu_load_idt,
            TP_PROTO(const struct desc_ptr *desc),
            TP_ARGS(desc),
            TP_STRUCT__entry(
                    __field(unsigned long, addr)
                    ),
            TP_fast_assign(__entry->addr = desc->address),
            TP_printk("addr %lx", __entry->addr)
        );
TRACE_EVENT(xen_cpu_write_gdt_entry,
            TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
            TP_ARGS(dt, entrynum, desc, type),
            TP_STRUCT__entry(
                    __field(u64, desc)
                    __field(struct desc_struct *, dt)
                    __field(int, entrynum)
                    __field(int, type)
                    ),
            TP_fast_assign(__entry->dt = dt;
                           __entry->entrynum = entrynum;
                           __entry->desc = *(u64 *)desc;
                           __entry->type = type;
                    ),
            TP_printk("dt %p entrynum %d type %d desc %016llx",
                      __entry->dt, __entry->entrynum, __entry->type,
                      (unsigned long long)__entry->desc)
        );
TRACE_EVENT(xen_cpu_set_ldt,
            TP_PROTO(const void *addr, unsigned entries),
            TP_ARGS(addr, entries),
            TP_STRUCT__entry(
                    __field(const void *, addr)
                    __field(unsigned, entries)
                    ),
            TP_fast_assign(__entry->addr = addr;
                           __entry->entries = entries),
            TP_printk("addr %p entries %u",
                      __entry->addr, __entry->entries)
        );
#endif /* _TRACE_XEN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>