arch/x86/kvm/mmutrace.h

#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>
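
/*
 * TRACE_EVENT() boilerplate: TRACE_SYSTEM names the event group
 * (events/kvmmmu/ in the tracing debugfs tree), and TRACE_INCLUDE_PATH /
 * TRACE_INCLUDE_FILE tell <trace/define_trace.h> where to re-read this
 * header when it expands the events defined below.
 */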

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE mmutrace
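
/*
 * Most of the shadow-page tracepoints below record the same snapshot of
 * a struct kvm_mmu_page; the three KVM_MMU_PAGE_* helpers keep the field
 * layout, the assignment code and the pretty-printer in one place.
 */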

#define KVM_MMU_PAGE_FIELDS			\
	__field(__u64, gfn)			\
	__field(__u32, role)			\
	__field(__u32, root_count)		\
	__field(__u32, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)			\
	__entry->gfn = sp->gfn;			\
	__entry->role = sp->role.word;		\
	__entry->root_count = sp->root_count;	\
	__entry->unsync = sp->unsync;
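
/*
 * Formats the shadow-page fields into the trace_seq and evaluates to a
 * pointer to the text it appended: 'ret' is taken before trace_seq_printf()
 * runs, so the outer TP_printk("%s", ...) prints exactly what was written
 * here.  The trailing %c/0 pair NUL-terminates the string in the buffer.
 */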

#define KVM_MMU_PAGE_PRINTK() ({					\
	const char *ret = p->buffer + p->len;				\
	static const char *access_str[] = {				\
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
	};								\
	union kvm_mmu_page_role role;					\
									\
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge"	\
			 " %snxe root %u %s%c",				\
			 __entry->gfn, role.level, role.glevels,	\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.cr4_pge ? "" : "!",			\
			 role.nxe ? "" : "!",				\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	ret;								\
})

#define kvm_mmu_trace_pferr_flags		\
	{ PFERR_PRESENT_MASK, "P" },		\
	{ PFERR_WRITE_MASK, "W" },		\
	{ PFERR_USER_MASK, "U" },		\
	{ PFERR_RSVD_MASK, "RSVD" },		\
	{ PFERR_FETCH_MASK, "F" }
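
/*
 * Flag table for __print_flags(): decodes a page-fault error code into
 * the P/W/U/RSVD/F letters used by the events below.
 */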

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
				 | (!!fetch_fault << 4);
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
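
/*
 * Illustrative example: a write fault taken from user mode sets bits 1
 * and 2 above, so the event prints "... pferr 6 W|U".
 */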

/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

/* We set a pte accessed bit */
TRACE_EVENT(
	kvm_mmu_set_accessed_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte dirty bit */
TRACE_EVENT(
	kvm_mmu_set_dirty_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);
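
/* The guest page-table walk failed; pferr is the resulting error code */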
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);
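
/*
 * Note: 'created' distinguishes a shadow page that was just allocated
 * from one that was found in the hash table, hence the "new"/"existing"
 * suffix in the output.
 */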

TRACE_EVENT(
	kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

TRACE_EVENT(
	kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

TRACE_EVENT(
	kvm_mmu_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

#endif /* _TRACE_KVMMMU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
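
/*
 * Usage sketch (illustrative, not part of this header): exactly one .c
 * file defines CREATE_TRACE_POINTS before including this header so that
 * define_trace.h emits the tracepoint bodies; all other includers get
 * only the declarations.
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "mmutrace.h"
 *
 * Call sites then use the generated trace_kvm_mmu_*() helpers, e.g.
 * trace_kvm_mmu_paging_element(pte, level).
 */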