/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) \
	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
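
/*
 * Example (illustrative only, not from the original header): with the 4-bit
 * per-counter field, fixed counter 1's control bits live in
 * IA32_FIXED_CTR_CTRL bits 7:4, so
 *
 *	fixed_ctrl_field(0xb0, 1) == 0xb
 *
 * i.e. kernel + user enable plus PMI for fixed counter 1.
 */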

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
	u64 INSTRUCTIONS_RETIRED;
	u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
 * is tracked internally via index 32.  On Intel (AMD doesn't support fixed
 * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
 * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
 * amount of boilerplate needed to iterate over PMCs *and* simplifies common
 * enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}
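
/*
 * Illustrative example (not part of the original header): with 8 GP and 3
 * fixed counters,
 *
 *	kvm_pmc_idx_to_pmc(pmu, 2)  -> &pmu->gp_counters[2]
 *	kvm_pmc_idx_to_pmc(pmu, 32) -> &pmu->fixed_counters[0]   (32 - 32 == 0)
 *	kvm_pmc_idx_to_pmc(pmu, 12) -> NULL (no GP counter 12, below fixed base)
 */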

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\
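
/*
 * Sketch of typical usage (illustrative only; the all_valid_pmc_idx bitmap is
 * assumed to be the one declared in struct kvm_pmu):
 *
 *	struct kvm_pmc *pmc;
 *	int i, nr_in_use = 0;
 *
 *	kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx)
 *		nr_in_use += !!pmc->perf_event;
 */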

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
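
/*
 * Worked example (illustrative only): for a counter whose bitmask covers
 * 48 bits, a hardware value of 0xffffffffffff plus one emulated event reads
 * back as 0, i.e. the masking above makes the combined count wrap exactly
 * like the architectural counter would.
 */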

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	return !(pmu->global_ctrl_rsvd & data);
}

/* Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
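
/*
 * Illustrative usage (the MSR constants are the standard architectural ones,
 * not defined in this header): the same helper resolves both the counter and
 * the event select MSR ranges, distinguished solely by the base passed in,
 * e.g.
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	// counter MSRs
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	// event select MSRs
 */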

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}
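
/*
 * Illustrative example (not from the original header): a guest access to
 * MSR_CORE_PERF_FIXED_CTR0 + 1 resolves to &pmu->fixed_counters[1], provided
 * the vCPU has at least two fixed counters; anything outside the advertised
 * range yields NULL.
 */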

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
					(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
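
/*
 * Illustrative summary (not from the original header): a fixed PMC counts as
 * "in use" if its kernel and/or user enable bit is set in the guest's
 * IA32_FIXED_CTR_CTRL, e.g.
 *
 *	fixed_ctr_ctrl == 0x3  ->  fixed counter 0 in use (kernel + user)
 *
 * while a GP PMC is "in use" whenever EVENTSEL.EN is set, regardless of which
 * event it is programmed to count.
 */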

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required number of GP counters isn't
		 * present, i.e. if there is a non-zero number of counters,
		 * but fewer than are architecturally required.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_MAX_NR_FIXED_COUNTERS);

	kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
	kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}
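
/*
 * Worked example (illustrative numbers only): on a host whose PMU reports
 * version 5, 8 GP counters and 4 fixed counters, and a vendor ops struct with
 * MAX_NR_GP_COUNTERS == 8, the clamping above leaves kvm_pmu_cap with
 * version 2, 8 GP counters and min(4, KVM_MAX_NR_FIXED_COUNTERS) fixed
 * counters, i.e. the host's larger capabilities are never what KVM reports
 * as supportable.
 */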

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
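
/*
 * Illustrative usage (not from the original header): callers typically pass
 * the XOR of the old and new global control values so that only counters
 * whose enable bit actually changed are flagged for reprogramming, e.g.
 *
 *	reprogram_counters(pmu, pmu->global_ctrl ^ new_global_ctrl);
 */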

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
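
/*
 * Illustrative example (not from the original header): with global_ctrl ==
 * 0x100000003, GP counters 0 and 1 and fixed counter 0 (internal index 32)
 * are globally enabled, so pmc_is_globally_enabled() returns true for those
 * three PMCs and false for every other counter.
 */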

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */