/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* Event selects and counters live in different MSRs; tag which one we want. */
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

/* Index of a counter in pmu->gp_counters[], or INDEX_ERROR for unknown MSRs. */
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

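/*
 * Two counter MSR layouts exist: with X86_FEATURE_PERFCTR_CORE the Family 15h
 * MSRs interleave event selects and counters (PERF_CTL0, PERF_CTR0, ...);
 * without it the legacy K7 MSRs sit in two separate banks (EVNTSEL0-3 and
 * PERFCTR0-3). get_msr_base() returns the base MSR of the requested type.
 */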
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

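/*
 * Map a counter or event-select MSR to its kvm_pmc, rejecting MSRs whose
 * kind does not match the requested type so that callers can probe the same
 * MSR number as both a counter and an event select.
 */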
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against the global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret = false;

	ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return ret;
}

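/*
 * MSR reads return either the current counter value (the PERF_CTRn/PERFCTRn
 * MSRs) or the cached event select (the PERF_CTLn/EVNTSELn MSRs); writes in
 * amd_pmu_set_msr() below mirror the same split.
 */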
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

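/*
 * Called when the guest's CPUID changes: the number of general-purpose
 * counters depends on whether PERFCTR_CORE is exposed, counters are 48 bits
 * wide, and the fixed-counter state is cleared because AMD has no fixed
 * counters.
 */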
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}

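/*
 * Initialize all AMD64_NUM_COUNTERS_CORE counters up front; amd_pmu_refresh()
 * later limits nr_arch_gp_counters to what the guest CPUID actually exposes
 * (6 counters with PERFCTR_CORE, otherwise 4).
 */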
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};