Linux 4.18.10
[linux/fpc-iii.git] / arch/x86/kvm/pmu_amd.c
blob 1495a735b38e757ea3d01e6716888d55cf8a84b7
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
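
/*
 * Each entry above is { event select, unit mask, generic perf event id };
 * amd_find_arch_event() matches the select/mask values a guest programs
 * into an event select MSR against this table to pick the generic perf
 * event used to back the counter.
 */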

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check whether a PMC is enabled by comparing it against global_ctrl bits.
 * AMD CPUs have no global_ctrl MSR, so all PMCs are always enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}
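
/*
 * With PERFCTR_CORE the per-counter MSRs come in control/counter pairs
 * (MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTL1, ...), so a
 * contiguous counter index must be scaled by two before being added to the
 * counter base MSR returned by get_msr_base().
 */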
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}
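
/*
 * The top two bits of the RDPMC index are treated as flag bits rather than
 * part of the counter number (Intel uses bit 30 to address its fixed
 * counters). AMD has no fixed counters, so amd_is_valid_msr_idx() above and
 * amd_msr_idx_to_pmc() below simply mask those bits off before
 * range-checking the index against the number of general-purpose counters.
 */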
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret = false;

	ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return ret;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}
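
/*
 * amd_pmu_refresh() sizes the vPMU from guest CPUID: six counters
 * (AMD64_NUM_COUNTERS_CORE) when PERFCTR_CORE is exposed to the guest,
 * otherwise the four legacy K7-style counters (AMD64_NUM_COUNTERS). Either
 * way the counters are 48 bits wide.
 */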
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; but clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}
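
/*
 * init/reset are per-vCPU: init tags every gp_counter slot with its type,
 * owning vcpu and index, while reset stops any backing perf_event and clears
 * the counter value and event select.
 */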
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};