// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
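
/*
 * Local declarations needed by the helpers below.  PMU_TYPE_* selects
 * between the event-select and counter MSR flavours; enum index names the
 * gp_counters[] slot an MSR maps to.  The INDEX_* identifiers are spelled
 * out here so msr_to_index() has concrete values to return.
 */
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};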
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
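
/*
 * Each entry above pairs an AMD event-select code and unit mask with the
 * generic perf hardware event it corresponds to; amd_find_arch_event()
 * walks this table to translate a guest event select into a perf event id.
 */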
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}
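
/*
 * For reference (values per msr-index.h): with PERFCTR_CORE the extended
 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR bases are 0xc0010200/0xc0010201,
 * otherwise the legacy MSR_K7_EVNTSEL0/MSR_K7_PERFCTR0 bases are
 * 0xc0010000/0xc0010004.
 */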
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}
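
/*
 * Example: MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTR3 and the legacy
 * MSR_K7_EVNTSEL3/MSR_K7_PERFCTR3 aliases all resolve to gp_counters[3];
 * asking for the wrong type (e.g. PMU_TYPE_COUNTER for an event-select MSR)
 * returns NULL.
 */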
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}
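
/*
 * Example: event_select 0xc0 with unit_mask 0x00 maps to
 * PERF_COUNT_HW_INSTRUCTIONS via amd_event_mapping[1]; a pair not in the
 * table yields PERF_COUNT_HW_MAX, which the common PMU code treats as "no
 * architectural event" and handles as a raw event instead.
 */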
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
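
/*
 * Worked example of the interleaving: with PERFCTR_CORE the base is
 * MSR_F15H_PERF_CTR (0xc0010201) and CTL/CTR MSRs alternate, so doubling
 * pmc_idx lands on the right counter MSR, e.g. pmc_idx 2 -> 0xc0010205,
 * i.e. MSR_F15H_PERF_CTR2.
 */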
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}
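
/*
 * AMD has no fixed-function counters (see amd_find_fixed_event() above), so
 * once the high flag bits of ECX are masked off the RDPMC index selects a
 * gp_counters[] entry directly.
 */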
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}
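
/*
 * Return convention shared with amd_pmu_set_msr() below: 0 means the access
 * was handled as a PMU register; a non-zero return tells the caller the MSR
 * was not recognized here.
 */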
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}
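
/*
 * Note on the counter write above: pmc_read_counter() returns the guest-
 * visible value (the software counter plus the running perf event's delta),
 * so adding "data - pmc_read_counter(pmc)" to pmc->counter makes the next
 * read observe exactly "data" without reprogramming the perf event.
 */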
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; but clean them to prevent any fall out */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
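
/*
 * The GP bitmask advertises 48-bit wide counters.  reserved_bits covers the
 * upper 32 bits of the event select plus bit 21, which is reserved in AMD's
 * PERF_CTL layout; amd_pmu_set_msr() refuses to program an event select
 * that touches any of these bits.
 */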
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
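
/*
 * This ops table is the AMD backend consumed by KVM's generic PMU code; the
 * SVM vendor code is expected to point its pmu_ops hook at amd_pmu_ops so
 * pmu.c dispatches the callbacks above for AMD guests.
 */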