/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on the pmu_intel.c file.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* duplicated from amd_perfmon_event_map; K7 and above should work */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
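
/*
 * Worked example (illustrative, not part of the original file): a guest
 * event select of 0xc0 with unit mask 0x00 ("retired instructions" on
 * AMD) matches entry [1] above and is translated to the generic
 * PERF_COUNT_HW_INSTRUCTIONS event, which host perf can then schedule
 * on any suitable hardware counter.
 */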

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}
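
/*
 * Illustrative flow: amd_find_arch_event(pmu, 0x76, 0x00) matches
 * entry [0] and returns PERF_COUNT_HW_CPU_CYCLES, while an unmapped
 * pair such as (0xff, 0x00) falls through and returns
 * PERF_COUNT_HW_MAX, which the common vPMU code treats as "program a
 * raw perf event instead".
 */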

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0);
}
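
/*
 * Illustration: MSR_K7_EVNTSEL0 is 0xc0010000 and the K7 event-select
 * MSRs are contiguous, so pmc_idx 2 makes get_gp_pmc() look up MSR
 * 0xc0010002, i.e. the third event-select register and the PMC behind
 * it.
 */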

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);	/* ignore the Intel-style type bits [31:30] */

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}
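
/*
 * Illustrative RDPMC flow: a guest executing RDPMC with ECX = 1 (the
 * type bits [31:30] are masked off above) gets &pmu->gp_counters[1];
 * an index at or beyond nr_arch_gp_counters returns NULL, letting the
 * common RDPMC handler fail the read.
 */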

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool ret;

	ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
	      get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);

	return ret;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}
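
/*
 * Example write flow (illustrative): a guest WRMSR to MSR_K7_EVNTSEL1
 * with no reserved bits set reaches reprogram_gp_counter() above and
 * re-creates the backing perf event for counter 1; a value with
 * reserved bits set falls through to the final "return 1", so the
 * common MSR code rejects the write.
 */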

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}
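
/*
 * Note (illustrative): AMD64_NUM_COUNTERS is 4 for the legacy K7-style
 * PMU, and ((u64)1 << 48) - 1 matches the 48-bit width of AMD
 * performance counters, so guest-visible counter values wrap exactly
 * as they would on bare metal.
 */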

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
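
/*
 * These callbacks are invoked through the vendor-neutral vPMU code in
 * pmu.c; the SVM side is expected to hook this table up via its
 * kvm_x86_ops, mirroring intel_pmu_ops on the VMX side.
 */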