/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

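/* Reprogram the fixed counters whose enable field changed in FIXED_CTR_CTRL. */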
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* function is called when global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

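/*
 * Translate a guest eventsel/unit_mask pair into a generic perf hw event;
 * returns PERF_COUNT_HW_MAX if the event is not architectural or is not
 * advertised as available to the guest.
 */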
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

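/* Map a fixed counter index to its architectural event type. */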
static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

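/* Translate a global PMC index into the corresponding gp or fixed counter. */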
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

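/*
 * RDPMC emulation helper: bit 30 of the index selects fixed counters; the
 * read mask is narrowed to the width of the selected counter type.
 */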
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

	return &counters[idx];
}

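/* Check whether the guest is allowed to access the given PMU MSR. */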
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/* The global control/status MSRs only exist with PMU v2+. */
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

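/* Read a PMU MSR; returns 0 on success, 1 if the MSR is not handled here. */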
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

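/*
 * Write a PMU MSR; returns 0 on success, 1 when the MSR is unknown or the
 * value touches reserved bits, so the caller can treat the access as invalid.
 */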
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

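/*
 * Re-derive the PMU configuration (counter counts, widths, available
 * events) from the guest's CPUID leaf 0xA whenever CPUID changes.
 */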
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

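/* One-time per-vcpu setup of the counter arrays and their fixed indices. */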
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
}

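/* Stop all counters and clear the PMU control/status state on vcpu reset. */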
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		pmc_stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

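/* Intel (VMX) callbacks plugged into the generic KVM PMU code. */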
struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr_idx = intel_is_valid_msr_idx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};