arch/x86/kvm/pmu_intel.c
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

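/*
 * Reprogram the fixed counters whose control fields in the new
 * MSR_CORE_PERF_FIXED_CTR_CTRL value differ from the current one.
 */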
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}

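/*
 * Map a guest event_select/unit_mask pair to a generic perf event type,
 * provided the architectural event is advertised as available to the guest.
 */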
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}

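/* Map a fixed counter index to its generic perf event type. */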
static unsigned intel_find_fixed_event(int idx)
{
        u32 event;
        size_t size = ARRAY_SIZE(fixed_pmc_events);

        if (idx >= size)
                return PERF_COUNT_HW_MAX;

        event = fixed_pmc_events[array_index_nospec(idx, size)];
        return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

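/* Translate a global PMC index into the matching gp or fixed counter. */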
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}

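/*
 * Translate an RDPMC index (bit 30 selects the fixed counters) into a PMC,
 * clamping the index with array_index_nospec() to avoid speculative
 * out-of-bounds reads of the counter arrays.
 */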
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;
        unsigned int num_counters;

        idx &= ~(3u << 30);
        if (fixed) {
                counters = pmu->fixed_counters;
                num_counters = pmu->nr_arch_fixed_counters;
        } else {
                counters = pmu->gp_counters;
                num_counters = pmu->nr_arch_gp_counters;
        }
        if (idx >= num_counters)
                return NULL;
        return &counters[array_index_nospec(idx, num_counters)];
}

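/* Check whether the given MSR is handled by the vPMU. */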
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}

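/* Read a PMU MSR on behalf of the guest; returns 0 on success, 1 otherwise. */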
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        *data = pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}

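/*
 * Write a PMU MSR on behalf of the guest, rejecting values that set
 * reserved bits; returns 0 on success, 1 otherwise.
 */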
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
                        if (msr_info->host_initiated)
                                pmc->counter = data;
                        else
                                pmc->counter = (s32)data;
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc->counter = data;
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}

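/*
 * Re-derive the vPMU configuration (counter counts, widths, available
 * events) from the guest's CPUID leaves 0xA and 7.
 */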
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;

        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

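/* One-time initialization of the per-vCPU PMC bookkeeping. */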
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
}

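/* Stop all counters and clear all PMU state, e.g. on vCPU reset. */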
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                pmc_stop_counter(&pmu->fixed_counters[i]);

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

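/* Intel implementation of the kvm_pmu_ops callbacks. */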
struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr_idx = intel_is_valid_msr_idx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};