Linux 4.2.2
arch/x86/kvm/pmu_amd.c

/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

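/*
 * Each entry above is { eventsel, unit_mask, event_type }: the raw AMD
 * event-select byte, its unit mask, and the generic perf event it maps
 * to. For instance, 0x76 is the long-standing AMD "CPU clocks not
 * halted" event and 0xc0 counts retired instructions, encodings that
 * date back to the K7, which is why K7 and above are expected to work.
 */
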
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

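/*
 * Example: a guest programming event-select 0xc0 with unit mask 0x00
 * (retired instructions) resolves to PERF_COUNT_HW_INSTRUCTIONS, so the
 * counter can be backed by a generic host perf event. A pair that is
 * not in the table yields PERF_COUNT_HW_MAX, which the common code in
 * pmu.c is expected to treat as a request for a raw perf event instead.
 */
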
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

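/*
 * The "idx &= ~(3u << 30)" mask clears ECX bits 31:30 of the RDPMC
 * index. Those are selector flags rather than part of the counter
 * number (bit 30, for example, picks the fixed-counter set on Intel),
 * so only the plain counter index is range-checked here.
 */
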
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

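/*
 * Together with amd_is_valid_msr_idx() above, this backs RDPMC
 * emulation: the former validates the guest's ECX value, while this
 * helper returns the kvm_pmc whose virtual count the guest reads.
 */
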
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret = false;

	ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
		get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);

	return ret;
}

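/*
 * The K7 PMU MSRs come in two banks of four: MSR_K7_EVNTSEL0..3 at
 * 0xc0010000-0xc0010003 and MSR_K7_PERFCTR0..3 at 0xc0010004-0xc0010007.
 * get_gp_pmc() recognizes an MSR in either bank and maps it to the
 * matching gp_counters[] slot, so a single boolean test covers both.
 */
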
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

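/* Like other KVM MSR handlers, returning 0 means the access was handled;
 * returning 1 lets the caller treat it as an unknown (faulting) MSR. */
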
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		if (!msr_info->host_initiated)
			data = (s64)data;
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

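/*
 * A counter write adjusts pmc->counter by the delta against the current
 * virtual count rather than storing the value directly, so ticks already
 * accumulated in the backing perf_event stay accounted for. An
 * event-select write reprograms the perf event only when the value
 * actually changes; one that sets reserved bits falls through to the
 * error return.
 */
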
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}

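/*
 * The 48-bit GP bitmask matches the architectural width of AMD
 * performance counters, and reserved_bits (bits 63:32 plus bit 21 of
 * the event-select MSR) marks what a guest may not set. Fixed counters,
 * a PMU version and global_status are Intel-only concepts, hence the
 * zeroing above.
 */
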
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};

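/*
 * amd_pmu_ops is the AMD instance of the kvm_pmu_ops vtable; svm.c hooks
 * it into kvm_x86_ops->pmu_ops, so the generic vPMU code in pmu.c
 * dispatches to the functions above on AMD hosts, with pmu_intel.c
 * providing the Intel counterpart.
 */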