arch/powerpc/perf/isa207-common.c

/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include "isa207-common.h"
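
/*
 * Check for the two marked fabric response match events, which are
 * special cased below: their match value lives in the threshold
 * control bits rather than the usual threshold fields.
 */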
static inline bool event_is_fab_match(u64 event)
{
        /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
        event &= 0xff0fe;

        /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
        return (event == 0x30056 || event == 0x4f052);
}
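
/*
 * Decode a raw event code into the (mask, value) constraint pair the
 * core powerpc perf code uses to decide which events can be scheduled
 * together. Returns 0 on success, or -1 if the event is not valid for
 * this PMU.
 */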
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
        unsigned int unit, pmc, cache, ebb;
        unsigned long mask, value;

        mask = value = 0;

        if (event & ~EVENT_VALID_MASK)
                return -1;

        pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
        unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
        cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
        ebb   = (event >> EVENT_EBB_SHIFT)       & EVENT_EBB_MASK;
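
        /*
         * If the event specifies a PMC directly, validate it. PMC5 and
         * PMC6 only support the 0x500fa and 0x600f4 base events, i.e.
         * the run-latch instruction and cycle counts (PM_RUN_INST_CMPL
         * and PM_RUN_CYC).
         */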
        if (pmc) {
                u64 base_event;

                if (pmc > 6)
                        return -1;

                /* Ignore Linux defined bits when checking event below */
                base_event = event & ~EVENT_LINUX_MASK;

                if (pmc >= 5 && base_event != 0x500fa &&
                                base_event != 0x600f4)
                        return -1;

                mask  |= CNST_PMC_MASK(pmc);
                value |= CNST_PMC_VAL(pmc);
        }

        if (pmc <= 4) {
                /*
                 * Add to number of counters in use. Note this includes events with
                 * a PMC of 0 - they still need a PMC, it's just assigned later.
                 * Don't count events on PMC 5 & 6, there is only one valid event
                 * on each of those counters, and they are handled above.
                 */
                mask  |= CNST_NC_MASK;
                value |= CNST_NC_VAL;
        }

        if (unit >= 6 && unit <= 9) {
                /*
                 * L2/L3 events contain a cache selector field, which is
                 * supposed to be programmed into MMCRC. However MMCRC is only
                 * HV writable, and there is no API for guest kernels to modify
                 * it. The solution is for the hypervisor to initialise the
                 * field to zeroes, and for us to only ever allow events that
                 * have a cache selector of zero. The bank selector (bit 3) is
                 * irrelevant, as long as the rest of the value is 0.
                 */
                if (cache & 0x7)
                        return -1;

        } else if (event & EVENT_IS_L1) {
                mask  |= CNST_L1_QUAL_MASK;
                value |= CNST_L1_QUAL_VAL(cache);
        }

        if (event & EVENT_IS_MARKED) {
                mask  |= CNST_SAMPLE_MASK;
                value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
        }

        /*
         * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
         * the threshold control bits are used for the match value.
         */
        if (event_is_fab_match(event)) {
                mask  |= CNST_FAB_MATCH_MASK;
                value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
        } else {
                /*
                 * Check the mantissa upper two bits are not zero, unless the
                 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
                 */
                unsigned int cmp, exp;

                cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
                exp = cmp >> 7;
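
                /*
                 * Worked example of the check below: the low 7 bits of
                 * cmp are the mantissa and the remaining bits the
                 * exponent, so cmp = 0x80 (exp = 1, top mantissa bits
                 * 0x60 clear) is rejected, while cmp = 0xe0 or
                 * cmp = 0x20 passes.
                 */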
                if (exp && (cmp & 0x60) == 0)
                        return -1;

                mask  |= CNST_THRESH_MASK;
                value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
        }

        if (!pmc && ebb)
                /* EBB events must specify the PMC */
                return -1;

        if (event & EVENT_WANTS_BHRB) {
                if (!ebb)
                        /* Only EBB events can request BHRB */
                        return -1;

                mask  |= CNST_IFM_MASK;
                value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
        }

        /*
         * All events must agree on EBB, either all request it or none.
         * EBB events are pinned & exclusive, so this should never actually
         * hit, but we leave it as a fallback in case.
         */
        mask  |= CNST_EBB_VAL(ebb);
        value |= CNST_EBB_MASK;

        *maskp = mask;
        *valp = value;

        return 0;
}
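
/*
 * Compute the MMCR register values for a set of decoded events: assign
 * a PMC to each event, accumulate the per-event control fields, and
 * return the results as mmcr[0] = MMCR0, mmcr[1] = MMCR1,
 * mmcr[2] = MMCRA and mmcr[3] = MMCR2. hwc[i] receives the 0-based
 * counter assigned to event i.
 */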
int isa207_compute_mmcr(u64 event[], int n_ev,
                        unsigned int hwc[], unsigned long mmcr[],
                        struct perf_event *pevents[])
{
        unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
        unsigned int pmc, pmc_inuse;
        int i;

        pmc_inuse = 0;

        /* First pass to count resource use */
        for (i = 0; i < n_ev; ++i) {
                pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
                if (pmc)
                        pmc_inuse |= 1 << pmc;
        }

        /* In continuous sampling mode, update SDAR on TLB miss */
        mmcra = MMCRA_SDAR_MODE_TLB;
        mmcr1 = mmcr2 = 0;

        /* Second pass: assign PMCs, set all MMCR1 fields */
        for (i = 0; i < n_ev; ++i) {
                pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
                unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
                combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
                psel    = event[i] & EVENT_PSEL_MASK;
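
                /*
                 * Events that don't specify a PMC take the first unused
                 * one in PMC1-4; the counter-usage constraint (CNST_NC)
                 * built in isa207_get_constraint() should guarantee a
                 * free one exists by the time we get here.
                 */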
                if (!pmc) {
                        for (pmc = 1; pmc <= 4; ++pmc) {
                                if (!(pmc_inuse & (1 << pmc)))
                                        break;
                        }

                        pmc_inuse |= 1 << pmc;
                }

                if (pmc <= 4) {
                        mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
                        mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
                        mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
                }

                if (event[i] & EVENT_IS_L1) {
                        cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
                        mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
                        cache >>= 1;
                        mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
                }

                if (event[i] & EVENT_IS_MARKED) {
                        mmcra |= MMCRA_SAMPLE_ENABLE;

                        val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
                        if (val) {
                                mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
                                mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
                        }
                }

                /*
                 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
                 * the threshold bits are used for the match value.
                 */
                if (event_is_fab_match(event[i])) {
                        mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
                                  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
                } else {
                        val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
                        mmcra |= val << MMCRA_THR_CTL_SHIFT;
                        val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
                        mmcra |= val << MMCRA_THR_SEL_SHIFT;
                        val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
                        mmcra |= val << MMCRA_THR_CMP_SHIFT;
                }

                if (event[i] & EVENT_WANTS_BHRB) {
                        val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
                        mmcra |= val << MMCRA_IFM_SHIFT;
                }

                if (pevents[i]->attr.exclude_user)
                        mmcr2 |= MMCR2_FCP(pmc);

                if (pevents[i]->attr.exclude_hv)
                        mmcr2 |= MMCR2_FCH(pmc);
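
                /*
                 * "Kernel" means hypervisor state when we run bare
                 * metal (HV mode) and supervisor state when we run
                 * under a hypervisor, so pick the matching freeze bit.
                 */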
                if (pevents[i]->attr.exclude_kernel) {
                        if (cpu_has_feature(CPU_FTR_HVMODE))
                                mmcr2 |= MMCR2_FCH(pmc);
                        else
                                mmcr2 |= MMCR2_FCS(pmc);
                }

                hwc[i] = pmc - 1;
        }

        /* Return MMCRx values */
        mmcr[0] = 0;

        /* pmc_inuse is 1-based */
        if (pmc_inuse & 2)
                mmcr[0] = MMCR0_PMC1CE;
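
        /* 0x7c covers PMCs 2-6; enable their counter-negative conditions */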
        if (pmc_inuse & 0x7c)
                mmcr[0] |= MMCR0_PMCjCE;

        /* If we're not using PMC 5 or 6, freeze them */
        if (!(pmc_inuse & 0x60))
                mmcr[0] |= MMCR0_FC56;

        mmcr[1] = mmcr1;
        mmcr[2] = mmcra;
        mmcr[3] = mmcr2;

        return 0;
}
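
/*
 * Clear the event selector for the given counter in MMCR1. Note pmc is
 * 0-based here; only PMC1-4 have PMCSEL fields, PMC5 and PMC6 count
 * fixed events.
 */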
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
        if (pmc <= 3)
                mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}