// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */

#define pr_fmt(fmt)	"power9-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [ ] [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                         |                                     |
 *   | |  *- IFM (Linux)            |                 thresh start/stop -*
 *   | *- BHRB (Linux)              *sm
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []    m   [    pmcxsel    ]
 *     |        |           |                   |       |    |
 *     |        |           |                   |       |    *- mark
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit (PMCxUNIT)
 * MMCR1[24]  = pmc1combine[0]
 * MMCR1[25]  = pmc1combine[1]
 * MMCR1[26]  = pmc2combine[0]
 * MMCR1[27]  = pmc2combine[1]
 * MMCR1[28]  = pmc3combine[0]
 * MMCR1[29]  = pmc3combine[1]
 * MMCR1[30]  = pmc4combine[0]
 * MMCR1[31]  = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 * else
 *	MMCRA[48:55] = thresh_ctl (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[9:11] = thresh_cmp[0:2]
 *	MMCRA[12:18] = thresh_cmp[3:9]
 *
 * MMCR1[16] = cache_sel[2]
 * MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63] = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 * MMCRA[SDAR_MODE] = sm
 */

/*
 * Some power9 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power9-events-list.h"
};

#undef EVENT

/* MMCRA IFM bits - POWER9 */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK		0x00000000C0000000UL

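/* Mask of extended regs supported by this PMU; initialised in init_power9_pmu() */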
extern u64 PERF_REG_EXTENDED_MASK;

/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS		0x00002000

/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;

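/* Events blacklisted on POWER9 DD2.1 parts; selected at init time from the PVR */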
int p9_dd21_bl_ev[] = {
	PM_MRK_ST_DONE_L2,
	PM_RADIX_PWC_L1_HIT,
	PM_FLOP_CMPL,
	PM_MRK_NTF_FIN,
	PM_RADIX_PWC_L2_HIT,
	PM_IFETCH_THROTTLE,
	PM_MRK_L2_TM_ST_ABORT_SISTER,
	PM_RADIX_PWC_L3_HIT,
	PM_RUN_CYC_SMT2_MODE,
	PM_TM_TX_PASS_RUN_INST,
	PM_DISP_HELD_SYNC_HOLD,
};

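/* Events blacklisted on POWER9 DD2.2 parts; likewise selected in init_power9_pmu() */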
int p9_dd22_bl_ev[] = {
	PM_DTLB_MISS_16G,
	PM_DERAT_MISS_2M,
	PM_DTLB_MISS_2M,
	PM_MRK_DTLB_MISS_1G,
	PM_DTLB_MISS_4K,
	PM_DERAT_MISS_1G,
	PM_MRK_DERAT_MISS_2M,
	PM_MRK_DTLB_MISS_4K,
	PM_MRK_DTLB_MISS_16G,
	PM_DTLB_MISS_64K,
	PM_MRK_DERAT_MISS_1G,
	PM_MRK_DTLB_MISS_64K,
	PM_DISP_HELD_SYNC_HOLD,
	PM_DTLB_MISS_16M,
	PM_DTLB_MISS_1G,
	PM_MRK_DTLB_MISS_16M,
};

/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
};

static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(power9_event_alternatives), flags,
					  power9_event_alternatives);

	return num_alt;
}

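/*
 * sysfs event attributes: each entry pairs a symbolic event name with its
 * raw POWER9 event code, exported under the PMU's "events" directory.
 */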
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1_FIN);
GENERIC_EVENT_ATTR(mem-loads,			MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores,			MEM_STORES);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *power9_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BR_CMPL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
	GENERIC_EVENT_PTR(MEM_LOADS),
	GENERIC_EVENT_PTR(MEM_STORES),
	CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BR_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power9_pmu_events_group = {
	.name = "events",
	.attrs = power9_events_attr,
};

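/*
 * sysfs format attributes: describe how the raw event encoding documented
 * at the top of this file is laid out across perf_event_attr.config bits.
 */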
PMU_FORMAT_ATTR(event,		"config:0-51");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:10-11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");

static struct attribute *power9_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	&format_attr_sdar_mode.attr,
	NULL,
};

static struct attribute_group power9_pmu_format_group = {
	.name = "format",
	.attrs = power9_pmu_format_attr,
};

static const struct attribute_group *power9_pmu_attr_groups[] = {
	&power9_pmu_format_group,
	&power9_pmu_events_group,
	NULL,
};

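/* Map the Linux generic hardware events onto POWER9 raw event codes */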
static int power9_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BR_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};

static u64 power9_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}

static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
	pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;

	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BR_CMPL,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C

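/*
 * POWER9 PMU description. Most of the machinery (constraint handling,
 * MMCR computation, memory data source decoding) is shared with other
 * ISA v2.07/v3.0 processors via the isa207-common helpers.
 */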
static struct power_pmu power9_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.group_constraint_mask	= CNST_CACHE_PMC4_MASK,
	.group_constraint_val	= CNST_CACHE_PMC4_VAL,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events),
	.generic_events		= power9_generic_events,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_pmu_attr_groups,
	.bhrb_nr		= 32,
	.capabilities		= PERF_PMU_CAP_EXTENDED_REGS,
};

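/* Probe for a POWER9 CPU and, if found, register the PMU with the perf core */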
int init_power9_pmu(void)
{
	int rc = 0;
	unsigned int pvr = mfspr(SPRN_PVR);

	/* Comes from cpu_specs[] */
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
		return -ENODEV;

	/* Blacklist events */
	if (!(pvr & PVR_POWER9_CUMULUS)) {
		if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) {
			power9_pmu.blacklist_ev = p9_dd21_bl_ev;
			power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd21_bl_ev);
		} else if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 2)) {
			power9_pmu.blacklist_ev = p9_dd22_bl_ev;
			power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd22_bl_ev);
		}
	}

	/* Set the PERF_REG_EXTENDED_MASK here */
	PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;

	rc = register_power_pmu(&power9_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}