/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */

#define pr_fmt(fmt)	"power9-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [ ] [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  *- IFM (Linux)            |               thresh start/stop -*
 *   | *- BHRB (Linux)              *sm
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []    m   [    pmcxsel    ]
 *     |        |           *- L1/L2/L3 cache_sel                |
 *     |        *- sampling mode for marked events               *- combine
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[24]   = pmc1combine[0]
 * MMCR1[25]   = pmc1combine[1]
 * MMCR1[26]   = pmc2combine[0]
 * MMCR1[27]   = pmc2combine[1]
 * MMCR1[28]   = pmc3combine[0]
 * MMCR1[29]   = pmc3combine[1]
 * MMCR1[30]   = pmc4combine[0]
 * MMCR1[31]   = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 * else
 *	MMCRA[48:55] = thresh_ctl	(THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[9:11] = thresh_cmp[0:2]
 *	MMCRA[12:18] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * MMCRA[SDAR_MODE]  = sm
 */
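
/*
 * Illustrative decode under the layout above (a made-up value, not an
 * official Power9 event code): config = 0x000000000002001e has
 * pmcxsel = 0x1e (bits 0-7) and pmc = 2 (bits 16-19), with every other
 * field zero.  The PMU_FORMAT_ATTR() definitions further down give the
 * exact config bit ranges that userspace can set individually.
 */
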
/*
 * Some power9 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power9-events-list.h"
};

#undef EVENT

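/*
 * power9-events-list.h expands each EVENT(name, code) entry into an
 * enumerator, so the raw event codes can be referenced by name (PM_CYC,
 * PM_INST_CMPL, ...) throughout the rest of this file.
 */
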
/* MMCRA IFM bits - POWER9 */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL
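
/*
 * These constants are values for the MMCRA instruction filtering mode (IFM)
 * field, used by power9_bhrb_filter_map() below to program BHRB branch
 * filtering.
 */
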
/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;

/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
};
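
/*
 * Each row of the table above lists codes that count the same event but can
 * be scheduled on different PMCs (here PM_INST_DISP and its _ALT encoding),
 * letting the constraint solver substitute one for the other when the
 * preferred counter is already taken.
 */
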
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives,
					(int)ARRAY_SIZE(power9_event_alternatives));

	return num_alt;
}

GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_CMPL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1_FIN);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *power9_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_CMPL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power9_pmu_events_group = {
	.name = "events",
	.attrs = power9_events_attr,
};

static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power9_pmu_events_group,
	NULL,
};

PMU_FORMAT_ATTR(event,		"config:0-51");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:10-11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");
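
/*
 * The format strings above are exported via sysfs (typically under
 * /sys/bus/event_source/devices/cpu/format/) so raw events can be composed
 * field by field from userspace, e.g. (illustrative values only):
 *
 *	perf stat -e cpu/pmcxsel=0x1e,pmc=1/ -- <workload>
 */
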
static struct attribute *power9_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	&format_attr_sdar_mode.attr,
	NULL,
};

static struct attribute_group power9_pmu_format_group = {
	.name = "format",
	.attrs = power9_pmu_format_attr,
};

static const struct attribute_group *power9_pmu_attr_groups[] = {
	&power9_pmu_format_group,
	&power9_pmu_events_group,
	NULL,
};

static int power9_generic_events_dd1[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_DISP,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BR_CMPL_ALT,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};

static int power9_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};
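
/*
 * The two tables above back perf's symbolic hardware events ("cycles",
 * "instructions", "branch-misses", ...); for example
 * PERF_COUNT_HW_CPU_CYCLES resolves to PM_CYC here.
 */
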
static u64 power9_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
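
/*
 * Example mapping (assuming the usual perf tool options): a request such as
 * "perf record -j any_call ..." sets PERF_SAMPLE_BRANCH_ANY_CALL, which the
 * function above translates into POWER9_MMCRA_IFM1.
 */
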
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_CMPL,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};
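
/*
 * Generic cache events such as "L1-dcache-load-misses" are resolved through
 * this table; that one, for instance, ends up as PM_LD_MISS_L1_FIN via
 * C(L1D)/C(OP_READ)/C(RESULT_MISS).
 */
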
static struct power_pmu power9_isa207_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= P9_DD1_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_NO_SIAR | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events_dd1),
	.generic_events		= power9_generic_events_dd1,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_isa207_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static struct power_pmu power9_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events),
	.generic_events		= power9_generic_events,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static int __init init_power9_pmu(void)
{
	int rc = 0;

	/* Comes from cpu_specs[] */
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
		return -ENODEV;

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		/*
		 * Since PM_INST_CMPL may not provide right counts in all
		 * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
		 */
		EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
		/*
		 * Power9 DD1 should use PM_BR_CMPL_ALT event code for
		 * "branches" to provide correct counter value.
		 */
		EVENT_VAR(PM_BRU_CMPL, _g).id = PM_BR_CMPL_ALT;
		EVENT_VAR(PM_BRU_CMPL, _c).id = PM_BR_CMPL_ALT;
		rc = register_power_pmu(&power9_isa207_pmu);
	} else {
		rc = register_power_pmu(&power9_pmu);
	}

	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}
early_initcall(init_power9_pmu);