/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #define pr_fmt(fmt) "power8-pmu: " fmt
15 #include "isa207-common.h"
18 * Some power8 event codes.
20 #define EVENT(_name, _code) _name = _code,
23 #include "power8-events-list.h"
/* MMCRA IFM bits - POWER8 (instruction filtering mode for BHRB capture) */
#define	POWER8_MMCRA_IFM1		0x0000000040000000UL
#define	POWER8_MMCRA_IFM2		0x0000000080000000UL
#define	POWER8_MMCRA_IFM3		0x00000000C0000000UL
33 /* Table of alternatives, sorted by column 0 */
34 static const unsigned int event_alternatives
[][MAX_ALT
] = {
35 { PM_MRK_ST_CMPL
, PM_MRK_ST_CMPL_ALT
},
36 { PM_BR_MRK_2PATH
, PM_BR_MRK_2PATH_ALT
},
37 { PM_L3_CO_MEPF
, PM_L3_CO_MEPF_ALT
},
38 { PM_MRK_DATA_FROM_L2MISS
, PM_MRK_DATA_FROM_L2MISS_ALT
},
39 { PM_CMPLU_STALL_ALT
, PM_CMPLU_STALL
},
40 { PM_BR_2PATH
, PM_BR_2PATH_ALT
},
41 { PM_INST_DISP
, PM_INST_DISP_ALT
},
42 { PM_RUN_CYC_ALT
, PM_RUN_CYC
},
43 { PM_MRK_FILT_MATCH
, PM_MRK_FILT_MATCH_ALT
},
44 { PM_LD_MISS_L1
, PM_LD_MISS_L1_ALT
},
45 { PM_RUN_INST_CMPL_ALT
, PM_RUN_INST_CMPL
},
49 * Scan the alternatives table for a match and return the
50 * index into the alternatives table if found, else -1.
52 static int find_alternative(u64 event
)
56 for (i
= 0; i
< ARRAY_SIZE(event_alternatives
); ++i
) {
57 if (event
< event_alternatives
[i
][0])
60 for (j
= 0; j
< MAX_ALT
&& event_alternatives
[i
][j
]; ++j
)
61 if (event
== event_alternatives
[i
][j
])
68 static int power8_get_alternatives(u64 event
, unsigned int flags
, u64 alt
[])
70 int i
, j
, num_alt
= 0;
73 alt
[num_alt
++] = event
;
75 i
= find_alternative(event
);
77 /* Filter out the original event, it's already in alt[0] */
78 for (j
= 0; j
< MAX_ALT
; ++j
) {
79 alt_event
= event_alternatives
[i
][j
];
80 if (alt_event
&& alt_event
!= event
)
81 alt
[num_alt
++] = alt_event
;
85 if (flags
& PPMU_ONLY_COUNT_RUN
) {
87 * We're only counting in RUN state, so PM_CYC is equivalent to
88 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
91 for (i
= 0; i
< num_alt
; ++i
) {
94 alt
[j
++] = PM_RUN_CYC
;
100 alt
[j
++] = PM_RUN_INST_CMPL
;
102 case PM_RUN_INST_CMPL
:
103 alt
[j
++] = PM_INST_CMPL
;
113 GENERIC_EVENT_ATTR(cpu
-cycles
, PM_CYC
);
114 GENERIC_EVENT_ATTR(stalled
-cycles
-frontend
, PM_GCT_NOSLOT_CYC
);
115 GENERIC_EVENT_ATTR(stalled
-cycles
-backend
, PM_CMPLU_STALL
);
116 GENERIC_EVENT_ATTR(instructions
, PM_INST_CMPL
);
117 GENERIC_EVENT_ATTR(branch
-instructions
, PM_BRU_FIN
);
118 GENERIC_EVENT_ATTR(branch
-misses
, PM_BR_MPRED_CMPL
);
119 GENERIC_EVENT_ATTR(cache
-references
, PM_LD_REF_L1
);
120 GENERIC_EVENT_ATTR(cache
-misses
, PM_LD_MISS_L1
);
122 CACHE_EVENT_ATTR(L1
-dcache
-load
-misses
, PM_LD_MISS_L1
);
123 CACHE_EVENT_ATTR(L1
-dcache
-loads
, PM_LD_REF_L1
);
125 CACHE_EVENT_ATTR(L1
-dcache
-prefetches
, PM_L1_PREF
);
126 CACHE_EVENT_ATTR(L1
-dcache
-store
-misses
, PM_ST_MISS_L1
);
127 CACHE_EVENT_ATTR(L1
-icache
-load
-misses
, PM_L1_ICACHE_MISS
);
128 CACHE_EVENT_ATTR(L1
-icache
-loads
, PM_INST_FROM_L1
);
129 CACHE_EVENT_ATTR(L1
-icache
-prefetches
, PM_IC_PREF_WRITE
);
131 CACHE_EVENT_ATTR(LLC
-load
-misses
, PM_DATA_FROM_L3MISS
);
132 CACHE_EVENT_ATTR(LLC
-loads
, PM_DATA_FROM_L3
);
133 CACHE_EVENT_ATTR(LLC
-prefetches
, PM_L3_PREF_ALL
);
134 CACHE_EVENT_ATTR(LLC
-store
-misses
, PM_L2_ST_MISS
);
135 CACHE_EVENT_ATTR(LLC
-stores
, PM_L2_ST
);
137 CACHE_EVENT_ATTR(branch
-load
-misses
, PM_BR_MPRED_CMPL
);
138 CACHE_EVENT_ATTR(branch
-loads
, PM_BRU_FIN
);
139 CACHE_EVENT_ATTR(dTLB
-load
-misses
, PM_DTLB_MISS
);
140 CACHE_EVENT_ATTR(iTLB
-load
-misses
, PM_ITLB_MISS
);
142 static struct attribute
*power8_events_attr
[] = {
143 GENERIC_EVENT_PTR(PM_CYC
),
144 GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC
),
145 GENERIC_EVENT_PTR(PM_CMPLU_STALL
),
146 GENERIC_EVENT_PTR(PM_INST_CMPL
),
147 GENERIC_EVENT_PTR(PM_BRU_FIN
),
148 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL
),
149 GENERIC_EVENT_PTR(PM_LD_REF_L1
),
150 GENERIC_EVENT_PTR(PM_LD_MISS_L1
),
152 CACHE_EVENT_PTR(PM_LD_MISS_L1
),
153 CACHE_EVENT_PTR(PM_LD_REF_L1
),
154 CACHE_EVENT_PTR(PM_L1_PREF
),
155 CACHE_EVENT_PTR(PM_ST_MISS_L1
),
156 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS
),
157 CACHE_EVENT_PTR(PM_INST_FROM_L1
),
158 CACHE_EVENT_PTR(PM_IC_PREF_WRITE
),
159 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS
),
160 CACHE_EVENT_PTR(PM_DATA_FROM_L3
),
161 CACHE_EVENT_PTR(PM_L3_PREF_ALL
),
162 CACHE_EVENT_PTR(PM_L2_ST_MISS
),
163 CACHE_EVENT_PTR(PM_L2_ST
),
165 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL
),
166 CACHE_EVENT_PTR(PM_BRU_FIN
),
168 CACHE_EVENT_PTR(PM_DTLB_MISS
),
169 CACHE_EVENT_PTR(PM_ITLB_MISS
),
173 static struct attribute_group power8_pmu_events_group
= {
175 .attrs
= power8_events_attr
,
178 PMU_FORMAT_ATTR(event
, "config:0-49");
179 PMU_FORMAT_ATTR(pmcxsel
, "config:0-7");
180 PMU_FORMAT_ATTR(mark
, "config:8");
181 PMU_FORMAT_ATTR(combine
, "config:11");
182 PMU_FORMAT_ATTR(unit
, "config:12-15");
183 PMU_FORMAT_ATTR(pmc
, "config:16-19");
184 PMU_FORMAT_ATTR(cache_sel
, "config:20-23");
185 PMU_FORMAT_ATTR(sample_mode
, "config:24-28");
186 PMU_FORMAT_ATTR(thresh_sel
, "config:29-31");
187 PMU_FORMAT_ATTR(thresh_stop
, "config:32-35");
188 PMU_FORMAT_ATTR(thresh_start
, "config:36-39");
189 PMU_FORMAT_ATTR(thresh_cmp
, "config:40-49");
191 static struct attribute
*power8_pmu_format_attr
[] = {
192 &format_attr_event
.attr
,
193 &format_attr_pmcxsel
.attr
,
194 &format_attr_mark
.attr
,
195 &format_attr_combine
.attr
,
196 &format_attr_unit
.attr
,
197 &format_attr_pmc
.attr
,
198 &format_attr_cache_sel
.attr
,
199 &format_attr_sample_mode
.attr
,
200 &format_attr_thresh_sel
.attr
,
201 &format_attr_thresh_stop
.attr
,
202 &format_attr_thresh_start
.attr
,
203 &format_attr_thresh_cmp
.attr
,
207 static struct attribute_group power8_pmu_format_group
= {
209 .attrs
= power8_pmu_format_attr
,
212 static const struct attribute_group
*power8_pmu_attr_groups
[] = {
213 &power8_pmu_format_group
,
214 &power8_pmu_events_group
,
218 static int power8_generic_events
[] = {
219 [PERF_COUNT_HW_CPU_CYCLES
] = PM_CYC
,
220 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] = PM_GCT_NOSLOT_CYC
,
221 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] = PM_CMPLU_STALL
,
222 [PERF_COUNT_HW_INSTRUCTIONS
] = PM_INST_CMPL
,
223 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = PM_BRU_FIN
,
224 [PERF_COUNT_HW_BRANCH_MISSES
] = PM_BR_MPRED_CMPL
,
225 [PERF_COUNT_HW_CACHE_REFERENCES
] = PM_LD_REF_L1
,
226 [PERF_COUNT_HW_CACHE_MISSES
] = PM_LD_MISS_L1
,
229 static u64
power8_bhrb_filter_map(u64 branch_sample_type
)
231 u64 pmu_bhrb_filter
= 0;
233 /* BHRB and regular PMU events share the same privilege state
234 * filter configuration. BHRB is always recorded along with a
235 * regular PMU event. As the privilege state filter is handled
236 * in the basic PMC configuration of the accompanying regular
237 * PMU event, we ignore any separate BHRB specific request.
240 /* No branch filter requested */
241 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY
)
242 return pmu_bhrb_filter
;
244 /* Invalid branch filter options - HW does not support */
245 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY_RETURN
)
248 if (branch_sample_type
& PERF_SAMPLE_BRANCH_IND_CALL
)
251 if (branch_sample_type
& PERF_SAMPLE_BRANCH_CALL
)
254 if (branch_sample_type
& PERF_SAMPLE_BRANCH_ANY_CALL
) {
255 pmu_bhrb_filter
|= POWER8_MMCRA_IFM1
;
256 return pmu_bhrb_filter
;
259 /* Every thing else is unsupported */
263 static void power8_config_bhrb(u64 pmu_bhrb_filter
)
265 /* Enable BHRB filter in PMU */
266 mtspr(SPRN_MMCRA
, (mfspr(SPRN_MMCRA
) | pmu_bhrb_filter
));
269 #define C(x) PERF_COUNT_HW_CACHE_##x
272 * Table of generalized cache-related events.
273 * 0 means not supported, -1 means nonsensical, other values
276 static int power8_cache_events
[C(MAX
)][C(OP_MAX
)][C(RESULT_MAX
)] = {
279 [ C(RESULT_ACCESS
) ] = PM_LD_REF_L1
,
280 [ C(RESULT_MISS
) ] = PM_LD_MISS_L1
,
283 [ C(RESULT_ACCESS
) ] = 0,
284 [ C(RESULT_MISS
) ] = PM_ST_MISS_L1
,
286 [ C(OP_PREFETCH
) ] = {
287 [ C(RESULT_ACCESS
) ] = PM_L1_PREF
,
288 [ C(RESULT_MISS
) ] = 0,
293 [ C(RESULT_ACCESS
) ] = PM_INST_FROM_L1
,
294 [ C(RESULT_MISS
) ] = PM_L1_ICACHE_MISS
,
297 [ C(RESULT_ACCESS
) ] = PM_L1_DEMAND_WRITE
,
298 [ C(RESULT_MISS
) ] = -1,
300 [ C(OP_PREFETCH
) ] = {
301 [ C(RESULT_ACCESS
) ] = PM_IC_PREF_WRITE
,
302 [ C(RESULT_MISS
) ] = 0,
307 [ C(RESULT_ACCESS
) ] = PM_DATA_FROM_L3
,
308 [ C(RESULT_MISS
) ] = PM_DATA_FROM_L3MISS
,
311 [ C(RESULT_ACCESS
) ] = PM_L2_ST
,
312 [ C(RESULT_MISS
) ] = PM_L2_ST_MISS
,
314 [ C(OP_PREFETCH
) ] = {
315 [ C(RESULT_ACCESS
) ] = PM_L3_PREF_ALL
,
316 [ C(RESULT_MISS
) ] = 0,
321 [ C(RESULT_ACCESS
) ] = 0,
322 [ C(RESULT_MISS
) ] = PM_DTLB_MISS
,
325 [ C(RESULT_ACCESS
) ] = -1,
326 [ C(RESULT_MISS
) ] = -1,
328 [ C(OP_PREFETCH
) ] = {
329 [ C(RESULT_ACCESS
) ] = -1,
330 [ C(RESULT_MISS
) ] = -1,
335 [ C(RESULT_ACCESS
) ] = 0,
336 [ C(RESULT_MISS
) ] = PM_ITLB_MISS
,
339 [ C(RESULT_ACCESS
) ] = -1,
340 [ C(RESULT_MISS
) ] = -1,
342 [ C(OP_PREFETCH
) ] = {
343 [ C(RESULT_ACCESS
) ] = -1,
344 [ C(RESULT_MISS
) ] = -1,
349 [ C(RESULT_ACCESS
) ] = PM_BRU_FIN
,
350 [ C(RESULT_MISS
) ] = PM_BR_MPRED_CMPL
,
353 [ C(RESULT_ACCESS
) ] = -1,
354 [ C(RESULT_MISS
) ] = -1,
356 [ C(OP_PREFETCH
) ] = {
357 [ C(RESULT_ACCESS
) ] = -1,
358 [ C(RESULT_MISS
) ] = -1,
363 [ C(RESULT_ACCESS
) ] = -1,
364 [ C(RESULT_MISS
) ] = -1,
367 [ C(RESULT_ACCESS
) ] = -1,
368 [ C(RESULT_MISS
) ] = -1,
370 [ C(OP_PREFETCH
) ] = {
371 [ C(RESULT_ACCESS
) ] = -1,
372 [ C(RESULT_MISS
) ] = -1,
379 static struct power_pmu power8_pmu
= {
381 .n_counter
= MAX_PMU_COUNTERS
,
382 .max_alternatives
= MAX_ALT
+ 1,
383 .add_fields
= ISA207_ADD_FIELDS
,
384 .test_adder
= ISA207_TEST_ADDER
,
385 .compute_mmcr
= isa207_compute_mmcr
,
386 .config_bhrb
= power8_config_bhrb
,
387 .bhrb_filter_map
= power8_bhrb_filter_map
,
388 .get_constraint
= isa207_get_constraint
,
389 .get_alternatives
= power8_get_alternatives
,
390 .disable_pmc
= isa207_disable_pmc
,
391 .flags
= PPMU_HAS_SIER
| PPMU_ARCH_207S
,
392 .n_generic
= ARRAY_SIZE(power8_generic_events
),
393 .generic_events
= power8_generic_events
,
394 .cache_events
= &power8_cache_events
,
395 .attr_groups
= power8_pmu_attr_groups
,
399 static int __init
init_power8_pmu(void)
403 if (!cur_cpu_spec
->oprofile_cpu_type
||
404 strcmp(cur_cpu_spec
->oprofile_cpu_type
, "ppc64/power8"))
407 rc
= register_power_pmu(&power8_pmu
);
411 /* Tell userspace that EBB is supported */
412 cur_cpu_spec
->cpu_user_features2
|= PPC_FEATURE2_EBB
;
414 if (cpu_has_feature(CPU_FTR_PMAO_BUG
))
415 pr_info("PMAO restore workaround active.\n");
419 early_initcall(init_power8_pmu
);