/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"power8-pmu: " fmt

#include "isa207-common.h"
/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT
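/*
 * Note: power8-events-list.h is a list of EVENT(name, code) entries, so with
 * the EVENT() definition above each entry expands to an enumerator in the
 * enum, giving every raw POWER8 event code a symbolic name (PM_CYC,
 * PM_INST_CMPL, ...) used throughout this file.
 */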
/* MMCRA IFM bits - POWER8 */
#define	POWER8_MMCRA_IFM1		0x0000000040000000UL
#define	POWER8_MMCRA_IFM2		0x0000000080000000UL
#define	POWER8_MMCRA_IFM3		0x00000000C0000000UL

/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
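/*
 * Each row above pairs event codes that count the same thing but map to
 * different PMCs; exposing them as alternatives (via isa207_get_alternatives()
 * below) gives the event scheduler more freedom when placing several events
 * on the limited set of counters.
 */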
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt, event_alternatives,
					  (int)ARRAY_SIZE(event_alternatives));

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case PM_CYC:
				alt[j++] = PM_RUN_CYC;
				break;
			case PM_RUN_CYC:
				alt[j++] = PM_CYC;
				break;
			case PM_INST_CMPL:
				alt[j++] = PM_RUN_INST_CMPL;
				break;
			case PM_RUN_INST_CMPL:
				alt[j++] = PM_INST_CMPL;
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
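/*
 * The attributes above are exported via sysfs (under the core PMU's "events"
 * directory), so the symbolic names can be used directly from userspace,
 * e.g. with a standard perf build: perf stat -e cpu/cpu-cycles/,cpu/branch-misses/.
 * (The "cpu" PMU name is assumed here; it is how the core powerpc PMU is
 * normally registered.)
 */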
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C
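/*
 * For reference (generic perf behaviour, not POWER8 specific): a
 * PERF_TYPE_HW_CACHE event packs its three indices into attr.config roughly as
 * cache_id | (op_id << 8) | (result_id << 16); the core powerpc code unpacks
 * those values and uses them to look up the raw event code in this table.
 */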
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= MAX_PMU_COUNTERS,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};
static int __init init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}
early_initcall(init_power8_pmu);