/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
#ifdef CONFIG_CPU_V7

static struct arm_pmu armv7pmu;
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
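
/*
 * Illustrative note: a request for the generic PERF_COUNT_HW_BRANCH_MISSES
 * event is translated through this table to ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
 * (0x10), which is the raw number that eventually gets programmed into
 * PMXEVTYPER by armv7_pmnc_write_evtsel(). HW_OP_UNSUPPORTED entries cause
 * the mapping step to reject that generic event on this core.
 */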
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
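
/*
 * Illustrative note: generic cache events arrive from the perf core encoded
 * as (cache type) | (operation << 8) | (result << 16); e.g.
 * "L1-dcache-load-misses" is C(L1D)/C(OP_READ)/C(RESULT_MISS) and resolves
 * here to ARMV7_PERFCTR_L1_DCACHE_REFILL. CACHE_OP_UNSUPPORTED entries make
 * the mapping fail for that combination.
 */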
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
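
/*
 * For example, ARMV7_IDX_COUNTER0 (1) maps to hardware counter 0, while
 * ARMV7_IDX_CYCLE_COUNTER (0) wraps around to 31, the bit position of the
 * cycle counter in the enable/interrupt/overflow registers, so the BIT()
 * arithmetic in the helpers below works for CCNT and the PMNx counters alike.
 */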
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
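
/*
 * Note that the read-only N field (bits [15:11]) is what
 * armv7_read_num_pmnc_events() parses at init time, while armv7pmu_start()
 * and armv7pmu_stop() below only ever toggle ARMV7_PMNC_E via a
 * read-modify-write of this register.
 */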
/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)
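
/*
 * The value programmed into PMXEVTYPER is the raw event number in bits [7:0]
 * plus, on PMUv2, filter bits: bit 31 excludes PL1, bit 30 excludes user and
 * bit 27 enables counting in Hyp mode. For instance, an event created with
 * attr.exclude_user set gets ARMV7_EXCLUDE_USER ORed into its config_base by
 * armv7pmu_set_event_filter() below.
 */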
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV7_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}

static inline u32 armv7pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
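
/*
 * Note on ordering: the counter is disabled while its event type is being
 * (re)programmed, so it never counts against a stale selection, and the
 * overflow interrupt is enabled before the counter itself is turned back on.
 */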
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;
	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
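
/*
 * In other words: a CPU-cycles event always competes for the dedicated cycle
 * counter (index 0), every other event takes the first free PMNx counter,
 * and -EAGAIN tells the core perf code that this PMU is fully occupied.
 */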
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
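
/*
 * Example: an event created with attr.exclude_kernel = 1 and attr.exclude_hv
 * = 1 ends up with config_base == ARMV7_EXCLUDE_PL1; the raw event number is
 * ORed into config_base afterwards by the generic ARM perf code, and the
 * combined value is what armv7_pmnc_write_evtsel() finally programs.
 */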
static void armv7pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv7pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}
static struct arm_pmu armv7pmu = {
	.handle_irq	= armv7pmu_handle_irq,
	.enable		= armv7pmu_enable_event,
	.disable	= armv7pmu_disable_event,
	.read_counter	= armv7pmu_read_counter,
	.write_counter	= armv7pmu_write_counter,
	.get_event_idx	= armv7pmu_get_event_idx,
	.start		= armv7pmu_start,
	.stop		= armv7pmu_stop,
	.reset		= armv7pmu_reset,
	.max_period	= (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
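
/*
 * For example, a core whose PMNC.N field reads 4 ends up with num_events = 5:
 * four programmable PMNx counters plus the cycle counter, matching the
 * ARMV7_IDX_* numbering used throughout this file.
 */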
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.map_event	= armv7_a8_map_event;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.map_event	= armv7_a9_map_event;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
	armv7pmu.name		= "ARMv7 Cortex-A5";
	armv7pmu.map_event	= armv7_a5_map_event;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
	armv7pmu.name		= "ARMv7 Cortex-A15";
	armv7pmu.map_event	= armv7_a15_map_event;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
	return &armv7pmu;
}
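
/*
 * The variant-specific init functions above are expected to be picked by the
 * generic ARM perf probing code from the CPUID part number. Note that only
 * the Cortex-A15 init installs armv7pmu_set_event_filter, which is also what
 * lets armv7pmu_enable_event() program an event type into the cycle counter
 * on that core.
 */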
#else
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */