/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
#ifdef CONFIG_CPU_V7

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
        ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
        ARMV7_PERFCTR_L1_ICACHE_REFILL          = 0x01,
        ARMV7_PERFCTR_ITLB_REFILL               = 0x02,
        ARMV7_PERFCTR_L1_DCACHE_REFILL          = 0x03,
        ARMV7_PERFCTR_L1_DCACHE_ACCESS          = 0x04,
        ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
        ARMV7_PERFCTR_MEM_READ                  = 0x06,
        ARMV7_PERFCTR_MEM_WRITE                 = 0x07,
        ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
        ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
        ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
        ARMV7_PERFCTR_CID_WRITE                 = 0x0B,

        /*
         * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
         * It counts:
         *  - all (taken) branch instructions,
         *  - instructions that explicitly write the PC,
         *  - exception generating instructions.
         */
        ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
        ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
        ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
        ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS      = 0x0F,
        ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
        ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,
        ARMV7_PERFCTR_PC_BRANCH_PRED            = 0x12,

        /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
        ARMV7_PERFCTR_MEM_ACCESS                = 0x13,
        ARMV7_PERFCTR_L1_ICACHE_ACCESS          = 0x14,
        ARMV7_PERFCTR_L1_DCACHE_WB              = 0x15,
        ARMV7_PERFCTR_L2_CACHE_ACCESS           = 0x16,
        ARMV7_PERFCTR_L2_CACHE_REFILL           = 0x17,
        ARMV7_PERFCTR_L2_CACHE_WB               = 0x18,
        ARMV7_PERFCTR_BUS_ACCESS                = 0x19,
        ARMV7_PERFCTR_MEM_ERROR                 = 0x1A,
        ARMV7_PERFCTR_INSTR_SPEC                = 0x1B,
        ARMV7_PERFCTR_TTBR_WRITE                = 0x1C,
        ARMV7_PERFCTR_BUS_CYCLES                = 0x1D,

        ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
        ARMV7_A8_PERFCTR_L2_CACHE_ACCESS        = 0x43,
        ARMV7_A8_PERFCTR_L2_CACHE_REFILL        = 0x44,
        ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS       = 0x50,
        ARMV7_A8_PERFCTR_STALL_ISIDE            = 0x56,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
        ARMV7_A9_PERFCTR_INSTR_CORE_RENAME      = 0x68,
        ARMV7_A9_PERFCTR_STALL_ICACHE           = 0x60,
        ARMV7_A9_PERFCTR_STALL_DISPATCH         = 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL      = 0xc2,
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ         = 0x40,
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE        = 0x41,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ         = 0x42,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE        = 0x43,

        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ           = 0x4C,
        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE          = 0x4D,

        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ          = 0x50,
        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE         = 0x51,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ          = 0x52,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE         = 0x53,

        ARMV7_A15_PERFCTR_PC_WRITE_SPEC                 = 0x76,
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};
/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
                        [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                /*
                 * The prefetch counters don't differentiate between the I
                 * side and the D side.
                 */
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
                        [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};
/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                /*
                 * Not all performance counters differentiate between read
                 * and write accesses/misses so we're not always strictly
                 * correct, but it's the best we can do. Writes and reads get
                 * combined in these cases.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};
/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
                        [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};
/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0      1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS      32
#define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)
/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
        (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
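/*
 * Worked example (added for illustration, not in the original source):
 * perf index 0 (ARMV7_IDX_CYCLE_COUNTER) is the cycle counter, while perf
 * indices 1..N map to hardware event counters 0..N-1, so
 * ARMV7_IDX_TO_COUNTER(3) == 2. For the cycle counter itself the macro
 * wraps (0 - 1) & ARMV7_COUNTER_MASK around to 31, which matches the
 * position of the cycle counter (C) bit in the enable, interrupt and
 * overflow flag registers used below.
 */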
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E            (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P            (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C            (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X            (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
#define ARMV7_PMNC_N_MASK       0x1f
#define ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */
/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK         0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK
/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK       0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT      0xff       /* Mask for EVENT bits */
/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1       (1 << 31)
#define ARMV7_EXCLUDE_USER      (1 << 30)
#define ARMV7_INCLUDE_HYP       (1 << 27)
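/*
 * Example (added for illustration, not in the original source): a counting
 * event opened with attr.exclude_kernel = 1 ends up with ARMV7_EXCLUDE_PL1
 * set in its config_base, and one with attr.exclude_user = 1 gets
 * ARMV7_EXCLUDE_USER, as armv7pmu_set_event_filter() below does. These bits
 * sit in the PMXEVTYPER filter field alongside the event number.
 */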
static inline u32 armv7_pmnc_read(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
        return val;
}
static inline void armv7_pmnc_write(u32 val)
{
        val &= ARMV7_PMNC_MASK;
        asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
        return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV7_IDX_CYCLE_COUNTER &&
                idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}
static inline int armv7_pmnc_select_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
        return idx;
}
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV7_IDX_CYCLE_COUNTER)
                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
        else if (armv7_pmnc_select_counter(idx) == idx)
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

        return value;
}
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV7_IDX_CYCLE_COUNTER)
                asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
        else if (armv7_pmnc_select_counter(idx) == idx)
                asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
        if (armv7_pmnc_select_counter(idx) == idx) {
                val &= ARMV7_EVTYPE_MASK;
                asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
        }
}
static inline int armv7_pmnc_enable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
        return idx;
}
static inline int armv7_pmnc_disable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
        return idx;
}
static inline int armv7_pmnc_enable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
        return idx;
}
static inline int armv7_pmnc_disable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
        return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
        u32 val;

        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

        /* Write to clear flags */
        val &= ARMV7_FLAG_MASK;
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

        return val;
}
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
        u32 val;
        unsigned int cnt;

        printk(KERN_INFO "PMNC registers dump:\n");

        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        printk(KERN_INFO "PMNC  =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
        printk(KERN_INFO "CNTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
        printk(KERN_INFO "INTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
        printk(KERN_INFO "FLAGS =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
        printk(KERN_INFO "SELECT=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
        printk(KERN_INFO "CCNT  =0x%08x\n", val);

        for (cnt = ARMV7_IDX_COUNTER0;
                        cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
                armv7_pmnc_select_counter(cnt);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
                printk(KERN_INFO "CNT[%d] count =0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
                asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
                printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
        }
}
static void armv7pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We only need to set the event for the cycle counter if we
         * have the ability to perform event filtering.
         */
        if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /* Enable interrupt for this counter */
        armv7_pmnc_enable_intens(idx);

        /* Enable counter */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /* Disable interrupt for this counter */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmnc;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmnc = armv7_pmnc_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv7_pmnc_has_overflowed(pmnc))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
                if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV7_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try and use
         * the events counters
         */
        for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV7_EXCLUDE_USER;
        if (attr->exclude_kernel)
                config_base |= ARMV7_EXCLUDE_PL1;
        if (!attr->exclude_hv)
                config_base |= ARMV7_INCLUDE_HYP;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
static void armv7pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv7_pmnc_disable_counter(idx);
                armv7_pmnc_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits */
        armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a8_perf_map,
                                &armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a9_perf_map,
                                &armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a5_perf_map,
                                &armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a15_perf_map,
                                &armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a7_perf_map,
                                &armv7_a7_perf_cache_map, 0xFF);
}
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq     = armv7pmu_handle_irq;
        cpu_pmu->enable         = armv7pmu_enable_event;
        cpu_pmu->disable        = armv7pmu_disable_event;
        cpu_pmu->read_counter   = armv7pmu_read_counter;
        cpu_pmu->write_counter  = armv7pmu_write_counter;
        cpu_pmu->get_event_idx  = armv7pmu_get_event_idx;
        cpu_pmu->start          = armv7pmu_start;
        cpu_pmu->stop           = armv7pmu_stop;
        cpu_pmu->reset          = armv7pmu_reset;
        cpu_pmu->max_period     = (1LLU << 32) - 1;
}
static u32 armv7_read_num_pmnc_events(void)
{
        u32 nb_cnt;

        /* Read the nb of CNTx counters supported from PMNC */
        nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

        /* Add the CPU cycles counter and return */
        return nb_cnt + 1;
}
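/*
 * Example (added for illustration, not in the original source): on a
 * Cortex-A8, which has 4 configurable event counters, the PMNC N field
 * (bits [15:11]) reads back as 4, so this helper returns 5 once the cycle
 * counter is included; that value becomes cpu_pmu->num_events in the
 * per-CPU init functions below.
 */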
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name           = "ARMv7 Cortex-A8";
        cpu_pmu->map_event      = armv7_a8_map_event;
        cpu_pmu->num_events     = armv7_read_num_pmnc_events();
        return 0;
}
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name           = "ARMv7 Cortex-A9";
        cpu_pmu->map_event      = armv7_a9_map_event;
        cpu_pmu->num_events     = armv7_read_num_pmnc_events();
        return 0;
}
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name           = "ARMv7 Cortex-A5";
        cpu_pmu->map_event      = armv7_a5_map_event;
        cpu_pmu->num_events     = armv7_read_num_pmnc_events();
        return 0;
}
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name           = "ARMv7 Cortex-A15";
        cpu_pmu->map_event      = armv7_a15_map_event;
        cpu_pmu->num_events     = armv7_read_num_pmnc_events();
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
}
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name           = "ARMv7 Cortex-A7";
        cpu_pmu->map_event      = armv7_a7_map_event;
        cpu_pmu->num_events     = armv7_read_num_pmnc_events();
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
        return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
        return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
        return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
        return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
        return -ENODEV;
}
#endif /* CONFIG_CPU_V7 */