/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
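
/*
 * These event numbers are programmed directly into EVTSEL (see
 * armv7_pmnc_write_evtsel() below). Since armv7pmu.raw_event_mask is
 * 0xFF, userspace can also request any of them as a raw event; for
 * example, "perf stat -e r3" should count ARMV7_PERFCTR_DCACHE_REFILL
 * (0x03), though the exact raw-event syntax depends on the perf tool
 * version in use.
 */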
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
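
/*
 * The C(_x) macro in these cache maps abbreviates
 * PERF_COUNT_HW_CACHE_##_x; it is defined in the common ARM
 * perf_event code that this file belongs to.
 */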
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    =
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
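
/*
 * Example: on a Cortex-A8, armv7_reset_read_pmnc() below returns 5
 * (4 event counters plus the cycle counter), so the cycle counter
 * lives at perf index 1 and the event counters at indices 2 to 5.
 */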
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
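
/*
 * For example, armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C), as done
 * in armv7_reset_read_pmnc() below, resets the event counters and the
 * cycle counter without starting them, since ARMV7_PMNC_E stays clear.
 */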
/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
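
/*
 * Worked example: perf index ARMV7_COUNTER0 (2) drives hardware
 * counter CNT0, so ARMV7_CNTENS_P(ARMV7_COUNTER0) evaluates to
 * (1 << 0), while the cycle counter always uses bit 31.
 */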
/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}
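
/*
 * E.g. for perf index 2 (the first event counter) the check above
 * tests ARMV7_FLAG_P(2) == (1 << 0), matching bit 0 of the overflow
 * flag register.
 */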
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
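
/*
 * The flag register is write-one-to-clear: writing back exactly the
 * bits that were read clears those overflow flags, while an overflow
 * that lands between the read and the write sets a bit that is not in
 * val and therefore survives until the next interrupt.
 */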
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the events counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
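
/*
 * A consequence of the allocation above: a second cycle-count event
 * cannot be scheduled while the first is active; its
 * test_and_set_bit() on ARMV7_CYCLE_COUNTER fails and -EAGAIN is
 * returned rather than falling back to a programmable event counter.
 */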
static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};
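
/*
 * .max_period is (1LLU << 32) - 1 because every ARMv7 PMU counter,
 * including the cycle counter, is 32 bits wide.
 */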
static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
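
/*
 * Example: a Cortex-A8 reports N = 4 in the PMNC counter field, so
 * the function above returns 5: four event counters plus the cycle
 * counter.
 */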
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}
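
/*
 * The common ARM perf init code (outside this file) is expected to
 * pick one of these init functions based on the CPUID part number
 * read at boot; both variants share the single armv7pmu instance and
 * differ only in their event and cache maps.
 */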
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */