/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */
#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008
#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
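
/*
 * Illustrative sketch, not part of the original driver: on xscale1 the
 * event numbers for both event counters live inside the PMNC register
 * itself, in the 8-bit fields described by the EVT_SHFT/EVT_MASK pairs
 * above. Assuming that layout, a hypothetical helper to read back the
 * event number a counter is programmed with would look like this
 * (only meaningful for the two event counters, not the CCNT):
 */
static inline u32 xscale1_pmnc_event(u32 pmnc, enum xscale_counters counter)
{
	if (counter == XSCALE_COUNTER0)
		return (pmnc & XSCALE1_COUNT0_EVT_MASK) >> XSCALE1_COUNT0_EVT_SHFT;

	return (pmnc & XSCALE1_COUNT1_EVT_MASK) >> XSCALE1_COUNT1_EVT_SHFT;
}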
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}
static void
xscale1pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}
static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
static int xscale_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &xscale_perf_map,
			     &xscale_perf_cache_map, 0xFF);
}
static struct arm_pmu xscale1pmu = {
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.map_event	= xscale_map_event,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
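
/*
 * Illustrative note, not part of the original driver: max_period above
 * reflects the 32-bit up-counters. The generic armpmu layer arms a
 * counter for a sampling period by preloading it with the two's
 * complement of the period, so the overflow interrupt fires after
 * "period" events. A minimal sketch of that preload calculation,
 * assuming a 32-bit counter:
 */
static inline u32 xscale_period_to_preload(u64 period)
{
	/* the counter interrupts on the wrap from 0xffffffff to 0 */
	return (u32)(0 - period);
}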
static struct arm_pmu *__init xscale1pmu_init(void)
{
	return &xscale1pmu;
}
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
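
/*
 * Illustrative sketch, not part of the original driver: unlike xscale1,
 * xscale2 programs event numbers through a dedicated event-select
 * register, one byte per event counter, as the regular 8 * N shifts
 * above suggest. Assuming that layout, the per-counter field shift
 * could be computed instead of switched on; this hypothetical helper
 * shows the equivalence:
 */
static inline u32 xscale2_evtsel_shift(enum xscale_counters counter)
{
	/* counters 0-3 occupy bytes 0-3 of EVTSEL; the CCNT has no field */
	return (counter - XSCALE_COUNTER0) << 3;
}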
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}
static void
xscale2pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}
static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
static struct arm_pmu xscale2pmu = {
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.map_event	= xscale_map_event,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};
static struct arm_pmu *__init xscale2pmu_init(void)
{
	return &xscale2pmu;
}
#else
static struct arm_pmu *__init xscale1pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init xscale2pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_XSCALE */