/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		33

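/*
 * Counter indices are 1-based (index 0 is reserved, see cpu_hw_events
 * below), so an ARMv7 PMU using the cycle counter plus 31 event counters
 * occupies indices 1..32, which is why the arrays hold 33 entries.
 */
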
/* The events for a given CPU. */
struct cpu_hw_events {
        /*
         * The events that are active on the CPU for the given index. Index 0
         * is reserved.
         */
        struct perf_event *events[ARMPMU_MAX_HWEVENTS];

        /*
         * A 1 bit for an index indicates that the counter is being used for
         * an event. A 0 means that the counter can be used.
         */
        unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

        /*
         * A 1 bit for an index indicates that the counter is actively being
         * used.
         */
        unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct arm_pmu {
        enum arm_perf_pmu_ids id;
        const char      *name;
        irqreturn_t     (*handle_irq)(int irq_num, void *dev);
        void            (*enable)(struct hw_perf_event *evt, int idx);
        void            (*disable)(struct hw_perf_event *evt, int idx);
        int             (*get_event_idx)(struct cpu_hw_events *cpuc,
                                         struct hw_perf_event *hwc);
        u32             (*read_counter)(int idx);
        void            (*write_counter)(int idx, u32 val);
        void            (*start)(void);
        void            (*stop)(void);
        void            (*reset)(void *);
        const unsigned  (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
                                    [PERF_COUNT_HW_CACHE_OP_MAX]
                                    [PERF_COUNT_HW_CACHE_RESULT_MAX];
        const unsigned  (*event_map)[PERF_COUNT_HW_MAX];
        u32             raw_event_mask;
        int             num_events;
        u64             max_period;
};

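/*
 * Each CPU-specific backend (see the #includes of perf_event_xscale.c,
 * perf_event_v6.c and perf_event_v7.c further down) hands back a filled-in
 * arm_pmu from its xxx_pmu_init() constructor. Roughly, a hypothetical
 * backend would describe itself along these lines (the names and numbers
 * here are purely illustrative):
 *
 *      static const struct arm_pmu fake_pmu = {
 *              .id             = ARM_PERF_PMU_ID_V6,
 *              .name           = "fake",
 *              .handle_irq     = fake_pmu_handle_irq,
 *              .enable         = fake_pmu_enable_event,
 *              .disable        = fake_pmu_disable_event,
 *              .get_event_idx  = fake_pmu_get_event_idx,
 *              .read_counter   = fake_pmu_read_counter,
 *              .write_counter  = fake_pmu_write_counter,
 *              .cache_map      = &fake_perf_cache_map,
 *              .event_map      = &fake_perf_map,
 *              .raw_event_mask = 0xFF,
 *              .num_events     = 3,
 *              .max_period     = (1LLU << 32) - 1,
 *      };
 */
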
/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
        int id = -ENODEV;

        if (armpmu != NULL)
                id = armpmu->id;

        return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
        int max_events = 0;

        if (armpmu != NULL)
                max_events = armpmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
        return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
        PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

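/*
 * Cache events arrive packed into the config word as three byte-wide
 * fields, which armpmu_map_cache_event() below unpacks:
 *
 *      config[7:0]   - cache type   (PERF_COUNT_HW_CACHE_*)
 *      config[15:8]  - cache op     (PERF_COUNT_HW_CACHE_OP_*)
 *      config[23:16] - cache result (PERF_COUNT_HW_CACHE_RESULT_*)
 *
 * so, for example, an L1D read miss is encoded roughly as
 * (RESULT_MISS << 16) | (OP_READ << 8) | L1D.
 */
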
static int
armpmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_event(u64 config)
{
        int mapping = (*armpmu->event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
        return (int)(config & armpmu->raw_event_mask);
}

static int
armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

static u64
armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
                    int idx, int overflow)
{
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        new_raw_count &= armpmu->max_period;
        prev_raw_count &= armpmu->max_period;

        if (overflow)
                delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
        else
                delta = new_raw_count - prev_raw_count;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

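/*
 * When an overflow is signalled the raw count has wrapped, so the delta is
 * taken around the wrap: with a max_period of 0xffffffff, a previous value
 * of 0xfffffff0 and a new value of 0x10 (illustrative numbers), the event
 * advanced by 0xffffffff - 0xfffffff0 + 0x10 + 1 = 0x20 counts.
 */
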
static void
armpmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
                armpmu_event_update(event, hwc, hwc->idx, 0);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void
armpmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event, hwc, hwc->idx);
        armpmu->enable(hwc, hwc->idx);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0);

        clear_bit(idx, cpuc->active_mask);
        armpmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(hwc, idx);
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

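/*
 * Note that the event is installed in a stopped state: PERF_HES_STOPPED |
 * PERF_HES_UPTODATE is set above and the counter is only programmed and
 * enabled once armpmu_start() runs, either immediately via PERF_EF_START
 * or later from the core perf code.
 */
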
static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
               struct perf_event *event)
{
        struct hw_perf_event fake_event = event->hw;

        if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_pmu;

        memset(&fake_pmu, 0, sizeof(fake_pmu));

        if (!validate_event(&fake_pmu, leader))
                return -ENOSPC;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -ENOSPC;
        }

        if (!validate_event(&fake_pmu, event))
                return -ENOSPC;

        return 0;
}

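/*
 * Group validation is a dry run of counter allocation: a zeroed
 * cpu_hw_events is offered to get_event_idx() for the leader, each sibling
 * and finally the new event, so a group that could never be scheduled
 * together (say, four events on an ARMv6 PMU with three counters) is
 * rejected at init time rather than silently never counting.
 */
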
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);

        return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

static int
armpmu_reserve_hardware(void)
{
        struct arm_pmu_platdata *plat;
        irq_handler_t handle_irq;
        int i, err = -ENODEV, irq;

        pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
        if (IS_ERR(pmu_device)) {
                pr_warning("unable to reserve pmu\n");
                return PTR_ERR(pmu_device);
        }

        init_pmu(ARM_PMU_DEVICE_CPU);

        plat = dev_get_platdata(&pmu_device->dev);
        if (plat && plat->handle_irq)
                handle_irq = armpmu_platform_irq;
        else
                handle_irq = armpmu->handle_irq;

        if (pmu_device->num_resources < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        for (i = 0; i < pmu_device->num_resources; ++i) {
                irq = platform_get_irq(pmu_device, i);
                if (irq < 0)
                        continue;

                err = request_irq(irq, handle_irq,
                                  IRQF_DISABLED | IRQF_NOBALANCING,
                                  "armpmu", NULL);
                if (err) {
                        pr_warning("unable to request IRQ%d for ARM perf "
                                   "counters\n", irq);
                        break;
                }
        }

        if (err) {
                for (i = i - 1; i >= 0; --i) {
                        irq = platform_get_irq(pmu_device, i);
                        if (irq >= 0)
                                free_irq(irq, NULL);
                }
                release_pmu(ARM_PMU_DEVICE_CPU);
                pmu_device = NULL;
        }

        return err;
}

static void
armpmu_release_hardware(void)
{
        int i, irq;

        for (i = pmu_device->num_resources - 1; i >= 0; --i) {
                irq = platform_get_irq(pmu_device, i);
                if (irq >= 0)
                        free_irq(irq, NULL);
        }
        armpmu->stop();

        release_pmu(ARM_PMU_DEVICE_CPU);
        pmu_device = NULL;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
                armpmu_release_hardware();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int mapping, err;

        /* Decode the generic type into an ARM event identifier. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
                mapping = armpmu_map_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                mapping = armpmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
                mapping = armpmu_map_raw_event(event->attr.config);
        } else {
                pr_debug("event type %x not supported\n", event->attr.type);
                return -EOPNOTSUPP;
        }

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * Check whether we need to exclude the counter from certain modes.
         * The ARM performance counters are on all of the time so if someone
         * has asked us for some excludes then we have to fail.
         */
        if (event->attr.exclude_kernel || event->attr.exclude_user ||
            event->attr.exclude_hv || event->attr.exclude_idle) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EPERM;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;

        /*
         * Store the event encoding into the config_base field. config and
         * event_base are unused as the only 2 things we need to know are
         * the event mapping and the counter to use. The counter to use is
         * also the index and the config_base is the event type.
         */
        hwc->config_base = (unsigned long)mapping;
        hwc->config = 0;
        hwc->event_base = 0;

        if (!hwc->sample_period) {
                hwc->sample_period = armpmu->max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        return err;
}

static int armpmu_event_init(struct perf_event *event)
{
        int err = 0;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        err = armpmu_reserve_hardware();
                }

                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

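/*
 * active_events acts as a reference count on the PMU hardware: the first
 * event initialised takes pmu_reserve_mutex, claims the IRQs via
 * armpmu_reserve_hardware() and bumps the count, while the
 * atomic_inc_not_zero() fast path lets later events skip the mutex
 * entirely. hw_perf_event_destroy() drops the reference and releases the
 * hardware when the last event goes away.
 */
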
static void armpmu_enable(struct pmu *pmu)
{
        /* Enable all of the perf events on hardware. */
        int idx, enabled = 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!armpmu)
                return;

        for (idx = 0; idx <= armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];

                if (!event)
                        continue;

                armpmu->enable(&event->hw, idx);
                enabled = 1;
        }

        if (enabled)
                armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
        if (armpmu)
                armpmu->stop();
}

static struct pmu pmu = {
        .pmu_enable  = armpmu_enable,
        .pmu_disable = armpmu_disable,
        .event_init  = armpmu_event_init,
        .add         = armpmu_add,
        .del         = armpmu_del,
        .start       = armpmu_start,
        .stop        = armpmu_stop,
        .read        = armpmu_read,
};

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
        if (armpmu && armpmu->reset)
                return on_each_cpu(armpmu->reset, NULL, 1);
        return 0;
}
arch_initcall(armpmu_reset);

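/*
 * The PMU is chosen below from the MIDR: bits [31:24] give the implementor
 * and bits [15:4] the part number. A Cortex-A9, for example, reports
 * implementor 0x41 (ARM) and, after masking with 0xfff0, part number
 * 0xc090, which selects armv7_a9_pmu_init().
 */
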
static int __init
init_hw_perf_events(void)
{
        unsigned long cpuid = read_cpuid_id();
        unsigned long implementor = (cpuid & 0xFF000000) >> 24;
        unsigned long part_number = (cpuid & 0xFFF0);

        /* ARM Ltd CPUs. */
        if (0x41 == implementor) {
                switch (part_number) {
                case 0xB360:    /* ARM1136 */
                case 0xB560:    /* ARM1156 */
                case 0xB760:    /* ARM1176 */
                        armpmu = armv6pmu_init();
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = armv6mpcore_pmu_init();
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armpmu = armv7_a8_pmu_init();
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armpmu = armv7_a9_pmu_init();
                        break;
                case 0xC050:    /* Cortex-A5 */
                        armpmu = armv7_a5_pmu_init();
                        break;
                case 0xC0F0:    /* Cortex-A15 */
                        armpmu = armv7_a15_pmu_init();
                        break;
                }
        /* Intel CPUs [xscale]. */
        } else if (0x69 == implementor) {
                part_number = (cpuid >> 13) & 0x7;
                switch (part_number) {
                case 1:
                        armpmu = xscale1pmu_init();
                        break;
                case 2:
                        armpmu = xscale2pmu_init();
                        break;
                }
        }

        if (armpmu) {
                pr_info("enabled with %s PMU driver, %d counters available\n",
                        armpmu->name, armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
        }

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

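/*
 * With APCS-style frames the saved {fp, sp, lr} triple sits immediately
 * below the address held in the frame pointer, hence the "tail = fp - 1"
 * arithmetic used here and in perf_callchain_user(). Roughly, for one
 * frame (descending addresses):
 *
 *      fp ----->  end of the saved register area
 *      fp - 4:    saved lr    \
 *      fp - 8:    saved sp     } struct frame_tail, i.e. fp - 1
 *      fp - 12:   caller's fp /
 *
 * Following the caller's fp the same way walks back up the call chain
 * until the sanity checks above terminate the traversal.
 */
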
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}