/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
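
/*
 * Note: for PERF_TYPE_HW_CACHE events the perf ABI packs the descriptor
 * into attr.config as (cache id) | (op id << 8) | (result id << 16).
 * An illustrative example: an L1 data-cache read miss is encoded as
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), which the decoder above
 * splits back into the three indices used to look up cache_map.
 */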
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
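
/*
 * Note: the event->pmu->type check above covers the case where the PMU was
 * registered with a dynamically allocated type (perf_pmu_register() called
 * with type == -1); events opened directly against that type carry a raw,
 * hardware-specific event number in attr.config, so no table lookup is done.
 */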
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
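
/*
 * Illustrative example of the programming above, assuming a 32-bit counter
 * with max_period == 0xffffffff: if left == 0x1000, prev_count and the
 * hardware counter are set to -0x1000 (0xfffff000), so the counter
 * overflows and raises the PMU interrupt after exactly 0x1000 more events.
 */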
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
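
/*
 * Masking the delta with max_period above makes counter wrap-around
 * harmless: with a 32-bit counter (max_period == 0xffffffff),
 * prev == 0xfffffff0 and new == 0x00000010 give a delta of 0x20 rather
 * than a huge unsigned value.
 */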
static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
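
/*
 * The validation above simulates scheduling the leader, all existing
 * siblings and the new event onto a scratch used_mask: if get_event_idx()
 * can find a free counter for every member, the whole group can be put on
 * the hardware at once and the new event may join the group.
 */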
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}
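
/*
 * Timing the handler with sched_clock() and reporting the duration via
 * perf_sample_event_took() lets the perf core throttle the maximum sample
 * rate if PMU interrupt handling starts to consume too much CPU time.
 */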
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
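
/*
 * Illustrative example of the non-sampling default above: with a 32-bit
 * counter (max_period == 0xffffffff), sample_period becomes 0x7fffffff,
 * so a plain counting event still takes an overflow interrupt roughly
 * every 2^31 events purely to fold the hardware counter into the 64-bit
 * event count.
 */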
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
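
/*
 * Typical usage (illustrative, not part of this file): a CPU PMU backend
 * fills in a struct arm_pmu (handle_irq, enable/disable, read_counter,
 * write_counter, get_event_idx, map_event, max_period, num_events, name,
 * plat_device, ...) and then calls armpmu_register(), commonly with
 * type == -1 so that the perf core assigns a dynamic PMU type.
 */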