/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>		/* For cp0_perfcount_irq */
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff
#define TOTAL_BITS	32
#define HIGHEST_BIT	31

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * The borrowed MSB for the performance counter. A MIPS performance
	 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
	 * counters) as a factor of determining whether a counter overflow
	 * should be signaled. So here we use a separate MSB for each
	 * counter to make things easy.
	 */
	unsigned long		msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters: they use this field
	 * differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

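/*
 * Note on the scheme above (an illustrative summary, not part of the
 * original comments): a counter is always programmed with only the low
 * 31 bits (VALID_COUNT) of its 32-bit start value, while the would-be
 * bit 31 is remembered in cpuc->msbs. The hardware's own bit 31 thus
 * stays free to rise and signal the overflow interrupt, yet sample
 * periods up to MAX_PERIOD (2^32 - 1) remain usable.
 * mipspmu_event_set_period() performs the split and
 * mipspmu_event_update() folds the software MSB back in when the
 * counter is read.
 */
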
/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	const char	*name;
	int		irq;
	irqreturn_t	(*handle_irq)(int irq, void *dev);
	int		(*handle_shared_irq)(void);
	void		(*start)(void);
	void		(*stop)(void);
	int		(*alloc_counter)(struct cpu_hw_events *cpuc,
					struct hw_perf_event *hwc);
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	void		(*enable_event)(struct hw_perf_event *evt, int idx);
	void		(*disable_event)(int idx);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static const struct mips_pmu *mipspmu;

static int
mipspmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;
	u64 uleft;
	unsigned long flags;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	local_irq_save(flags);
	uleft = (u64)(-left) & MAX_PERIOD;
	uleft > VALID_COUNT ?
		set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
	mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
	local_irq_restore(flags);

	perf_event_update_userpage(event);

	return ret;
}

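/*
 * Worked example for mipspmu_event_set_period() (illustrative numbers,
 * not from the original comments): with a sample period of 0x1000,
 * left == 0x1000 and the 32-bit start value is
 * (u64)(-left) & MAX_PERIOD == 0xfffff000. Its bit 31 is set, so the
 * MSB for this counter is recorded in cpuc->msbs and only the low
 * 31 bits, 0x7ffff000, are written to the hardware counter. After
 * 0x1000 increments the hardware value reaches 0x80000000; its bit 31
 * rises and the overflow interrupt is raised.
 */
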
static void mipspmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int shift = 64 - TOTAL_BITS;
	s64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	local_irq_save(flags);
	/* Make the counter value be a "real" one. */
	new_raw_count = mipspmu->read_counter(idx);
	if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
		new_raw_count &= VALID_COUNT;
		clear_bit(idx, cpuc->msbs);
	} else
		new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
	local_irq_restore(flags);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

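/*
 * In mipspmu_event_update() above, shifting both raw counts up by
 * (64 - TOTAL_BITS) before subtracting makes the delta wrap modulo
 * 2^TOTAL_BITS. For example (illustrative numbers): with
 * prev_raw_count == 0xfffff000 and new_raw_count == 0x00000010 the
 * computation yields delta == 0x1010, crediting the event correctly
 * across the 32-bit wrap.
 */
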
static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipspmu->disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipspmu->alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipspmu->disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->start();
}

static void mipspmu_disable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->stop();
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu->irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu->irq, mipspmu->handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				"performance counters!\n", mipspmu->irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipspmu->handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu->irq >= 0)
		free_irq(mipspmu->irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have their own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu->num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
		(event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

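/*
 * For example (illustrative values, not from the original comments): on
 * a CONFIG_MIPS_MT_SMP kernel, an event with range P (2), cntr_mask
 * CNTR_EVEN and event_id 0x0a encodes to
 * (2 << 24) | (0x55555555 & 0xffff00) | 0x0a == 0x0255550a.
 */
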
static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu->general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu->general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu->cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

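/*
 * The config value decoded above follows the generic perf hw_cache
 * encoding: byte 0 selects the cache, byte 1 the operation and byte 2
 * the result. For example, an L1 data-cache read miss is requested with
 * config == (C(RESULT_MISS) << 16) | (C(OP_READ) << 8) | C(L1D).
 */
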
static int validate_event(struct cpu_hw_events *cpuc,
			struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	/* Allow mixed event groups, so return 1 to pass validation. */
	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
	int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipspmu->disable_event(idx);
}

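/*
 * The chip-specific interrupt handlers pulled in below are expected to
 * scan the counters on the interrupted CPU and, for each overflowed
 * counter that has an event installed in cpuc->events[], call
 * handle_associated_event() with the counter index, a perf_sample_data
 * initialized for the overflow, and the interrupted registers.
 */
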
#include "perf_event_mipsxx.c"

/* Callchain handling code. */

/*
 * Leave userspace callchains empty for now. When we find a way to trace
 * the user stack callchains, we will add it here.
 */
void perf_callchain_user(struct perf_callchain_entry *entry,
		    struct pt_regs *regs)
{
}

static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
	unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr)) {
			perf_callchain_store(entry, addr);
			if (entry->nr >= PERF_MAX_STACK_DEPTH)
				break;
		}
	}
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
		      struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		unsigned long stack_page =
			(unsigned long)task_stack_page(current);
		if (stack_page && sp >= stack_page &&
		    sp <= stack_page + THREAD_SIZE - 32)
			save_raw_perf_callchain(entry, sp);
		return;
	}
	do {
		perf_callchain_store(entry, pc);
		if (entry->nr >= PERF_MAX_STACK_DEPTH)
			break;
		pc = unwind_stack(current, &sp, pc, &ra);
	} while (pc);
#else
	save_raw_perf_callchain(entry, sp);
#endif
}