/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff
#define TOTAL_BITS	32
#define HIGHEST_BIT	31

#define MIPS_MAX_HWEVENTS 4
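
/*
 * Note on the constants above: a 32bit MIPS performance counter signals
 * overflow based on its bit 31, so this code never programs bit 31 into
 * the hardware.  Values written to a counter are masked with VALID_COUNT
 * (bits 0..30) and the "missing" top bit is tracked per counter in
 * software (see cpu_hw_events::msbs below), while MAX_PERIOD still spans
 * the full TOTAL_BITS-wide range.
 */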
struct cpu_hw_events {
        /* Array of events on this cpu. */
        struct perf_event       *events[MIPS_MAX_HWEVENTS];

        /*
         * Set the bit (indexed by the counter number) when the counter
         * is used for an event.
         */
        unsigned long           used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

        /*
         * The borrowed MSB for the performance counter. A MIPS performance
         * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
         * counters) as a factor of determining whether a counter overflow
         * should be signaled. So here we use a separate MSB for each
         * counter to make things easy.
         */
        unsigned long           msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

        /*
         * Software copy of the control register for each performance counter.
         * MIPS CPUs vary in their performance counters; they use this field
         * differently, and some may not use it at all.
         */
        unsigned int            saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
        unsigned int event_id;
        /*
         * MIPS performance counters are indexed starting from 0.
         * CNTR_EVEN indicates the indexes of the counters to be used are
         * even numbers, CNTR_ODD that they are odd numbers.
         */
        unsigned int cntr_mask;
        #define CNTR_EVEN       0x55555555
        #define CNTR_ODD        0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
        enum {
                T  = 0,
                V  = 1,
                P  = 2,
        } range;
#else
        #define T
        #define V
        #define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
struct mips_pmu {
        const char      *name;
        int             irq;
        irqreturn_t     (*handle_irq)(int irq, void *dev);
        int             (*handle_shared_irq)(void);
        void            (*start)(void);
        void            (*stop)(void);
        int             (*alloc_counter)(struct cpu_hw_events *cpuc,
                                        struct hw_perf_event *hwc);
        u64             (*read_counter)(unsigned int idx);
        void            (*write_counter)(unsigned int idx, u64 val);
        void            (*enable_event)(struct hw_perf_event *evt, int idx);
        void            (*disable_event)(int idx);
        const struct mips_perf_event *(*map_raw_event)(u64 config);
        const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
        const struct mips_perf_event (*cache_event_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
        unsigned int    num_counters;
};

static const struct mips_pmu *mipspmu;
static int
mipspmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
        u64 uleft;
        unsigned long flags;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)MAX_PERIOD)
                left = MAX_PERIOD;

        local64_set(&hwc->prev_count, (u64)-left);

        local_irq_save(flags);
        uleft = (u64)(-left) & MAX_PERIOD;
        uleft > VALID_COUNT ?
                set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
        mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
        local_irq_restore(flags);

        perf_event_update_userpage(event);

        return ret;
}
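
/*
 * Example of the reload arithmetic above (the numbers are illustrative):
 * with a sample period of 0x100, left == 0x100, so (u64)(-left) &
 * MAX_PERIOD == 0xffffff00.  That exceeds VALID_COUNT, so the software
 * MSB for this counter is set in cpuc->msbs and the value written to the
 * hardware counter is 0xffffff00 & VALID_COUNT == 0x7fffff00.  The
 * counter then needs 0x100 increments before bit 31, the overflow bit,
 * becomes set.
 */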
static void mipspmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;
        int shift = 64 - TOTAL_BITS;
        s64 prev_raw_count, new_raw_count;
        u64 delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        local_irq_save(flags);
        /* Make the counter value be a "real" one. */
        new_raw_count = mipspmu->read_counter(idx);
        if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
                new_raw_count &= VALID_COUNT;
                clear_bit(idx, cpuc->msbs);
        } else
                new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
        local_irq_restore(flags);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}
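
/*
 * Note on the shift trick above: shift == 64 - TOTAL_BITS, so both raw
 * counts are compared in the top TOTAL_BITS bits of a 64bit value and
 * the difference is shifted back down.  The resulting delta is correct
 * modulo 2^TOTAL_BITS even when the hardware counter wrapped between
 * two reads.
 */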
static void mipspmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!mipspmu)
                return;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        /* Set the period for the event. */
        mipspmu_event_set_period(event, hwc, hwc->idx);

        /* Enable the event. */
        mipspmu->enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!mipspmu)
                return;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                /* We are working on a local event. */
                mipspmu->disable_event(hwc->idx);
                barrier();
                mipspmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}
static int mipspmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* Look for a free counter for this event. */
        idx = mipspmu->alloc_counter(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then
         * make sure it is disabled.
         */
        event->hw.idx = idx;
        mipspmu->disable_event(idx);
        cpuc->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                mipspmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}
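
/*
 * Summary of the pmu callbacks in this file: the perf core calls
 * mipspmu_add()/mipspmu_del() when an event is scheduled onto or off
 * this CPU, and mipspmu_start()/mipspmu_stop() to (re)arm or pause an
 * already-added event.  mipspmu_add() only claims a counter; the counter
 * is programmed and enabled by mipspmu_start(), either immediately via
 * PERF_EF_START or by a later start callback.
 */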
static void mipspmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

        mipspmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
        if (mipspmu)
                mipspmu->start();
}

static void mipspmu_disable(struct pmu *pmu)
{
        if (mipspmu)
                mipspmu->stop();
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
        int err;

        if (mipspmu->irq >= 0) {
                /* Request my own irq handler. */
                err = request_irq(mipspmu->irq, mipspmu->handle_irq,
                        IRQF_DISABLED | IRQF_NOBALANCING,
                        "mips_perf_pmu", NULL);
                if (err)
                        pr_warning("Unable to request IRQ%d for MIPS "
                                "performance counters!\n", mipspmu->irq);
        } else if (cp0_perfcount_irq < 0) {
                /*
                 * We are sharing the irq number with the timer interrupt.
                 */
                save_perf_irq = perf_irq;
                perf_irq = mipspmu->handle_shared_irq;
                err = 0;
        } else {
                pr_warning("The platform hasn't properly defined its "
                        "interrupt controller.\n");
                err = -ENOENT;
        }

        return err;
}
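
/*
 * Note: there are three cases above.  A PMU with a dedicated interrupt
 * line requests its own handler; a PMU that shares its interrupt with
 * the CP0 timer (cp0_perfcount_irq < 0) hooks the shared perf_irq
 * callback instead; otherwise the platform has not described its
 * interrupt routing and mipspmu_get_irq() fails, so overflow interrupts
 * (and thus sampling) are unavailable.
 */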
static void mipspmu_free_irq(void)
{
        if (mipspmu->irq >= 0)
                free_irq(mipspmu->irq, NULL);
        else if (cp0_perfcount_irq < 0)
                perf_irq = save_perf_irq;
}
/*
 * mipsxx/rm9000/loongson2 have different performance counters, so each
 * has its own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events,
                                &pmu_reserve_mutex)) {
                /*
                 * We must not call the destroy function with interrupts
                 * disabled.
                 */
                on_each_cpu(reset_counters,
                        (void *)(long)mipspmu->num_counters, 1);
                mipspmu_free_irq();
                mutex_unlock(&pmu_reserve_mutex);
        }
}
static int mipspmu_event_init(struct perf_event *event)
{
        int err = 0;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        if (!mipspmu || event->cpu >= nr_cpumask_bits ||
                (event->cpu >= 0 && !cpu_online(event->cpu)))
                return -ENODEV;

        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
                        return -ENOSPC;
                }

                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0)
                        err = mipspmu_get_irq();

                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}
static struct pmu pmu = {
        .pmu_enable     = mipspmu_enable,
        .pmu_disable    = mipspmu_disable,
        .event_init     = mipspmu_event_init,
        .add            = mipspmu_add,
        .del            = mipspmu_del,
        .start          = mipspmu_start,
        .stop           = mipspmu_stop,
        .read           = mipspmu_read,
};
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
        return ((unsigned int)pev->range << 24) |
                (pev->cntr_mask & 0xffff00) |
                (pev->event_id & 0xff);
#else
        return (pev->cntr_mask & 0xffff00) |
                (pev->event_id & 0xff);
#endif
}
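
/*
 * Example of the encoding (illustrative values): an event with event_id
 * 0x02 that may run on any even-numbered counter (cntr_mask == CNTR_EVEN)
 * and, under CONFIG_MIPS_MT_SMP, range P (2) encodes as
 * (2 << 24) | (0x55555555 & 0xffff00) | 0x02 == 0x02555502.
 */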
static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
        const struct mips_perf_event *pev;

        pev = ((*mipspmu->general_event_map)[idx].event_id ==
                UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
                &(*mipspmu->general_event_map)[idx]);

        return pev;
}
static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct mips_perf_event *pev;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pev = &((*mipspmu->cache_event_map)
                                        [cache_type]
                                        [cache_op]
                                        [cache_result]);

        if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
                return ERR_PTR(-EOPNOTSUPP);

        return pev;
}
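
/*
 * Note: the config layout decoded above is the generic perf hw_cache
 * encoding, i.e. config == cache_type | (cache_op << 8) |
 * (cache_result << 16), which is what the perf core passes for
 * PERF_TYPE_HW_CACHE events.
 */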
static int validate_event(struct cpu_hw_events *cpuc,
                        struct perf_event *event)
{
        struct hw_perf_event fake_hwc = event->hw;

        /* Allow mixed event group. So return 1 to pass validation. */
        if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

        return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
static int validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_cpuc;

        memset(&fake_cpuc, 0, sizeof(fake_cpuc));

        if (!validate_event(&fake_cpuc, leader))
                return -ENOSPC;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
                        return -ENOSPC;
        }

        if (!validate_event(&fake_cpuc, event))
                return -ENOSPC;

        return 0;
}
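
/*
 * Note: group validation dry-runs counter allocation on a zeroed,
 * stack-local cpu_hw_events.  alloc_counter() is expected to claim a
 * slot in the cpu_hw_events it is passed, so the checks above fail
 * exactly when the leader, its siblings and the new event together
 * would need more counters than the PMU provides.
 */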
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
        int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc = &event->hw;

        mipspmu_event_update(event, hwc, idx);
        data->period = event->hw.last_period;
        if (!mipspmu_event_set_period(event, hwc, idx))
                return;

        if (perf_event_overflow(event, 0, data, regs))
                mipspmu->disable_event(idx);
}
#include "perf_event_mipsxx.c"
/* Callchain handling code. */

/*
 * Leave userspace callchain empty for now. When we find a way to trace
 * the user stack callchains, we will add it here.
 */
void perf_callchain_user(struct perf_callchain_entry *entry,
                    struct pt_regs *regs)
{
}
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
        unsigned long reg29)
{
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;

        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr)) {
                        perf_callchain_store(entry, addr);
                        if (entry->nr >= PERF_MAX_STACK_DEPTH)
                                break;
                }
        }
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
                      struct pt_regs *regs)
{
        unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                unsigned long stack_page =
                        (unsigned long)task_stack_page(current);
                if (stack_page && sp >= stack_page &&
                    sp <= stack_page + THREAD_SIZE - 32)
                        save_raw_perf_callchain(entry, sp);
                return;
        }
        do {
                perf_callchain_store(entry, pc);
                if (entry->nr >= PERF_MAX_STACK_DEPTH)
                        break;
                pc = unwind_stack(current, &sp, pc, &ra);
        } while (pc);
#else
        save_raw_perf_callchain(entry, sp);
#endif
}
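
/*
 * Note: with CONFIG_KALLSYMS the kernel callchain is produced by walking
 * frames with unwind_stack(), starting from cp0_epc/ra, and only falls
 * back to a raw scan of the kernel stack (save_raw_perf_callchain) when
 * raw_show_trace is set or the trap PC is not a kernel text address.
 * Without CONFIG_KALLSYMS the raw scan is the only option.
 */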