/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in performance counters; they use this differently,
	 * and some may not use it at all.
	 */
	unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

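/*
 * CNTR_EVEN and CNTR_ODD are bit patterns, not indexes: bit i of
 * cntr_mask is set when hardware counter i may count the event, so
 * 0x55555555 selects the even-numbered counters and 0xaaaaaaaa the odd
 * ones. mipsxx_pmu_alloc_counter() below tests these bits directly.
 */
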
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)
#define M_PERFCTL_TC			(1 << 30)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					 M_PERFCTL_KERNEL |		\
					 M_PERFCTL_USER |		\
					 M_PERFCTL_SUPERVISOR |		\
					 M_PERFCTL_INTERRUPT_ENABLE)

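/*
 * For illustration: a control word counting event 0x0a in user and
 * kernel mode with overflow interrupts enabled would be assembled as
 * M_PERFCTL_EVENT(0x0a) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 * M_PERFCTL_INTERRUPT_ENABLE; mipsxx_pmu_enable_event() and
 * __hw_perf_event_init() below build exactly these fields.
 */
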
#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

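/*
 * With shared TC counters the hardware counters are split between the
 * VPEs: e.g. a core reporting 4 counters with two VPEs leaves each VPE
 * 2 counters (counters >> 1 in counters_total_to_per_cpu() above).
 */
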
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

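/*
 * When the counters are shared between two VPEs, perf indexes each
 * VPE's counters from 0 while VPE 1 actually owns the upper pair of
 * hardware counters; swizzle logical indexes 0/1 to physical 2/3 (and
 * vice versa) before touching the registers.
 */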
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

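/*
 * The allocator below walks counters from highest to lowest: e.g. with
 * four counters and an event restricted to CNTR_EVEN, cntr_mask is
 * 0x5555, so counter 2 is tried first and then counter 0.
 */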
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the counter
		 * that an event of the latter kind wants to use, the
		 * "counter allocation" for the latter event will fail,
		 * even though swapping the two assignments would satisfy
		 * both. We leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
		    !test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupts are enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | M_PERFCTL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}

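/*
 * Example: on a core with 32-bit counters, mipspmu.overflow is 1 << 31;
 * with a sample period of 1000 the counter is preloaded with
 * 0x80000000 - 1000, so the overflow interrupt fires after 1000 events.
 * prev_count records the preloaded value for the delta computation in
 * mipspmu_event_update() below.
 */
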
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, we simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING,
				  "mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				   "performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			   "interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they have
 * specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				      &pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			    (void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
	/*
	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
	 * event_id.
	 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

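/*
 * For example, the 24K branch event { 0x02, CNTR_EVEN, T } encodes to
 * 0x555502: range T (0) in the top byte, the even-counter mask 0x5555
 * in the middle 16 bits, and event number 0x02 in the low byte.
 */
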
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

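/*
 * The cache config follows the generic perf encoding: cache type in the
 * low byte, op in the next byte and result in the third, so e.g. an L1D
 * read miss arrives as C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */
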
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

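/*
 * __n_counters() above probes the hardware: Config1.PC says whether any
 * performance counters exist at all, and the M ("more") bit in each
 * control register says whether another counter/control pair follows.
 */
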
static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		/* fall through: clear all lower counters too */
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		/* fall through */
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		/* fall through */
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * User can use 0-255 raw events, where 0-127 are for the events of even
 * counters, and 128-255 are for odd counters. Note that bit 7 is used to
 * indicate the parity. So, for example, when a user wants to take the
 * Event Num of 15 for odd counters (by referring to the user manual),
 * then 128 needs to be added to 15 as the input for the event config,
 * i.e., 143 (0x8F) is to be used.
 */
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
	}

	return &raw_event;
}

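/*
 * For example, raw config 0x8f on a 24K maps to base event 0x0f with
 * cntr_mask CNTR_ODD (bit 7 set selects the odd counters), matching
 * the encoding described above mipsxx_pmu_map_raw_event().
 */
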
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if ((cp0_perfcount_irq >= 0) &&
		    (cp0_compare_irq != cp0_perfcount_irq))
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);