arch/alpha/kernel/perf_event.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>

/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int		enabled;
	/* Number of events scheduled; also the number of entries valid in the arrays below. */
	int		n_events;
	/* Number of events added since last hw_perf_disable(). */
	int		n_added;
	/* Events currently scheduled. */
	struct perf_event *event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long	evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int		current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long	config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long	idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
	/* Subroutine for checking validity of a raw event for this PMU. */
	int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;

#define HW_OP_UNSUPPORTED -1

/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)

/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};

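/*
 * Scheduling constraints implied by the mapping above: instructions can
 * only be counted on PMC0, Bcache misses and MBOX replay traps only on
 * PMC1, while cycles may go on either PMC.
 */
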
/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				  unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}

static int ev67_raw_event_valid(u64 config)
{
	return config >= EV67_CYCLES && config < EV67_LAST_ET;
}

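/*
 * The EV67 PMU description: two PMCs whose count fields live in the PCTR
 * register; pmc_max_period below reflects their 20-bit width (2^20 - 1).
 */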
static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints,
	.raw_event_valid = ev67_raw_event_valid,
};

/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];
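
	/*
	 * Program the PMC with -left so that it wraps (and raises a PMI)
	 * after another 'left' events; prev_count remembers the same value
	 * for the delta calculation in alpha_perf_event_update().
	 */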
	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}

/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					     struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;
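
	/*
	 * prev_count holds the full-width reload value written by
	 * alpha_perf_event_set_period(), so mask it down to the PMC's width
	 * before comparing with the raw hardware count.
	 */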
	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	for_each_sibling_event(pe, group) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}

/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{
	/* No HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}

/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
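
	/*
	 * The whole PMU shares a single configuration word; the constraint
	 * check has already ensured that event[0]'s config_base covers every
	 * scheduled event.
	 */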
	cpuc->config = cpuc->event[0]->hw.config_base;
}

/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}

/* Disable performance monitoring unit
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}

static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}

static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}

static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}

/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types, with one
	 * only programmable via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		if (!alpha_pmu->raw_event_valid(attr->config))
			return -EINVAL;
		ev = attr->config;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   alpha_pmu->num_pmcs - 1,
				   evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

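	/*
	 * No sampling period requested: run the event in pure counting mode,
	 * reloading over the PMC's maximum period.
	 */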
	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}

/*
 * Main entry point - disable HW performance counters.
 */
static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}

/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					 struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__this_cpu_inc(irq_pmi_count);
	cpuc = this_cpu_ptr(&cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warn("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warn("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
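
	/*
	 * Fold a full period (pmc_max_period + 1) into the count as the
	 * overflow correction: this PMI means the counter has wrapped once
	 * since it was last programmed.
	 */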
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			alpha_pmu_stop(event, 0);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}

/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return 0;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector */
	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);