/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>

/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also the number of entries valid in the arrays below. */
	int			n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
	/* Subroutine for checking validity of a raw event for this PMU. */
	int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;

#define HW_OP_UNSUPPORTED -1

/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in same order as enum
 * ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES, */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};
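
/*
 * For illustration only: following the table above, a lone EV67_CYCLES
 * event defaults to config EV67_PCTR_INSTR_CYCLES on PMC 1, and a lone
 * EV67_INSTRUCTIONS event to the same config on PMC 0.
 * ev67_check_constraints() below revises the config and indices whenever
 * two events are scheduled together.
 */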

/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}
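
/*
 * To summarise the constraint logic above, only three two-event
 * combinations can be scheduled on the EV67, each with a fixed PMC
 * assignment:
 *
 *   instructions (PMC 0) + cycles        (PMC 1) -> EV67_PCTR_INSTR_CYCLES
 *   instructions (PMC 0) + Bcache misses (PMC 1) -> EV67_PCTR_INSTR_BCACHEMISS
 *   cycles       (PMC 0) + MBOX replays  (PMC 1) -> EV67_PCTR_CYCLES_MBOX
 *
 * Any other pairing falls through to the conflict return above.
 */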

static int ev67_raw_event_valid(u64 config)
{
	return config >= EV67_CYCLES && config < EV67_LAST_ET;
}


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints,
	.raw_event_valid = ev67_raw_event_valid,
};

/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}
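
/*
 * A worked example of the scheme above: with left = 1000 the PMC is
 * written with (unsigned long)(-1000), truncated by alpha_write_pmc() to
 * the counter width, so the counter reaches its maximum and raises the
 * overflow PMI after a further 1000 events.  prev_count records the same
 * value so that alpha_perf_event_update() can recover the delta.
 */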

/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racey on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
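
/*
 * Note the use of ovf above: the PMI handler calls this routine with
 * ovf = pmc_max_period[idx] + 1 so that the wrap which raised the
 * interrupt is counted; all other callers pass ovf = 0 and rely on the
 * delta < 0 fix-up for the rare unreported wrap.
 */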

/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	list_for_each_entry(pe, &group->sibling_list, group_entry) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}

/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{
	/* No HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}

/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
	cpuc->config = cpuc->event[0]->hw.config_base;
}

/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}

/* Remove a HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}

static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}

static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}

static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}

/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types with one
	 * only programmable via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		if (!alpha_pmu->raw_event_valid(attr->config))
			return -EINVAL;
		ev = attr->config;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/* The EV67 does not support mode exclusion */
	if (attr->exclude_kernel || attr->exclude_user
			|| attr->exclude_hv || attr->exclude_idle) {
		return -EPERM;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				alpha_pmu->num_pmcs - 1,
				evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}

/*
 * Main entry point - disable HW performance counters.
 */
static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
};

/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}

/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__get_cpu_var(irq_pmi_count)++;
	cpuc = &__get_cpu_var(cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			alpha_pmu_stop(event, 0);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}

/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return 0;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector */
	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);
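
/*
 * Once registered, the PMU is driven entirely through the generic
 * perf_event syscall interface; for example (assuming the userspace perf
 * tool is available):
 *
 *   perf stat -e cycles,instructions <cmd>    # one EV67 counter pair
 *   perf record -e cycles <cmd>               # sampled profiling via the PMI
 */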