Linux 2.6.31.6
linux/fpc-iii.git: arch/powerpc/kernel/perf_counter.c

/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_PPC32 */

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

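	/*
	 * Note on the adjustment below: SIAR is taken to hold the address
	 * of the first instruction of the sampled group, with the (1-based)
	 * MMCRA slot field saying which 4-byte instruction in that group
	 * was actually sampled - hence the 4 * (slot - 1) offset.  This
	 * reading of the slot field is inferred from the code rather than
	 * stated anywhere in it.
	 */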
	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
 * bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if (TRAP(regs) != 0xf00)
		return 0;	/* not a PMU interrupt */

	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ?
			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->dsisr = mfspr(SPRN_MMCRA);
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

#endif /* CONFIG_PPC64 */

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(struct cpu_hw_counters *cpuhw,
				   u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       cpuhw->alternatives[i]);
			event[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
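
	/*
	 * How the constraint arithmetic works (a summary, inferred from its
	 * use here and in the per-CPU PMU drivers): each event supplies a
	 * (mask, value) pair packed into one unsigned long, where bit-fields
	 * of "value" count how many events want each shared PMU resource.
	 * ppmu->add_fields lets those fields be summed with ordinary adds,
	 * and ppmu->test_adder makes a field that has exceeded its capacity
	 * show up as a change under the accumulated mask.
	 */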
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}
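
	/*
	 * The MMCR0 freeze bits are global, so if this group excludes any
	 * processor mode, a counter that is allowed on a limited PMC must
	 * actually be placed there: limited PMCs ignore the freeze
	 * conditions, which is what a counter with no exclude bits needs
	 * (rationale inferred from how limited PMCs are handled elsewhere
	 * in this file).
	 */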
	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
	s64 val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
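	/*
	 * A PMU interrupt can update prev_count and the PMC between our two
	 * reads; the cmpxchg loop retries until the prev_count we used is
	 * still the stored one, so val and prev describe the same interval.
	 */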
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
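
/*
 * Since the hardware won't actually stop PMC5/6 when the PMU is frozen,
 * "freezing" a limited counter just means taking a snapshot: fold the
 * delta since the last snapshot into the 64-bit count and clear hw.idx
 * so the counter is ignored until it is thawed again.
 */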
static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
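		/*
		 * The PMCs raise an interrupt when the 32-bit count turns
		 * negative (its high bit becomes set), so to get an
		 * interrupt after "left" more events the counter is
		 * started at 0x80000000 - left.
		 */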
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
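
/*
 * Gather the group leader and its hardware siblings (skipping software
 * counters and counters that are switched off) into the ctrs/events/flags
 * arrays; returns how many were collected, or -1 if they don't all fit
 * in max_count.
 */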
static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}
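
/*
 * Mark a counter as running on this cpu.  Software counters are enabled
 * immediately; hardware counters only have their state updated here and
 * are actually programmed into a PMC later, by hw_perf_enable().
 */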
static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	if (!ppmu)
		return 0;
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}
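
/*
 * The operations the generic perf_counter core invokes on counters that
 * this file manages; hw_perf_counter_init() hands back a pointer to this.
 */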
struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event config to a raw event code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;
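
	/*
	 * The generic cache-event encoding packs three byte-wide fields
	 * into config: cache type in bits 0-7, operation in bits 8-15 and
	 * operation result in bits 16-23, which is what gets unpacked below.
	 */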
	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;
	struct cpu_hw_counters *cpuhw;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
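
	/*
	 * power_check_constraints() scribbles on the alternatives/amasks/
	 * avalues arrays in the per-cpu structure, so grab this cpu's copy
	 * (which also disables preemption) purely as scratch space here;
	 * nothing is committed to the PMU at this point.
	 */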
	cpuhw = &get_cpu_var(cpu_hw_counters);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_counters);
	if (err)
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, unsigned long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	unsigned long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();
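
	/*
	 * The PMU raises this interrupt when a PMC's 32-bit count turns
	 * negative, so a counter whose value reads as a negative 32-bit
	 * int is one that has overflowed.
	 */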
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	return 0;
}