/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
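/*
 * Worked out (assuming the usual msr-index.h values): MSR_P4_BSU_ESCR0 is
 * 0x3a0 and MSR_P4_CRU_ESCR5 is 0x3e1, so the largest bit offset the
 * conversion helpers below can produce is 0x3e1 - 0x3a0 = 65, which needs
 * a 66-bit map.
 */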
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * - different performance counters/event selection registers may be reserved
 *   by different subsystems; this reservation system just tries to coordinate
 *   things a little (see the usage sketch following release_evntsel_nmi()
 *   below)
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* AMD Fam15h counter MSRs are interleaved CTL/CTR pairs */
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
	}
	return 0;
}
/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
	}
	return 0;
}
/* checks whether a bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
/*
 * returns 1 on successful reservation (also for MSRs this allocator does
 * not manage), 0 if the counter is already taken
 */
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);