/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */
14 #include <linux/percpu.h>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/bitops.h>
18 #include <linux/smp.h>
20 #include <linux/kprobes.h>
23 #include <asm/perf_event.h>
26 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
27 * offset from MSR_P4_BSU_ESCR0.
29 * It will be the max for all platforms (for now)
31 #define NMI_MAX_COUNTER_BITS 66
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems this reservation system just tries to coordinate
 *   things a little
 *
 * Valid bit indices are 0..NMI_MAX_COUNTER_BITS-1; bits are claimed and
 * released with atomic bitops so no extra locking is needed.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
43 /* converts an msr to an appropriate reservation bit */
44 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr
)
46 /* returns the bit offset of the performance counter register */
47 switch (boot_cpu_data
.x86_vendor
) {
49 if (msr
>= MSR_F15H_PERF_CTR
)
50 return (msr
- MSR_F15H_PERF_CTR
) >> 1;
51 return msr
- MSR_K7_PERFCTR0
;
52 case X86_VENDOR_INTEL
:
53 if (cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
))
54 return msr
- MSR_ARCH_PERFMON_PERFCTR0
;
56 switch (boot_cpu_data
.x86
) {
58 return msr
- MSR_P6_PERFCTR0
;
60 return msr
- MSR_KNC_PERFCTR0
;
62 return msr
- MSR_P4_BPU_PERFCTR0
;
69 * converts an msr to an appropriate reservation bit
70 * returns the bit offset of the event selection register
72 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr
)
74 /* returns the bit offset of the event selection register */
75 switch (boot_cpu_data
.x86_vendor
) {
77 if (msr
>= MSR_F15H_PERF_CTL
)
78 return (msr
- MSR_F15H_PERF_CTL
) >> 1;
79 return msr
- MSR_K7_EVNTSEL0
;
80 case X86_VENDOR_INTEL
:
81 if (cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
))
82 return msr
- MSR_ARCH_PERFMON_EVENTSEL0
;
84 switch (boot_cpu_data
.x86
) {
86 return msr
- MSR_P6_EVNTSEL0
;
88 return msr
- MSR_KNC_EVNTSEL0
;
90 return msr
- MSR_P4_BSU_ESCR0
;
97 /* checks for a bit availability (hack for oprofile) */
98 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter
)
100 BUG_ON(counter
> NMI_MAX_COUNTER_BITS
);
102 return !test_bit(counter
, perfctr_nmi_owner
);
104 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit
);
106 int reserve_perfctr_nmi(unsigned int msr
)
108 unsigned int counter
;
110 counter
= nmi_perfctr_msr_to_bit(msr
);
111 /* register not managed by the allocator? */
112 if (counter
> NMI_MAX_COUNTER_BITS
)
115 if (!test_and_set_bit(counter
, perfctr_nmi_owner
))
119 EXPORT_SYMBOL(reserve_perfctr_nmi
);
121 void release_perfctr_nmi(unsigned int msr
)
123 unsigned int counter
;
125 counter
= nmi_perfctr_msr_to_bit(msr
);
126 /* register not managed by the allocator? */
127 if (counter
> NMI_MAX_COUNTER_BITS
)
130 clear_bit(counter
, perfctr_nmi_owner
);
132 EXPORT_SYMBOL(release_perfctr_nmi
);
134 int reserve_evntsel_nmi(unsigned int msr
)
136 unsigned int counter
;
138 counter
= nmi_evntsel_msr_to_bit(msr
);
139 /* register not managed by the allocator? */
140 if (counter
> NMI_MAX_COUNTER_BITS
)
143 if (!test_and_set_bit(counter
, evntsel_nmi_owner
))
147 EXPORT_SYMBOL(reserve_evntsel_nmi
);
149 void release_evntsel_nmi(unsigned int msr
)
151 unsigned int counter
;
153 counter
= nmi_evntsel_msr_to_bit(msr
);
154 /* register not managed by the allocator? */
155 if (counter
> NMI_MAX_COUNTER_BITS
)
158 clear_bit(counter
, evntsel_nmi_owner
);
160 EXPORT_SYMBOL(release_evntsel_nmi
);