// SPDX-License-Identifier: GPL-2.0
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
27 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
28 * offset from MSR_P4_BSU_ESCR0.
30 * It will be the max for all platforms (for now)
32 #define NMI_MAX_COUNTER_BITS 66
35 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
36 * evtsel_nmi_owner tracks the ownership of the event selection
37 * - different performance counters/ event selection may be reserved for
38 * different subsystems this reservation system just tries to coordinate
41 static DECLARE_BITMAP(perfctr_nmi_owner
, NMI_MAX_COUNTER_BITS
);
42 static DECLARE_BITMAP(evntsel_nmi_owner
, NMI_MAX_COUNTER_BITS
);
44 /* converts an msr to an appropriate reservation bit */
45 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr
)
47 /* returns the bit offset of the performance counter register */
48 switch (boot_cpu_data
.x86_vendor
) {
49 case X86_VENDOR_HYGON
:
51 if (msr
>= MSR_F15H_PERF_CTR
)
52 return (msr
- MSR_F15H_PERF_CTR
) >> 1;
53 return msr
- MSR_K7_PERFCTR0
;
54 case X86_VENDOR_INTEL
:
55 if (cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
))
56 return msr
- MSR_ARCH_PERFMON_PERFCTR0
;
58 switch (boot_cpu_data
.x86
) {
60 return msr
- MSR_P6_PERFCTR0
;
62 return msr
- MSR_KNC_PERFCTR0
;
64 return msr
- MSR_P4_BPU_PERFCTR0
;
71 * converts an msr to an appropriate reservation bit
72 * returns the bit offset of the event selection register
74 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr
)
76 /* returns the bit offset of the event selection register */
77 switch (boot_cpu_data
.x86_vendor
) {
78 case X86_VENDOR_HYGON
:
80 if (msr
>= MSR_F15H_PERF_CTL
)
81 return (msr
- MSR_F15H_PERF_CTL
) >> 1;
82 return msr
- MSR_K7_EVNTSEL0
;
83 case X86_VENDOR_INTEL
:
84 if (cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
))
85 return msr
- MSR_ARCH_PERFMON_EVENTSEL0
;
87 switch (boot_cpu_data
.x86
) {
89 return msr
- MSR_P6_EVNTSEL0
;
91 return msr
- MSR_KNC_EVNTSEL0
;
93 return msr
- MSR_P4_BSU_ESCR0
;
100 /* checks for a bit availability (hack for oprofile) */
101 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter
)
103 BUG_ON(counter
> NMI_MAX_COUNTER_BITS
);
105 return !test_bit(counter
, perfctr_nmi_owner
);
107 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit
);
109 int reserve_perfctr_nmi(unsigned int msr
)
111 unsigned int counter
;
113 counter
= nmi_perfctr_msr_to_bit(msr
);
114 /* register not managed by the allocator? */
115 if (counter
> NMI_MAX_COUNTER_BITS
)
118 if (!test_and_set_bit(counter
, perfctr_nmi_owner
))
122 EXPORT_SYMBOL(reserve_perfctr_nmi
);
124 void release_perfctr_nmi(unsigned int msr
)
126 unsigned int counter
;
128 counter
= nmi_perfctr_msr_to_bit(msr
);
129 /* register not managed by the allocator? */
130 if (counter
> NMI_MAX_COUNTER_BITS
)
133 clear_bit(counter
, perfctr_nmi_owner
);
135 EXPORT_SYMBOL(release_perfctr_nmi
);
137 int reserve_evntsel_nmi(unsigned int msr
)
139 unsigned int counter
;
141 counter
= nmi_evntsel_msr_to_bit(msr
);
142 /* register not managed by the allocator? */
143 if (counter
> NMI_MAX_COUNTER_BITS
)
146 if (!test_and_set_bit(counter
, evntsel_nmi_owner
))
150 EXPORT_SYMBOL(reserve_evntsel_nmi
);
152 void release_evntsel_nmi(unsigned int msr
)
154 unsigned int counter
;
156 counter
= nmi_evntsel_msr_to_bit(msr
);
157 /* register not managed by the allocator? */
158 if (counter
> NMI_MAX_COUNTER_BITS
)
161 clear_bit(counter
, evntsel_nmi_owner
);
163 EXPORT_SYMBOL(release_evntsel_nmi
);