/*
 * linux/arch/x86_64/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>
/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)
/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;		/* oprofile uses this */
int panic_on_timeout;
unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */
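/* Each counter is armed with -(cpu_khz * 1000 / nmi_hz), i.e. left
   that many events short of overflow, so a fully-busy CPU overflows
   it (and takes a watchdog NMI) roughly nmi_hz times per second.
   E.g. on a hypothetical 2GHz CPU with nmi_hz == 1 the reload value
   is about -2,000,000,000: one NMI per second of busy time. */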
#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
#define MSR_P4_MISC_ENABLE	0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
#define MSR_P4_PERFCTR0		0x300
#define MSR_P4_CCCR0		0x360
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0	0x30C
#define P4_NMI_CRU_ESCR0	(P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0	\
	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
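/* Roughly how the "clock" trick above works: with P4_CCCR_COMPARE set
   the counter only counts cycles on which the ESCR event comparison
   against P4_CCCR_THRESHOLD(15) holds, and P4_CCCR_COMPLEMENT inverts
   that comparison to "less than or equal".  Since the per-cycle event
   count can never exceed 15, the test passes on every cycle, so
   IQ_COUNTER0 ticks once per cycle like a clock. */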
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}
/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}
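/* CPUs this code doesn't recognize fall back to NMI_IO_APIC mode,
   where the watchdog NMIs are driven by the I/O APIC timer rather
   than by a local APIC performance counter (see the lapic_nmi_owner
   comment above). */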
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu, counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID)
		return 0;
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
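/* Example usage, assuming the NMI_* values from <asm/nmi.h>
   (NMI_IO_APIC == 1, NMI_LOCAL_APIC == 2): booting with
   "nmi_watchdog=2" selects the local APIC watchdog, while
   "nmi_watchdog=panic,2" additionally makes a detected lockup
   panic the machine instead of just oopsing. */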
static void disable_intel_arch_watchdog(void);
static void disable_lapic_nmi_watchdog(void)
{
	if (nmi_active <= 0)
		return;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
		break;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 15) {
			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			disable_intel_arch_watchdog();
		}
		break;
	}
	nmi_active = -1;
	/* tell do_nmi() and others that we're not active any more */
	nmi_watchdog = 0;
}
static void enable_lapic_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_LOCAL_APIC;
		touch_nmi_watchdog();
		setup_apic_nmi_watchdog();
	}
}
int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}
void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}
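/* A driver that wants the perfctr hardware for itself (e.g. oprofile)
   is expected to bracket its use with reserve_lapic_nmi() and
   release_lapic_nmi(): reserving pauses the lapic watchdog, and the
   release re-enables it if the watchdog owned the hardware before. */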
void disable_timer_nmi_watchdog(void)
{
	if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
		return;

	disable_irq(0);
	unset_nmi_callback();
	nmi_active = -1;
	nmi_watchdog = NMI_NONE;
}
void enable_timer_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_IO_APIC;
		touch_nmi_watchdog();
		nmi_active = 1;
		enable_irq(0);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */
static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = nmi_active;
	disable_lapic_nmi_watchdog();
	return 0;
}
static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};
static struct sys_device device_lapic_nmi = {
	.cls	= &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */
static void clear_msr_range(unsigned int base, unsigned int n)
{
	unsigned int i;

	for(i = 0; i < n; ++i)
		wrmsr(base+i, 0, 0);
}
static void setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for(i = 0; i < 4; ++i) {
		/* Simulator may not support it */
		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
			nmi_perfctr_msr = 0;
			return;
		}
		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
static void disable_intel_arch_watchdog(void)
{
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(10);	/* architectural perfmon CPUID leaf */
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}
static int setup_intel_arch_watchdog(void)
{
	unsigned int evntsel;
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(10);	/* architectural perfmon CPUID leaf */
	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;

	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	return 1;
}
static int setup_p4_watchdog(void)
{
	unsigned int misc_enable, dummy;

	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
	nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
		clear_msr_range(0x3F1, 2);
	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
	   docs don't fully define it, so leave it alone for now. */
	if (boot_cpu_data.x86_model >= 0x3) {
		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
		clear_msr_range(0x3A0, 26);
		clear_msr_range(0x3BC, 3);
	} else {
		clear_msr_range(0x3A0, 31);
	}
	clear_msr_range(0x3C0, 6);
	clear_msr_range(0x3C8, 6);
	clear_msr_range(0x3E0, 2);
	clear_msr_range(MSR_P4_CCCR0, 18);
	clear_msr_range(MSR_P4_PERFCTR0, 18);

	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
	wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
	return 1;
}
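/* On a HyperThreaded P4 with two siblings the counter hardware is
   shared, so nmi_p4_cccr_val also carries P4_CCCR_OVF_PMI1 (set
   above) and an overflow raises the PMI on the second logical CPU
   as well. */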
void setup_apic_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 15)
			return;
		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
			return;
		setup_k7_watchdog();
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			if (!setup_intel_arch_watchdog())
				return;
		} else if (boot_cpu_data.x86 == 15) {
			if (!setup_p4_watchdog())
				return;
		} else
			return;
		break;
	default:
		return;
	}
	lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
	nmi_active = 1;
}
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count,1);
	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}
void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();	/* sync vmalloc PTEs so a module callback can't fault in NMI context */
	rcu_assign_pointer(nmi_callback, callback);
}
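/* rcu_assign_pointer() pairs with the rcu_dereference() in do_nmi():
   it publishes the callback with the memory ordering needed so a
   concurrent NMI on another CPU never sees a stale or partially
   visible pointer. */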
void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		}
		set_nmi_callback(unknown_nmi_panic_callback);
	} else {
		unset_nmi_callback();
		release_lapic_nmi();
	}
	return 0;
}
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);