/*
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/mce.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
static int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

static DEFINE_PER_CPU(short, wd_enabled);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}

static int endflag __initdata = 0;

/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}

int __init check_nmi_watchdog(void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu, counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(wd_enabled, cpu) = 0;
			atomic_dec(&nmi_active);
		}
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

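/*
 * Illustrative boot-parameter usage (assumption, not shown in this file):
 * "nmi_watchdog=2" selects the local APIC watchdog and "nmi_watchdog=1"
 * the IO-APIC one, matching the NMI_LOCAL_APIC/NMI_IO_APIC values from
 * <asm/nmi.h>; an optional "panic" prefix, e.g. "nmi_watchdog=panic,2",
 * additionally sets panic_on_timeout so a detected lockup panics the box.
 */
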
static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled) == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		__get_cpu_var(wd_enabled) = 1;
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}

void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	touch_softlockup_watchdog();
}

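/*
 * Usage sketch (illustrative, not part of the original source): code that
 * legitimately keeps a CPU busy with normal interrupts off for a long time
 * (long console writes during an oops, for example) is expected to call
 * touch_nmi_watchdog() periodically; that sets every CPU's nmi_touch flag,
 * so nmi_watchdog_tick() below treats the CPU as not stuck and resets its
 * alert counter instead of declaring a lockup.
 */
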
int __kprobes nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif

	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/* don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}

static unsigned ignore_nmis;

asmlinkage __kprobes void do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count, 1);
	if (!ignore_nmis)
		default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

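/*
 * Illustrative run-time usage (not from this file): with the local APIC
 * watchdog active, "echo 0 > /proc/sys/kernel/nmi_watchdog" lands in
 * proc_nmi_enabled() above and disables the watchdog, while writing 1
 * re-enables it; a state change is rejected with -EIO when nmi_active is
 * negative, i.e. when the watchdog could never be set up on this hardware.
 */
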
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}

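/*
 * Illustrative note (assumption, not stated in this file): each CPU clears
 * its own bit from backtrace_mask in nmi_watchdog_tick() after printing its
 * stack, so this loop simply waits for the mask to drain; callers such as
 * the SysRq "show all CPU backtraces" handler are expected to reach this
 * through the trigger_all_cpu_backtrace() wrapper.
 */
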
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);