/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;
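/*
 * Note: watchdog_thresh is the hard-lockup threshold in seconds
 * (default 10); the soft-lockup threshold is derived from it in
 * get_softlockup_thresh() below.  Both watchdog_user_enabled and
 * watchdog_thresh are also writable at runtime via proc_dowatchdog().
 */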
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
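/*
 * Usage sketch for the boot parameter parsed above:
 *   nmi_watchdog=panic   - panic() when a hard lockup is detected
 *   nmi_watchdog=nopanic - warn only
 *   nmi_watchdog=0       - disable the lockup detectors entirely
 */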
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
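/*
 * Likewise, "nowatchdog" and "nosoftlockup" on the command line clear
 * watchdog_user_enabled before lockup_detector_init() runs, and
 * "softlockup_panic=1" makes a detected soft lockup panic the machine.
 */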
/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft lockups can have false positives under extreme conditions, so
 * we generally want a higher threshold for soft lockups than for hard
 * lockups.  The two are therefore coupled by a fixed factor: the soft
 * threshold is twice the hard one.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
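/*
 * Worked example with the defaults: watchdog_thresh = 10s gives a soft
 * threshold of 20s, so sample_period = 20 * NSEC_PER_SEC / 5, i.e. the
 * per-cpu hrtimer fires every 4 seconds.
 */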
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's
	 * timestamp gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly.  Do we care if a 0 races with a
	 * timestamp?  All it means is that the softlockup check starts
	 * one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif
void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
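/*
 * The perf NMI fires roughly once per watchdog_thresh seconds, while
 * the hrtimer ticks every sample_period (one fifth of the soft
 * threshold), so several hrtimer_interrupts increments are expected
 * between consecutive NMIs.  If the count has not moved since the last
 * NMI, timer interrupts have stalled for a whole period and we call it
 * a hard lockup.
 */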
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
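/*
 * Note: wd_hw_attr counts raw CPU cycles.  Its sample_period is left
 * unset here and filled in by watchdog_nmi_enable() via
 * hw_nmi_get_sample_period(watchdog_thresh); assuming that helper
 * converts the threshold to a cycle count (as on x86), the counter
 * overflows, and the callback below runs, about once per
 * watchdog_thresh seconds.
 */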
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure this CPU's timer
	 * interrupt is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't,
	 * that is a good indication the CPU is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
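/*
 * Note: this callback is expected to run from NMI context (hence the
 * "NMI watchdog" name), so it must not sleep or take locks; it only
 * touches this CPU's state.  hard_watchdog_warn is cleared again on
 * the first healthy sample, so a later lockup is reported afresh.
 */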
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure the high priority
	 * watchdog task is being scheduled.  The task touches the
	 * watchdog to indicate it is getting cpu time.  If it hasn't,
	 * that is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look
		 * to the watchdog like a soft lockup; check to see if the
		 * host stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
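/*
 * Note: 'duration' is in (roughly) seconds, since both values compared
 * in is_softlockup() come from get_timestamp(); it is the stall time
 * printed by the "stuck for %us" message above.
 */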
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or if different from cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
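/*
 * Registering this descriptor with smpboot creates one "watchdog/%u"
 * kthread per CPU whose lifecycle follows CPU hotplug: setup/unpark
 * start the hrtimer and perf event when a CPU comes online, and
 * park/cleanup stop them when it goes down.  The thread body runs only
 * when watchdog_should_run() observes a new hrtimer tick.
 */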
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart the hrtimer if it is currently
	 * executing, because it will reprogram itself with the new period
	 * now.  We should never see it unqueued here because we are
	 * running per-cpu with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
			      HRTIMER_MODE_REL_PINNED);
}
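/*
 * hrtimer_try_to_cancel() returns 1 if it dequeued a pending timer,
 * 0 if the timer was not queued and -1 if the callback is currently
 * running; we only restart in the first case, matching the comment
 * above.
 */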
static void update_timers(int cpu)
{
	/*
	 * Make sure the perf event counter adopts the new sampling
	 * period.  Updating the sampling period directly would be much
	 * nicer, but we do not have an API for that now, so let's use
	 * a big hammer.
	 * The hrtimer will pick up the new period on its next tick, but
	 * that might already be too late, so restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}
static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	preempt_disable();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	preempt_enable();
	put_online_cpus();
}
static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}
/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * The watchdog threads should run only if both knobs are
	 * nonzero; the 'watchdog_running' check in
	 * watchdog_*_all_cpus() makes these calls idempotent.
	 */
	if (watchdog_user_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */
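/*
 * Usage sketch for the proc interface above:
 *   echo 0  > /proc/sys/kernel/nmi_watchdog     - stop the watchdog threads
 *   echo 20 > /proc/sys/kernel/watchdog_thresh  - double the default threshold
 * A threshold change goes through watchdog_enable_all_cpus(true), which
 * restarts the per-cpu timers with the new sample period.
 */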
void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}