/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
        nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                nmi_watchdog_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
                nmi_watchdog_user_enabled = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
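
/*
 * Example, given the parser above: booting with "nmi_watchdog=panic" makes
 * a detected hard lockup panic the machine, "nmi_watchdog=nopanic"
 * suppresses the panic, and "nmi_watchdog=0"/"nmi_watchdog=1" disable or
 * enable the NMI watchdog itself.
 */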

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
        hardlockup_detector_perf_enable();
        return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
        hardlockup_detector_perf_disable();
}

/* Return 0, if a NMI watchdog is available. Error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
        return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
        watchdog_enabled = 0;
        if (!watchdog_user_enabled)
                return;
        if (nmi_watchdog_available && nmi_watchdog_user_enabled)
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        if (soft_watchdog_user_enabled)
                watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        soft_watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
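
/*
 * Worked example: with the default watchdog_thresh of 10 seconds, the hard
 * lockup detector uses the 10 second threshold directly, while the soft
 * lockup warning only fires once the watchdog task has not run for
 * 2 * 10 = 20 seconds.
 */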

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
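
/*
 * The shift trades precision for speed: dividing by 2^30 (1073741824)
 * instead of 10^9 makes each returned "second" about 7% long, which is
 * harmless against thresholds measured in tens of seconds.
 */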

static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
        watchdog_update_hrtimer_threshold(sample_period);
}
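
/*
 * With the defaults, get_softlockup_thresh() is 20, so sample_period is
 * 20 * (10^9 / 5) ns = 4 * 10^9 ns: the per-cpu hrtimer fires every four
 * seconds, i.e. five times within the 20 second soft lockup window.
 */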

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * watchdog_mutex cannot be taken here, as this might be called
         * from (soft)interrupt context, so the access to
         * watchdog_allowed_cpumask might race with a concurrent update.
         *
         * The watchdog time stamp can race against a concurrent real
         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
        for_each_cpu(cpu, &watchdog_allowed_mask)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
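
/*
 * The hard lockup check is purely progress based: watchdog_timer_fn()
 * increments hrtimer_interrupts on every tick, and the NMI path calls
 * is_hardlockup() to compare the counter against the value saved on the
 * previous check. An unchanged counter means hrtimers have made no progress
 * on this CPU, i.e. interrupts have been blocked for the whole period -
 * a hard lockup.
 */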

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        if (!watchdog_enabled)
                return HRTIMER_NORESTART;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled. The task touches the watchdog to
         * indicate it is getting cpu time. If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }

                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
                         * engaged in dumping cpu back traces
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /* Avoid generating two back traces for current
                         * given that one is already made above
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

        /*
         * Start the timer first to prevent the NMI watchdog triggering
         * before the timer has a chance to fire.
         */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* Initialize timestamp */
        __touch_watchdog();
        /* Enable the perf event */
        if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
                watchdog_nmi_enable(cpu);

        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        /*
         * Disable the perf event first. That prevents that a large delay
         * between disabling the timer and disabling the perf event causes
         * the perf NMI to detect a false positive.
         */
        watchdog_nmi_disable(cpu);
        hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}

static struct smp_hotplug_thread watchdog_threads = {
        .store			= &softlockup_watchdog,
        .thread_should_run	= watchdog_should_run,
        .thread_fn		= watchdog,
        .thread_comm		= "watchdog/%u",
        .setup			= watchdog_enable,
        .cleanup		= watchdog_cleanup,
        .park			= watchdog_disable,
        .unpark			= watchdog_enable,
};
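
/*
 * From this descriptor the smpboot infrastructure creates one "watchdog/N"
 * thread per CPU: watchdog_enable() runs at thread setup and unpark,
 * watchdog_disable() at park, and the thread body invokes watchdog()
 * whenever watchdog_should_run() sees that the hrtimer has ticked since
 * the last pass.
 */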

static void softlockup_update_smpboot_threads(void)
{
        lockdep_assert_held(&watchdog_mutex);

        if (!softlockup_threads_initialized)
                return;

        smpboot_update_cpumask_percpu_thread(&watchdog_threads,
                                             &watchdog_allowed_mask);
}

/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
        cpumask_clear(&watchdog_allowed_mask);
        softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
        cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
        softlockup_update_smpboot_threads();
}

static void lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();
        softlockup_park_all_threads();
        set_sample_period();
        lockup_detector_update_enable();
        if (watchdog_enabled && watchdog_thresh)
                softlockup_unpark_threads();
        watchdog_nmi_start();
        cpus_read_unlock();
        /*
         * Must be called outside the cpus locked section to prevent
         * recursive locking in the perf code.
         */
        __lockup_detector_cleanup();
}
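
/*
 * This follows the protocol documented at watchdog_nmi_stop(): stop the
 * NMI watchdog and park all softlockup threads, update the configuration
 * variables, then unpark the threads on the cpus in watchdog_cpumask and
 * restart the NMI watchdog.
 */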

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
        int ret;

        /*
         * If sysctl is off and watchdog got disabled on the command line,
         * nothing to do here.
         */
        lockup_detector_update_enable();

        if (!IS_ENABLED(CONFIG_SYSCTL) &&
            !(watchdog_enabled && watchdog_thresh))
                return;

        ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                     &watchdog_allowed_mask);
        if (ret) {
                pr_err("Failed to initialize soft lockup detector threads\n");
                return;
        }

        mutex_lock(&watchdog_mutex);
        softlockup_threads_initialized = true;
        lockup_detector_reconfigure();
        mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_nmi_stop();
        lockup_detector_update_enable();
        watchdog_nmi_start();
        cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
        lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
        lockdep_assert_held(&watchdog_mutex);
        hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_cleanup();
        mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
        watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
        /* Remove impossible cpus to keep sysctl output clean. */
        cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
        lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, *param = table->data;

        mutex_lock(&watchdog_mutex);

        if (!write) {
                /*
                 * On read synchronize the userspace interface. This is a
                 * racy snapshot.
                 */
                *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (!err && old != READ_ONCE(*param))
                        proc_watchdog_update();
        }
        mutex_unlock(&watchdog_mutex);
        return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
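
/*
 * Example: "echo 0 > /proc/sys/kernel/watchdog" clears
 * watchdog_user_enabled and disables both detectors; writing 1 re-enables
 * whatever the individual nmi_watchdog/soft_watchdog knobs still allow.
 */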

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        if (!nmi_watchdog_available && write)
                return -ENOTSUPP;
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old;

        mutex_lock(&watchdog_mutex);

        old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (!err && write && old != READ_ONCE(watchdog_thresh))
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}
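
/*
 * A successful write propagates the new threshold through
 * proc_watchdog_update() -> lockup_detector_reconfigure(), which re-runs
 * set_sample_period() before the watchdog threads are unparked again.
 */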

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        mutex_lock(&watchdog_mutex);

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write)
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}
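
/*
 * proc_do_large_bitmap() accepts cpu range lists, so for example
 * "echo 0-3 > /proc/sys/kernel/watchdog_cpumask" restricts the watchdog
 * to CPUs 0-3, including any of them that are still offline.
 */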
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (!watchdog_nmi_probe())
                nmi_watchdog_available = true;
        lockup_detector_setup();
}