/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/* Arch specific watchdogs might need to share extra watchdog-related APIs. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);

extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;

#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

#define lockup_detector_online_cpu	NULL
#define lockup_detector_offline_cpu	NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
 * 'watchdog_softlockup_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' equals zero.
 */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT  0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT   1
#define WATCHDOG_HARDLOCKUP_ENABLED     (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED      (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)

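/*
 * Illustrative sketch, not part of this header: with the bit layout above,
 * a detector is active when its bit is set in 'watchdog_enabled'. The
 * helper below is hypothetical and only demonstrates the intended use of
 * the WATCHDOG_*_ENABLED masks:
 *
 *	static bool hardlockup_detector_running(void)
 *	{
 *		return watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED;
 *	}
 */
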
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/* Sparc64 has a special implementation that is always enabled. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
void arch_touch_nmi_watchdog(void);
#else
static inline void arch_touch_nmi_watchdog(void) { }
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
void watchdog_hardlockup_touch_cpu(unsigned int cpu);
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_cleanup(void);
extern void hardlockup_config_perf_event(const char *str);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
static inline void hardlockup_config_perf_event(const char *str) { }
#endif

void watchdog_hardlockup_stop(void);
void watchdog_hardlockup_start(void);
int watchdog_hardlockup_probe(void);
void watchdog_hardlockup_enable(unsigned int cpu);
void watchdog_hardlockup_disable(unsigned int cpu);

void lockup_detector_reconfigure(void);

#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
#else
static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
#endif

/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout
 *
 * If we support detecting hardlockups, touch_nmi_watchdog() may be
 * used to pet the watchdog (reset the timeout) - for code which
 * intentionally disables interrupts for a long time. This call is stateless.
 *
 * Though this function has "nmi" in the name, the hardlockup watchdog might
 * not be backed by NMIs. This function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Pass on to the hardlockup detector selected via CONFIG_. Note that
	 * the hardlockup detector may not be arch-specific nor using NMIs
	 * and the arch_touch_nmi_watchdog() function will likely be renamed
	 * in the future.
	 */
	arch_touch_nmi_watchdog();

	touch_softlockup_watchdog();
}

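/*
 * Illustrative sketch of a caller, not code from this header: a driver
 * that polls hardware with interrupts disabled for a long time pets the
 * watchdogs on every iteration so that neither detector fires.
 * device_ready() is a hypothetical placeholder for the polled condition:
 *
 *	while (!device_ready(dev)) {
 *		touch_nmi_watchdog();
 *		cpu_relax();
 *	}
 */
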
/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism;
 * an illustrative fallback sketch follows this conditional block:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
	return true;
}

static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, -1);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   int exclude_cpu,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
bool arch_perf_nmi_is_available(void);
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#ifdef CONFIG_NMI_CHECK_CPU
void nmi_backtrace_stall_snap(const struct cpumask *btp);
void nmi_backtrace_stall_check(const struct cpumask *btp);
#else
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
#endif

#endif