kernel/watchdog_hld.c
/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
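
/*
 * Set once the one-shot all-CPU backtrace has been triggered, so that
 * several near-simultaneous hardlockups don't interleave their dumps
 * (see the test_and_set_bit() in watchdog_overflow_callback()).
 */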
static unsigned long hardlockup_allcpu_dumped;

void arch_touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled.  If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
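
/*
 * Template for the per-CPU counter: count raw CPU cycles, pinned so the
 * event always stays scheduled on the PMU (hence "permanently consumes
 * one hw-PMU counter" below), and created disabled until
 * watchdog_nmi_enable() turns it on.
 */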
static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (atomic_read(&watchdog_park_in_progress) != 0)
                return;
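
        /*
         * arch_touch_nmi_watchdog() sets this flag to make us skip one
         * check, e.g. around code that legitimately keeps a CPU busy
         * with interrupts off for a long time.
         */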
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup: this is done by making sure our timer
         * interrupt is incrementing.  The timer interrupt should have
         * fired multiple times before we overflowed.  If it hasn't,
         * that is a good indication the cpu is stuck.
         */
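        /*
         * A sketch of that check, for reference (the real is_hardlockup()
         * lives in kernel/watchdog.c and may differ across kernel
         * versions): it returns true iff the per-cpu hrtimer interrupt
         * count has not moved since the previous NMI.
         *
         *      static int is_hardlockup(void)
         *      {
         *              unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
         *
         *              if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
         *                      return 1;
         *
         *              __this_cpu_write(hrtimer_interrupts_saved, hrint);
         *              return 0;
         *      }
         */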
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                /*
                 * Perform all-CPU dump only once to avoid multiple
                 * hardlockups generating interleaving traces
                 */
                if (sysctl_hardlockup_all_cpu_backtrace &&
                    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
                        trigger_allbutself_cpu_backtrace();

                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}

/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long firstcpu_err;
static atomic_t watchdog_cpus;

int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);
        int firstcpu = 0;

        /* nothing to do if the hard lockup detector is disabled */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                goto out;

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        if (atomic_inc_return(&watchdog_cpus) == 1)
                firstcpu = 1;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
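
        /*
         * hw_nmi_get_sample_period() scales watchdog_thresh (in seconds)
         * to a cycle count, so the counter overflows, and this NMI fires,
         * roughly once per watchdog period.  On x86 this works out to
         * about cpu_khz * 1000 * watchdog_thresh cycles.
         */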
        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                                 watchdog_overflow_callback,
                                                 NULL);

        /* save the first cpu's error for future comparison */
        if (firstcpu && IS_ERR(event))
                firstcpu_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for the first cpu initialized */
                if (firstcpu || firstcpu_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /*
         * Disable the hard lockup detector if _any_ CPU fails to set up
         * the hardware perf event.  The watchdog() function checks the
         * NMI_WATCHDOG_ENABLED bit periodically.
         *
         * The barriers are for syncing up watchdog_enabled across all the
         * cpus, as clear_bit() does not use barriers.
         */
        smp_mb__before_atomic();
        clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
        smp_mb__after_atomic();

        /* skip displaying the same error again */
        if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warn("disabled (cpu%i): hardware events not enabled\n",
                        cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));

        pr_info("Shutting down hard lockup detector on all cpus\n");

        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}
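
/*
 * Counterpart of watchdog_nmi_enable(): invoked by the watchdog core to
 * tear down the per-CPU event, e.g. when a CPU goes offline or the
 * detector is turned off.
 */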
void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);

                /* watchdog_nmi_enable() expects this to be zero initially. */
                if (atomic_dec_and_test(&watchdog_cpus))
                        firstcpu_err = 0;
        }
}