/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/seq_buf.h>

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE		4096
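
/*
 * Per-CPU holding area for output generated in NMI context: backtrace
 * output is accumulated here via nmi_vprintk() below and printed later
 * from non-NMI context; seq_buf drops anything beyond NMI_BUF_SIZE bytes.
 */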
struct nmi_seq_buf {
	unsigned char		buffer[NMI_BUF_SIZE];
	struct seq_buf		seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
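
/* Print bytes [start..end] of a CPU's saved buffer via one printk() call. */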
static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
{
	const char *buf = s->buffer + start;

	printk("%.*s", (end - start) + 1, buf);
}

void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask))
{
	struct nmi_seq_buf *s;
	int i, cpu, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output a duplicate set of cpu
		 * dumps.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));

	/*
	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
	 * CPUs will write to.
	 */
	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
		s = &per_cpu(nmi_print_seq, cpu);
		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
	}

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Now that all the NMIs have triggered, we can dump out their
	 * back traces safely to the console.
	 */
	for_each_cpu(cpu, &printtrace_mask) {
		int len, last_i = 0;

		s = &per_cpu(nmi_print_seq, cpu);
		len = seq_buf_used(&s->seq);
		if (!len)
			continue;

		/* Print line by line. */
		for (i = 0; i < len; i++) {
			if (s->buffer[i] == '\n') {
				print_seq_line(s, last_i, i);
				last_i = i + 1;
			}
		}
		/* Check if there was a partial line. */
		if (last_i < len) {
			print_seq_line(s, last_i, len - 1);
			pr_cont("\n");
		}
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
	put_cpu();
}
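
/*
 * Example wiring (a sketch based on the ARM port; raise_nmi(),
 * smp_cross_call() and IPI_CPU_BACKTRACE are that port's names and
 * will differ on other architectures):
 *
 *	static void raise_nmi(cpumask_t *mask)
 *	{
 *		smp_cross_call(mask, IPI_CPU_BACKTRACE);
 *	}
 *
 *	void arch_trigger_all_cpu_backtrace(bool include_self)
 *	{
 *		nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
 *	}
 *
 * The IPI/NMI handler on each targeted CPU then calls nmi_cpu_backtrace()
 * below with the interrupted register state.
 */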

/*
 * It is not safe to call printk() directly from NMI handlers.
 * It may be fine if the NMI detected a lockup and we have no choice
 * but to do so, but doing an NMI on all other CPUs to get a back trace
 * can be done with a sysrq-l. We don't want that to lock up, which
 * can happen if the NMI interrupts a printk in progress.
 *
 * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
 * the content into a per cpu seq_buf buffer. Then when the NMIs are
 * all done, we can safely dump the contents of the seq_buf to a printk()
 * from a non-NMI context.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	unsigned int len = seq_buf_used(&s->seq);

	seq_buf_vprintf(&s->seq, fmt, args);
	/* report how many bytes this call added, as vprintk() would */
	return seq_buf_used(&s->seq) - len;
}
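
/*
 * Called from the architecture's NMI (or NMI-like IPI) handler on each
 * CPU. Returns true if this CPU was a backtrace target and has now saved
 * its trace into its per-CPU seq_buf, false if the NMI was not ours.
 */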
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		printk_func_t printk_func_save = this_cpu_read(printk_func);

		/* Replace printk to write into the NMI seq */
		this_cpu_write(printk_func, nmi_vprintk);
		pr_warn("NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		this_cpu_write(printk_func, printk_func_save);

		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif