/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/ftrace.h>
14 #include <linux/kexec.h>
15 #include <linux/bug.h>
16 #include <linux/nmi.h>
17 #include <linux/sysfs.h>
19 #include <asm/stacktrace.h>
21 #include "dumpstack.h"
23 int panic_on_unrecovered_nmi
;
25 unsigned int code_bytes
= 64;
26 int kstack_depth_to_print
= 3 * STACKSLOTS_PER_LINE
;
27 static int die_counter
;
/*
 * Print a single kernel text address as " [<hexaddr>] symbol+off/len".
 * An address that was not confirmed by the frame-pointer chain
 * (reliable == 0) is prefixed with "? ".  %pS asks printk to resolve
 * the address to a symbol name.
 */
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pS\n", (void *) address,
			reliable ? "" : "? ", (void *) address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * With the function graph tracer active, return addresses on the stack
 * have been replaced by return_to_handler.  Recover the real return
 * address from task->ret_stack and report it through ops->address().
 * *graph counts how many such entries this walk has already consumed,
 * so nested replaced frames resolve to the right saved addresses.
 */
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task = tinfo->task;
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	/* Only trampoline addresses need translation. */
	if (addr != (unsigned long)return_to_handler)
		return;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	/* The recovered address is reliable by construction. */
	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
/* Graph tracer disabled: nothing on the stack needs translating. */
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif
67 * x86-64 can have up to three kernel stacks:
70 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
73 static inline int valid_stack_ptr(struct thread_info
*tinfo
,
74 void *p
, unsigned int size
, void *end
)
78 if (p
< end
&& p
>= (end
-THREAD_SIZE
))
83 return p
> t
&& p
< t
+ THREAD_SIZE
- size
;
87 print_context_stack(struct thread_info
*tinfo
,
88 unsigned long *stack
, unsigned long bp
,
89 const struct stacktrace_ops
*ops
, void *data
,
90 unsigned long *end
, int *graph
)
92 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
94 while (valid_stack_ptr(tinfo
, stack
, sizeof(*stack
), end
)) {
98 if (__kernel_text_address(addr
)) {
99 if ((unsigned long) stack
== bp
+ sizeof(long)) {
100 ops
->address(data
, addr
, 1);
101 frame
= frame
->next_frame
;
102 bp
= (unsigned long) frame
;
104 ops
->address(data
, addr
, 0);
106 print_ftrace_graph_addr(addr
, data
, ops
, tinfo
, graph
);
112 EXPORT_SYMBOL_GPL(print_context_stack
);
115 print_context_stack_bp(struct thread_info
*tinfo
,
116 unsigned long *stack
, unsigned long bp
,
117 const struct stacktrace_ops
*ops
, void *data
,
118 unsigned long *end
, int *graph
)
120 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
121 unsigned long *ret_addr
= &frame
->return_address
;
123 while (valid_stack_ptr(tinfo
, ret_addr
, sizeof(*ret_addr
), end
)) {
124 unsigned long addr
= *ret_addr
;
126 if (!__kernel_text_address(addr
))
129 ops
->address(data
, addr
, 1);
130 frame
= frame
->next_frame
;
131 ret_addr
= &frame
->return_address
;
132 print_ftrace_graph_addr(addr
, data
, ops
, tinfo
, graph
);
135 return (unsigned long)frame
;
137 EXPORT_SYMBOL_GPL(print_context_stack_bp
);
/*
 * stacktrace_ops callback: print a warning message that refers to a
 * symbol.  @data is the log-level prefix string handed to dump_trace();
 * it is printed through a literal "%s" format so that a '%' inside the
 * prefix can never be misread as a printk conversion specifier
 * (non-literal-format-string hazard).
 */
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk("%s", (char *)data);
	print_symbol(msg, symbol);
	printk("\n");
}
/* stacktrace_ops callback: log-level prefix followed by the warning. */
static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}
/*
 * stacktrace_ops callback: announce that the walker crossed onto a new
 * stack (e.g. an interrupt or exception stack).  Returning 0 tells
 * dump_trace() to continue walking.
 */
static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}
/*
 * Print one address/symbol entry per line.
 *
 * stacktrace_ops callback for a single trace entry.  Poke the NMI
 * watchdog first: dumping a deep trace to a slow console can take long
 * enough to trigger a spurious lockup report.  The log-level prefix in
 * @data is printed via a literal "%s" format rather than being passed
 * as the format string itself (format-string hazard).
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk("%s", (char *)data);
	printk_address(addr, reliable);
}
169 static const struct stacktrace_ops print_trace_ops
= {
170 .warning
= print_trace_warning
,
171 .warning_symbol
= print_trace_warning_symbol
,
172 .stack
= print_trace_stack
,
173 .address
= print_trace_address
,
174 .walk_stack
= print_context_stack
,
178 show_trace_log_lvl(struct task_struct
*task
, struct pt_regs
*regs
,
179 unsigned long *stack
, unsigned long bp
, char *log_lvl
)
181 printk("%sCall Trace:\n", log_lvl
);
182 dump_trace(task
, regs
, stack
, bp
, &print_trace_ops
, log_lvl
);
/* Convenience wrapper: backtrace with an empty log-level prefix. */
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}
191 void show_stack(struct task_struct
*task
, unsigned long *sp
)
193 show_stack_log_lvl(task
, NULL
, sp
, 0, "");
197 * The architecture-independent dump_stack generator
199 void dump_stack(void)
201 unsigned long bp
= 0;
204 #ifdef CONFIG_FRAME_POINTER
209 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
210 current
->pid
, current
->comm
, print_tainted(),
211 init_utsname()->release
,
212 (int)strcspn(init_utsname()->version
, " "),
213 init_utsname()->version
);
214 show_trace(NULL
, NULL
, &stack
, bp
);
216 EXPORT_SYMBOL(dump_stack
);
218 static arch_spinlock_t die_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
219 static int die_owner
= -1;
220 static unsigned int die_nest_count
;
222 unsigned __kprobes
long oops_begin(void)
227 /* notify the hw-branch tracer so it may disable tracing and
228 add the last trace to the trace buffer -
229 the earlier this happens, the more useful the trace. */
230 trace_hw_branch_oops();
234 /* racy, but better than risking deadlock. */
235 raw_local_irq_save(flags
);
236 cpu
= smp_processor_id();
237 if (!arch_spin_trylock(&die_lock
)) {
238 if (cpu
== die_owner
)
239 /* nested oops. should stop eventually */;
241 arch_spin_lock(&die_lock
);
250 void __kprobes
oops_end(unsigned long flags
, struct pt_regs
*regs
, int signr
)
252 if (regs
&& kexec_should_crash(current
))
257 add_taint(TAINT_DIE
);
260 /* Nest count reaches zero, release the lock. */
261 arch_spin_unlock(&die_lock
);
262 raw_local_irq_restore(flags
);
268 panic("Fatal exception in interrupt");
270 panic("Fatal exception");
274 int __kprobes
__die(const char *str
, struct pt_regs
*regs
, long err
)
280 printk(KERN_EMERG
"%s: %04lx [#%d] ", str
, err
& 0xffff, ++die_counter
);
281 #ifdef CONFIG_PREEMPT
287 #ifdef CONFIG_DEBUG_PAGEALLOC
288 printk("DEBUG_PAGEALLOC");
291 sysfs_printk_last_file();
292 if (notify_die(DIE_OOPS
, str
, regs
, err
,
293 current
->thread
.trap_no
, SIGSEGV
) == NOTIFY_STOP
)
296 show_registers(regs
);
298 if (user_mode_vm(regs
)) {
300 ss
= regs
->ss
& 0xffff;
302 sp
= kernel_stack_pointer(regs
);
305 printk(KERN_EMERG
"EIP: [<%08lx>] ", regs
->ip
);
306 print_symbol("%s", regs
->ip
);
307 printk(" SS:ESP %04x:%08lx\n", ss
, sp
);
309 /* Executive summary in case the oops scrolled away */
310 printk(KERN_ALERT
"RIP ");
311 printk_address(regs
->ip
, 1);
312 printk(" RSP <%016lx>\n", regs
->sp
);
318 * This is gone through when something in the kernel has done something bad
319 * and is about to be terminated:
321 void die(const char *str
, struct pt_regs
*regs
, long err
)
323 unsigned long flags
= oops_begin();
326 if (!user_mode_vm(regs
))
327 report_bug(regs
->ip
, regs
);
329 if (__die(str
, regs
, err
))
331 oops_end(flags
, regs
, sig
);
334 void notrace __kprobes
335 die_nmi(char *str
, struct pt_regs
*regs
, int do_panic
)
339 if (notify_die(DIE_NMIWATCHDOG
, str
, regs
, 0, 2, SIGINT
) == NOTIFY_STOP
)
343 * We are in trouble anyway, lets at least try
344 * to get a message out.
346 flags
= oops_begin();
347 printk(KERN_EMERG
"%s", str
);
348 printk(" on CPU%d, ip %08lx, registers:\n",
349 smp_processor_id(), regs
->ip
);
350 show_registers(regs
);
351 oops_end(flags
, regs
, 0);
352 if (do_panic
|| panic_on_oops
)
353 panic("Non maskable interrupt");
359 static int __init
oops_setup(char *s
)
363 if (!strcmp(s
, "panic"))
367 early_param("oops", oops_setup
);
369 static int __init
kstack_setup(char *s
)
373 kstack_depth_to_print
= simple_strtoul(s
, NULL
, 0);
376 early_param("kstack", kstack_setup
);
378 static int __init
code_bytes_setup(char *s
)
380 code_bytes
= simple_strtoul(s
, NULL
, 0);
381 if (code_bytes
> 8192)
386 __setup("code_bytes=", code_bytes_setup
);