/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;

static int die_counter;

static struct pt_regs exec_summary_regs;

bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
			   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type = STACK_TYPE_TASK;
	info->begin = begin;
	info->end = end;
	info->next_sp = NULL;

	return true;
}

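/*
 * Note: the begin/end/next_sp fields filled in above are what lets
 * show_trace_log_lvl() below walk from one stack to the next;
 * get_stack_info() tries each in_*_stack() helper in turn to classify a
 * given stack pointer.
 */
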
/* Called from get_stack_info_noinstr - so must be noinstr too */
bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type = STACK_TYPE_ENTRY;
	info->begin = begin;
	info->end = end;
	info->next_sp = NULL;

	return true;
}

static void printk_stack_address(unsigned long address, int reliable,
				 const char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

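/*
 * %pB prints the symbol for a return address (the lookup is done on
 * address - 1, so a return address that lands exactly on the first byte of
 * the next function is still attributed to the caller); the "? " prefix
 * marks addresses the unwinder could not confirm as part of the call chain.
 */
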
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
		     unsigned int nbytes)
{
	if (!user_mode(regs))
		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	/*
	 * Make sure userspace isn't trying to trick us into dumping kernel
	 * memory by pointing the userspace instruction pointer at it.
	 */
	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
		return -EINVAL;

	/*
	 * Even if named copy_from_user_nmi() this can be invoked from
	 * other contexts and will not try to resolve a pagefault, which is
	 * the correct thing to do here as this code can be called from any
	 * context.
	 */
	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}

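/*
 * The TASK_SIZE_MAX check above closes an information leak: without it, a
 * user task could point its instruction pointer at an arbitrary kernel
 * address and fault, and the resulting show_opcodes() would dump kernel
 * bytes into the (user-visible) log.
 */
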
/*
 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
 *
 * In case we don't have the exact kernel image (which, if we did, we could
 * simply disassemble and navigate to the RIP), the purpose of the bigger
 * prologue is to have more context and to be able to correlate the code from
 * the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel, and thus in making sense of the register dump.
 *
 * What is more, the additional complication of a variable-length insn arch
 * like x86 warrants having a longer byte sequence before rIP so that the
 * disassembler can "sync" up properly and find instruction boundaries when
 * decoding the opcode bytes.
 *
 * Thus, the 2/3rds prologue and the 64-byte OPCODE_BUFSIZE are just a random
 * guesstimate in an attempt to achieve all of the above.
 */
void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];
	unsigned long prologue = regs->ip - PROLOGUE_SIZE;

	switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
	case 0:
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
		break;
	case -EPERM:
		/* No access to the user space stack of other tasks. Ignore. */
		break;
	default:
		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
		       loglvl, prologue);
		break;
	}
}

void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}

void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
	show_ip(regs, log_lvl);
	printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
	       regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial, const char *log_lvl)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, SHOW_REGS_SHORT, log_lvl);
	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs, log_lvl);
	}
}

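/*
 * IRET_FRAME_OFFSET/IRET_FRAME_SIZE describe the hardware iret frame at the
 * end of pt_regs (ip, cs, flags, sp, ss): on an exception in entry code,
 * that five-word frame may be the only part of 'regs' actually present on
 * this stack, which is the "partial" case handled above.
 */
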
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			       unsigned long *stack, const char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

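/*
 * A hypothetical sketch of the output the loop above produces (function
 * names and offsets are illustrative only):
 *
 *   Call Trace:
 *    <IRQ>
 *    handle_irq_event+0x5a/0x80
 *    ? some_function+0x10/0x30
 *    </IRQ>
 *    do_syscall_64+0x3b/0x90
 *
 * where "?" marks scanned-but-unconfirmed addresses, per the comment in the
 * scan loop.
 */
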
void show_stack(struct task_struct *task, unsigned long *sp,
		const char *loglvl)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, loglvl);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

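/*
 * die_lock serializes oops output across CPUs.  oops_begin() below uses a
 * trylock so that a CPU which oopses while already holding the lock
 * (die_owner, i.e. a nested oops on the same CPU) does not deadlock against
 * itself; die_nest_count ensures only the outermost oops_end() releases the
 * lock.
 */
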
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 * Before we rewind the stack, we have to tell KASAN that we're going to
	 * reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static void __die_header(const char *str, struct pt_regs *regs, long err)
{
	const char *pr = "";

	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       pr,
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);

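/*
 * A representative (hypothetical) header produced by the printk above:
 *
 *   Oops: 0002 [#1] PREEMPT SMP PTI
 *
 * i.e. the low 16 bits of the error code, the die counter, then one tag per
 * enabled feature.
 */
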
static int __die_body(const char *str, struct pt_regs *regs, long err)
{
	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
	__die_header(str, regs, err);
	return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}

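/*
 * If a notifier on the die chain returned NOTIFY_STOP, __die() returns
 * nonzero and sig is cleared, so oops_end() neither kills the task nor
 * panics.
 */
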
void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	__die_header(str, regs, err);
	if (gp_addr)
		kasan_non_canonical_hook(gp_addr);
	if (__die_body(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}

void show_regs(struct pt_regs *regs)
{
	enum show_regs_mode print_kernel_regs;

	show_regs_print_info(KERN_DEFAULT);

	print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
	__show_regs(regs, print_kernel_regs, KERN_DEFAULT);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}