/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

static struct pt_regs exec_summary_regs;

bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
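
/*
 * Usage note (added commentary, not in the original source): these two
 * helpers are called from the arch get_stack_info() code (dumpstack_32.c /
 * dumpstack_64.c) to classify a stack pointer and fill in the stack_info
 * that the unwinding loop in show_trace_log_lvl() below consumes.
 */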

static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

/*
 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
 *
 * In cases where we don't have the exact kernel image (which, if we did, we
 * could simply disassemble and navigate to the RIP), the purpose of the bigger
 * prologue is to have more context and to be able to correlate the code from
 * the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus in making sense of the register dump.
 *
 * What is more, the additional complication of a variable length insn arch like
 * x86 warrants having a longer byte sequence before rIP so that the disassembler
 * can "sync" up properly and find instruction boundaries when decoding the
 * opcode bytes.
 *
 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE are just a random
 * guesstimate in an attempt to achieve all of the above.
 */
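
/*
 * A quick sanity check of that sizing (editorial arithmetic on the macros
 * below, not text from the original source): PROLOGUE_SIZE (42) + 1 byte
 * for the faulting opcode itself + EPILOGUE_SIZE (21) = 64 bytes in total,
 * and 42/64 is roughly 2/3, which is where the "2/3rds prologue" above
 * comes from.
 */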

void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];
	unsigned long prologue = regs->ip - PROLOGUE_SIZE;
	bool bad_ip;

	/*
	 * Make sure userspace isn't trying to trick us into dumping kernel
	 * memory by pointing the userspace instruction pointer at it.
	 */
	bad_ip = user_mode(regs) &&
		__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);

	if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
					OPCODE_BUFSIZE)) {
		printk("%sCode: Bad RIP value.\n", loglvl);
	} else {
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
	}
}
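
/*
 * For illustration only (a made-up example, not captured from a real oops):
 * the printk above produces a line of the form
 *
 *   Code: 48 8b 05 ... 90 <0f> 0b 48 ... c3
 *
 * i.e. 42 prologue bytes, the byte at regs->ip in angle brackets, then 21
 * epilogue bytes, 64 bytes in all.
 */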

void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}

void show_iret_regs(struct pt_regs *regs)
{
	show_ip(regs, KERN_DEFAULT);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, SHOW_REGS_SHORT);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}

void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);

	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
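
/*
 * Usage note (added commentary, not in the original source): oops_begin()
 * and oops_end() below form a pair.  oops_begin() takes die_lock and bumps
 * die_nest_count; oops_end() drops the count and releases the lock only
 * once the outermost oops has finished printing.
 */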

void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, SHOW_REGS_ALL);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 * Before we rewind the stack, we have to tell KASAN that we're going to
	 * reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static void __die_header(const char *str, struct pt_regs *regs, long err)
{
	const char *pr = "";

	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       pr,
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);
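
/*
 * For illustration only (a made-up example, not real output): with a
 * typical config, the printk above yields a header line such as
 *
 *   Oops: 0002 [#1] PREEMPT SMP PTI
 *
 * i.e. the low 16 bits of the error code, the die counter, and the
 * enabled-feature tags.
 */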

static int __die_body(const char *str, struct pt_regs *regs, long err)
{
	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
	__die_header(str, regs, err);
	return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}

void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	__die_header(str, regs, err);
	if (gp_addr)
		kasan_non_canonical_hook(gp_addr);
	if (__die_body(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	__show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}