// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
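
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * the save_stack_trace() API above fills a caller-owned buffer and prints it
 * with the generic helper from <linux/stacktrace.h>. The function name and
 * buffer size here are made up for the example.
 */
static void __maybe_unused example_print_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 16,
		.skip		= 0,	/* keep every frame, including this one */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}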

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
					   struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as setup by copy_thread().
		 *
		 * Note that stack backlinks are not properly setup by
		 * copy_thread() and thus, a forked task() will have
		 * an unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* sanity check: ABI requires SP to be aligned 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink, the
		 * rest of the frame may be uninitialized, continue to
		 * the next.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}

int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_tsk_reliable(tsk, trace);

	put_task_stack(tsk);

	return ret;
}
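
/*
 * Illustrative sketch, not part of the original file: a consumer such as the
 * livepatch core treats any non-zero return from the function above as "this
 * task's stack cannot be trusted". The helper name and buffer size below are
 * made up for the example.
 */
static bool __maybe_unused example_task_stack_is_reliable(struct task_struct *tsk)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 64,
	};

	/* 0 means every frame was validated; -EINVAL/-E2BIG mean it was not. */
	return save_stack_trace_tsk_reliable(tsk, &trace) == 0;
}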
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
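
/*
 * Illustrative note, not part of the original file: with the hook above wired
 * up, Book3S-64 gets the generic NMI backtrace helpers from <linux/nmi.h>.
 * A debugging site (or the sysrq 'l' handler) typically reaches this code via
 * something like:
 *
 *	trigger_all_cpu_backtrace();
 *
 * which calls nmi_trigger_cpumask_backtrace() with raise_backtrace_ipi() as
 * the raise callback, sending the NMI IPIs handled above.
 */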