// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
	KUNWIND_SOURCE_REGS_LR,
};

union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

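/*
 * Note: the flags record how the most recent return address was recovered:
 * 'fgraph' is set when a return_to_handler trampoline address was mapped
 * back through the function graph tracer, and 'kretprobe' when a kretprobe
 * trampoline address was mapped back to the original caller.
 */
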
/*
 * Kernel unwind state
 *
 * @common:	Common unwind state.
 * @task:	The task being unwound.
 * @graph_idx:	Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:	When KRETPROBES is selected, holds the kretprobe instance
 *		associated with the most recently encountered replacement lr
 *		value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);

	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}

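/*
 * Illustrative sketch (hypothetical wrapper, not from this file): the
 * function that invokes kunwind_init_from_caller() must be noinline, e.g.:
 *
 *	noinline void example_unwind_from_here(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *		...
 *	}
 *
 * If the wrapper were inlined, __builtin_frame_address(1) and
 * __builtin_return_address(0) would resolve against the wrong frame.
 */
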
/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;

		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;

		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

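/*
 * Note: both the function graph tracer and kretprobes substitute a
 * trampoline address (return_to_handler or the kretprobe trampoline) for a
 * function's saved lr, so the helper above maps such addresses back to the
 * original return address before it is reported.
 */
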
static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

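/*
 * Note: the frame record at an exception boundary lives inside the in-stack
 * pt_regs (at stackframe.record.fp), so container_of() on the current fp
 * recovers the whole pt_regs, which is range-checked against the known
 * stacks before it is dereferenced.
 */
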
static __always_inline int
kunwind_next_regs_lr(struct kunwind_state *state)
{
	/*
	 * The stack for the regs was consumed by kunwind_next_regs_pc(), so we
	 * cannot consume that again here, but we know the regs are safe to
	 * access.
	 */
	state->common.pc = state->regs->regs[30];
	state->common.fp = state->regs->regs[29];
	state->regs = NULL;
	state->source = KUNWIND_SOURCE_REGS_LR;

	return 0;
}

static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(1);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}

static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}

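/*
 * Note: a frame record with both fp and lr zero is the sentinel placed at
 * stack terminations and exception boundaries; it is the leading part of a
 * frame_record_meta whose type field says what follows, which is why the
 * all-zero case is handed to kunwind_next_frame_record_meta() above.
 */
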
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_LR:
		err = kunwind_next_frame_record(state);
		break;
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_regs_lr(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

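/*
 * Note: an exception boundary is reported in two steps: first the regs' PC
 * (KUNWIND_SOURCE_REGS_PC), then on the next iteration the regs' LR
 * (KUNWIND_SOURCE_REGS_LR), before the ordinary frame-record walk resumes.
 */
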
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#ifdef CONFIG_EFI
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
#endif

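/*
 * For example, STACKINFO_CPU(irq) expands (via token pasting) to a
 * statement expression yielding stackinfo_get_irq() when unwinding the
 * current task from a non-preemptible context, and the placeholder
 * stackinfo_get_unknown() otherwise.
 */
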
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

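/*
 * Note: the stacks[] array enumerates every stack the unwind may traverse;
 * the common unwind code uses it both to bounds-check each access and to
 * reject cycles, since a transition back to an already-consumed stack
 * indicates a corrupt chain of frame records.
 */
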
struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

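/*
 * Typical use is via the generic stacktrace API rather than calling this
 * directly; a minimal sketch (illustrative only):
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	stack_trace_print(entries, nr, 0);
 *
 * With CONFIG_ARCH_STACKWALK, stack_trace_save() is implemented on top of
 * arch_stack_walk().
 */
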
struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

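/*
 * Note: the frame-record walk tracks only pc and fp, so the BPF consume
 * callback above always reports 0 for sp.
 */
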
static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	case KUNWIND_SOURCE_REGS_LR:	return "L";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *loglvl = arg;
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
		(void *)state->common.pc,
		has_info ? " (" : "",
		source ? source : "",
		flags.fgraph ? "F" : "",
		flags.kretprobe ? "K" : "",
		has_info ? ")" : "");

	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The struct defined for userspace stack frame in AARCH64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}