// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
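
/*
 * The source of an unwind step records how the current pc/fp pair was
 * derived: from a regular frame record (FRAME), from the caller of an unwind
 * entry point (CALLER), from a blocked task's saved context (TASK), or from
 * an exception's pt_regs (REGS_PC/REGS_LR). See the kunwind_init_*() and
 * kunwind_next_*() helpers below.
 */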
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
	KUNWIND_SOURCE_REGS_LR,
};

union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

/*
 * Kernel unwind state
 *
 * @common:	Common unwind state.
 * @task:	The task being unwound.
 * @graph_idx:	Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:	When KRETPROBES is selected, holds the kretprobe instance
 *		associated with the most recently encountered replacement lr
 *		value.
 * @source:	How the current pc/fp pair was derived (see enum kunwind_source).
 * @flags:	Whether the pc was recovered from an ftrace or kretprobe
 *		trampoline.
 * @regs:	The pt_regs currently being unwound through, if any.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}
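
/*
 * Note: this is only reached via the __always_inline kunwind_stack_walk(),
 * whose entry points (e.g. arch_stack_walk()) are marked noinline. If one of
 * those were inlined instead, __builtin_frame_address(1) and
 * __builtin_return_address(0) above would describe the wrong frame.
 */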

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}
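
/*
 * Both the function-graph tracer and kretprobes replace a function's saved
 * return address with a trampoline (return_to_handler or the kretprobe
 * trampoline). Recover the original return address so traces report the real
 * caller, and record the substitution in state->flags.
 */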
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
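
/*
 * When a frame record is the one embedded in a pt_regs (stackframe.record),
 * its fp member sits at a known offset within the pt_regs, so container_of()
 * below can recover the enclosing pt_regs from the fp value alone.
 */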
static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

static __always_inline int
kunwind_next_regs_lr(struct kunwind_state *state)
{
	/*
	 * The stack for the regs was consumed by kunwind_next_regs_pc(), so we
	 * cannot consume that again here, but we know the regs are safe to
	 * access.
	 */
	state->common.pc = state->regs->regs[30];
	state->common.fp = state->regs->regs[29];
	state->regs = NULL;
	state->source = KUNWIND_SOURCE_REGS_LR;

	return 0;
}

static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(1);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
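
/*
 * An AAPCS64 frame record is a {fp, lr} pair stored at the address held in
 * fp, forming a linked list up the stack. A record of {0, 0} is not a real
 * link: it marks a frame_record_meta terminator, handled by
 * kunwind_next_frame_record_meta() above.
 */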
static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_LR:
		err = kunwind_next_frame_record(state);
		break;
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_regs_lr(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
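
/*
 * With CONFIG_ARM64_PTR_AUTH, return addresses saved on the stack carry a
 * Pointer Authentication Code in their upper bits;
 * ptrauth_strip_kernel_insn_pac() above removes it so the pc can be matched
 * against symbols and trampoline addresses.
 */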

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * The EFI stack is only accessible when unwinding the current task while it
 * is executing EFI runtime services.
 */
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
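
/*
 * Each STACKINFO_* expression evaluates to a stack_info for the stacks[]
 * array built in kunwind_stack_walk() below; when a stack is not accessible
 * in the current context it degrades to stackinfo_get_unknown(), so the
 * unwinder will refuse to consume addresses on it.
 */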
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
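
/*
 * For illustration, a caller collects entries with a stack_trace_consume_fn
 * callback; with hypothetical record_entry()/store_pc() helpers:
 *
 *	static bool record_entry(void *cookie, unsigned long pc)
 *	{
 *		return store_pc(cookie, pc);	// returning false stops the walk
 *	}
 *
 *	arch_stack_walk(record_entry, &buf, current, NULL);
 */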

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp),
					  void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}
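
/*
 * Note: sp is reported as 0 above because the frame-record unwinder only
 * tracks fp/pc; BPF consumers that need a stack pointer must tolerate this.
 */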

static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	case KUNWIND_SOURCE_REGS_LR:	return "L";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
	       (void *)state->common.pc,
	       has_info ? " (" : "",
	       source ? source : "",
	       flags.fgraph ? "F" : "",
	       flags.kretprobe ? "K" : "",
	       has_info ? ")" : "");

	return true;
}
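
/*
 * For example, an entry whose pc came from a pt_regs and had a kretprobe
 * trampoline recovered would print as "foo+0x10/0x40 (PK)", while an ordinary
 * frame-record entry prints with no suffix.
 */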

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The struct defined for userspace stack frame in AARCH64 mode.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}