/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};
/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};
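/*
 * Illustrative sketch only: the heart of one unwind step, as a reminder of
 * the AArch64 frame record layout the fields above track. A frame record is
 * a pair of longs at fp: fp[0] holds the caller's fp, fp[1] the saved lr.
 * The validity and accounting checks (on_accessible_stack(), stacks_done,
 * prev_fp ordering) performed by the real unwind_frame() are omitted here.
 *
 *	unsigned long fp = frame->fp;
 *
 *	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 *	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
 *	frame->prev_fp = fp;
 */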
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
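/*
 * Illustrative sketch only, not part of this header's API: one plausible way
 * a caller could combine start_backtrace() (defined below) with
 * walk_stackframe() to print each return address of a blocked task. The
 * names print_pc_entry() and print_task_backtrace() are hypothetical, and
 * unwinding another task is only safe while that task cannot run.
 *
 *	static int print_pc_entry(struct stackframe *frame, void *data)
 *	{
 *		pr_info("  %pS\n", (void *)frame->pc);
 *		return 0;	// a non-zero return stops the walk
 *	}
 *
 *	static void print_task_backtrace(struct task_struct *tsk)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame, thread_saved_fp(tsk),
 *				thread_saved_pc(tsk));
 *		walk_stackframe(tsk, &frame, print_pc_entry, NULL);
 *	}
 */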
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}
static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info) { return false; }
#endif
/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}
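/*
 * Illustrative sketch only: one plausible way to classify an arbitrary sp
 * against the stacks reachable from 'current'. The helper name describe_sp()
 * is hypothetical; preemption is disabled around the check so that the
 * per-cpu IRQ/overflow/SDEI stacks tested above are those of this CPU.
 *
 *	static void describe_sp(unsigned long sp)
 *	{
 *		struct stack_info info;
 *
 *		preempt_disable();
 *		if (on_accessible_stack(current, sp, &info))
 *			pr_info("sp %lx on stack type %d [%lx..%lx)\n",
 *				sp, info.type, info.low, info.high);
 *		preempt_enable();
 *	}
 */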
static inline void start_backtrace(struct stackframe *frame,
				   unsigned long fp, unsigned long pc)
{
	frame->fp = fp;
	frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame->graph = 0;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_frame() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
	frame->prev_fp = 0;
	frame->prev_type = STACK_TYPE_UNKNOWN;
}
#endif	/* __ASM_STACKTRACE_H */