// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
#include <asm/ftrace.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...)	pr_debug(x)
#else
#define dbg(x...)	do { } while (0)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * The kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized).
 */
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);

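/*
 * Binary-search a single unwind table for the entry whose
 * [region_start, region_end] range covers @addr; returns NULL if no
 * entry matches.
 */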
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

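/*
 * Look up @addr first in the static kernel unwind table and then in any
 * dynamically registered tables (e.g. for modules).  A table that yields
 * a hit is moved to the front of the list so that repeated traces
 * through the same code stay cheap.
 */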
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

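/*
 * Fill in an unwind_table descriptor for the entries in
 * [table_start, table_end).  The entries in the section hold offsets
 * relative to @base_addr; they are rewritten here to absolute addresses
 * so later lookups can compare directly against an instruction address.
 */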
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

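/*
 * Comparison helper and sort() wrapper used for dynamically added
 * tables, whose entries are not guaranteed to arrive sorted by
 * region_start.
 */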
static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

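/*
 * Unlink a table added with unwind_table_add() and free it.
 */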
void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start __maybe_unused, stop __maybe_unused;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;

		for (i = 0; i < 10; i++) {
			printk("region 0x%x-0x%x\n",
			       __start___unwind[i].region_start,
			       __start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

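/*
 * On 64-bit parisc a function symbol resolves to a function descriptor
 * rather than to its first instruction, so dereference the descriptor
 * before comparing against the PC.
 */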
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}

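/*
 * Handle the assembly entry points that have no regular unwind records:
 * the interruption handler, the syscall/interrupt return paths,
 * _switch_to() and (with CONFIG_IRQSTACKS) the IRQ stack trampoline.
 * Returns nonzero if @pc matched one of them and prev_sp/prev_ip have
 * been set up here.
 */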
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text.
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead, but we want to keep it simple here.
	 */
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc == (unsigned long)&ret_from_kernel_thread ||
	    pc == (unsigned long)&syscall_exit) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc == (unsigned long)&intr_return) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc == (unsigned long)&_switch_to_ret) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc == (unsigned long)&_call_on_stack) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif /* CONFIG_IRQSTACKS */

	return 0;
}

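/*
 * Compute prev_sp/prev_ip for the frame described by @info.  When an
 * unwind entry exists for info->ip, the function prologue is scanned
 * instruction by instruction to recover the frame size and the slot in
 * which the return pointer was saved; otherwise the stack is walked
 * blindly in 64-byte steps until a plausible kernel text address turns
 * up.
 */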
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

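/*
 * Seed an unwind_frame_info from a pt_regs snapshot: stack pointer from
 * gr[30], PC from iaoq[0], return pointer from gr[2] and the millicode
 * return register from gr[31].
 */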
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

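/*
 * Start unwinding a task that is not currently running: copy its saved
 * thread registers and substitute the kernel stack pointer and PC that
 * were recorded when the task was switched out.
 */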
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

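/* Read the current stack pointer directly from %r30. */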
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})

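/*
 * Convenience wrapper: unwind the current task from the caller's own
 * context (synthesizing a pt_regs from the current IP and stack pointer
 * when none is supplied), or fall back to the blocked-task path for any
 * other task.
 */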
void unwind_frame_init_task(struct unwind_frame_info *info,
	struct task_struct *task, struct pt_regs *regs)
{
	task = task ? task : current;

	if (task == current) {
		struct pt_regs r;

		if (!regs) {
			memset(&r, 0, sizeof(r));
			r.iaoq[0] = _THIS_IP_;
			r.gr[2] = _RET_IP_;
			r.gr[30] = get_parisc_stackpointer();
			regs = &r;
		}
		unwind_frame_init(info, task, regs);
	} else {
		unwind_frame_init_from_blocked_task(info, task);
	}
}

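/*
 * Advance one frame: compute the previous frame for *next_frame and, if
 * one was found, promote it to the current frame.  Returns -1 when no
 * further frame could be determined.
 *
 * Minimal usage sketch (illustrative only, not a caller in this file):
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_task(&info, current, NULL);
 *	while (unwind_once(&info) >= 0 && info.ip)
 *		printk("ip=%pS\n", (void *)info.ip);
 */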
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

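/*
 * Unwind until the frame's IP no longer looks like a kernel address.
 * On parisc the privilege level lives in the low two bits of the IAOQ,
 * so a nonzero (ip & 3) marks the transition to userspace.
 */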
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

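/*
 * Return the kernel text address "level" frames above the caller, or 0
 * if the walk leaves kernel text or runs out of frames.  The level is
 * biased by two before the walk to account for the frames set up by
 * return_address() itself.
 */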
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	unwind_frame_init_task(&info, current, NULL);

	/* unwind stack */
	level += 2;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}