// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) pr_debug(x)
#else
#define dbg(x...) do { } while (0)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static DEFINE_SPINLOCK(unwind_lock);

/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);
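
/*
 * Unwind data lives in two places: the statically allocated
 * kernel_unwind_table above, which covers the core kernel text, and the
 * unwind_tables list, to which further tables are added with
 * unwind_table_add() and removed with unwind_table_remove().  The list is
 * protected by unwind_lock; the kernel table is set up once in
 * unwind_init() and never modified afterwards, so it can be searched
 * without taking the lock.  Within each table the entries are sorted by
 * region_start and must not overlap, which is what makes the binary
 * search in find_unwind_entry_in_table() below valid.
 */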
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}
static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}
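
/*
 * A minimal usage sketch (hypothetical names, not a verbatim caller):
 * code that maps extra kernel text, such as the module loader, can
 * register the accompanying unwind section and drop it again on unload:
 *
 *	struct unwind_table *ut;
 *
 *	ut = unwind_table_add("mymod", load_addr, gp_value,
 *			      unwind_section_start, unwind_section_end);
 *	...
 *	if (ut)
 *		unwind_table_remove(ut);
 */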
void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start __maybe_unused, stop __maybe_unused;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}
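
/*
 * On 64-bit parisc a function symbol refers to a function descriptor
 * rather than to the code itself; dereference_kernel_function_descriptor()
 * resolves it to the actual entry address, so pc_is_kernel_fn() compares
 * the unwound pc against the real start of fn.
 */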
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc == (unsigned long)&ret_from_kernel_thread ||
	    pc == (unsigned long)&syscall_exit) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc == (unsigned long)&intr_return) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc == (unsigned long)&_switch_to_ret) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc == (unsigned long)&_call_on_stack) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif

	return 0;
}
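
/*
 * unwind_frame_regs() computes the caller's frame for info->ip/info->sp.
 * When an unwind entry exists, the function prologue is scanned forward
 * from region_start: stack adjustments (ldo X(sp),sp / stwm / std,ma)
 * accumulate the frame size, and the stores of rp at sp-20 (32-bit) or
 * sp-16 (64-bit) record where the return pointer was saved;
 * unwind_special() then handles assembly entry points that don't follow
 * this convention.  When no entry exists, the stack is probed blindly in
 * 64-byte steps until a plausible kernel return address is found (which,
 * as the comment below notes, cannot work for module text).
 */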
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}
/* gr30 (%r30) is the stack pointer register on PA-RISC */
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})
void unwind_frame_init_task(struct unwind_frame_info *info,
	struct task_struct *task, struct pt_regs *regs)
{
	task = task ? task : current;

	if (task == current) {
		struct pt_regs r;

		if (!regs) {
			memset(&r, 0, sizeof(r));
			r.iaoq[0] = _THIS_IP_;
			r.gr[2] = _RET_IP_;
			r.gr[30] = get_parisc_stackpointer();
			regs = &r;
		}
		unwind_frame_init(info, task, regs);
	} else {
		unwind_frame_init_from_blocked_task(info, task);
	}
}
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}
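
/*
 * A minimal backtrace loop, as a sketch (return_address() below does
 * essentially this): initialize a frame for the task of interest and call
 * unwind_once() until it fails or ip stops looking like kernel text:
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_task(&info, current, NULL);
 *	while (unwind_once(&info) >= 0 && info.ip &&
 *	       kernel_text_address(info.ip))
 *		printk("called from %pS\n", (void *)info.ip);
 */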
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	/* The low two bits of a PA-RISC address hold the privilege level;
	   they become non-zero once ip is a user-space return address. */
	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	unwind_frame_init_task(&info, current, NULL);

	/* unwind stack */
	level += 2;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}