[linux/fpc-iii.git] arch/arm/kernel/stacktrace.c
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

#include <asm/stacktrace.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument.  Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * With framepointer enabled, a simple function prologue looks like this:
 *	mov	ip, sp
 *	stmdb	sp!, {fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * A simple function epilogue looks like this:
 *	ldm	sp, {fp, sp, pc}
 *
 * Note that with framepointer enabled, even the leaf functions have the same
 * prologue and epilogue, therefore we can ignore the LR value in this case.
 */
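/*
 * After the prologue above, the stack frame looks like this, with fp
 * left pointing at the saved pc slot:
 *
 *	fp      : saved pc  (pc at the time of the stmdb)
 *	fp - 4  : saved lr  (return address, becomes the unwound pc)
 *	fp - 8  : saved ip  (sp on entry, becomes the unwound sp)
 *	fp - 12 : saved fp  (the caller's frame pointer)
 *
 * unwind_frame() below simply reloads fp, sp and pc from these slots.
 */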
int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

	/* check current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);

	return 0;
}
#endif

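/*
 * Walk the stack by repeatedly unwinding from *frame, which the caller
 * must seed with the starting register values.  fn() is invoked for
 * every frame; a non-zero return from fn(), or a failure to unwind any
 * further, terminates the walk.
 */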
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

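/*
 * walk_stackframe() callback: record frame->pc in the trace buffer,
 * honouring the skip count and the no_sched_functions filter.  The
 * non-zero return once the buffer is full stops the walk.
 */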
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		/* We don't want this function nor the caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)__save_stack_trace;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

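/*
 * save_stack_trace_tsk() passes nosched=1 so that scheduler internals
 * (see in_sched_functions()) are filtered out of the reported trace;
 * save_stack_trace() on the current task keeps them.
 */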
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif