/*
 * arch/parisc/kernel/unwind.c
 * (extracted from gitweb blob 69dad5a850a8392f5f520dd164f4dda9a4d5445a,
 *  cor_2_6_31.git)
 */
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/kallsyms.h>
17 #include <asm/uaccess.h>
18 #include <asm/assembly.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/ptrace.h>
22 #include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
/* Debug tracing, compiled in only when DEBUG is defined above. */
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

/* Lower bound of kernel text, used when bounds-checking unwind targets. */
#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
33 extern struct unwind_table_entry __start___unwind[];
34 extern struct unwind_table_entry __stop___unwind[];
36 static spinlock_t unwind_lock;
38 * the kernel unwind block is not dynamically allocated so that
39 * we can call unwind_init as early in the bootup process as
40 * possible (before the slab allocator is initialized)
42 static struct unwind_table kernel_unwind_table __read_mostly;
43 static LIST_HEAD(unwind_tables);
45 static inline const struct unwind_table_entry *
46 find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
48 const struct unwind_table_entry *e = NULL;
49 unsigned long lo, hi, mid;
51 lo = 0;
52 hi = table->length - 1;
54 while (lo <= hi) {
55 mid = (hi - lo) / 2 + lo;
56 e = &table->table[mid];
57 if (addr < e->region_start)
58 hi = mid - 1;
59 else if (addr > e->region_end)
60 lo = mid + 1;
61 else
62 return e;
65 return NULL;
68 static const struct unwind_table_entry *
69 find_unwind_entry(unsigned long addr)
71 struct unwind_table *table;
72 const struct unwind_table_entry *e = NULL;
74 if (addr >= kernel_unwind_table.start &&
75 addr <= kernel_unwind_table.end)
76 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
77 else
78 list_for_each_entry(table, &unwind_tables, list) {
79 if (addr >= table->start &&
80 addr <= table->end)
81 e = find_unwind_entry_in_table(table, addr);
82 if (e)
83 break;
86 return e;
89 static void
90 unwind_table_init(struct unwind_table *table, const char *name,
91 unsigned long base_addr, unsigned long gp,
92 void *table_start, void *table_end)
94 struct unwind_table_entry *start = table_start;
95 struct unwind_table_entry *end =
96 (struct unwind_table_entry *)table_end - 1;
98 table->name = name;
99 table->base_addr = base_addr;
100 table->gp = gp;
101 table->start = base_addr + start->region_start;
102 table->end = base_addr + end->region_end;
103 table->table = (struct unwind_table_entry *)table_start;
104 table->length = end - start + 1;
105 INIT_LIST_HEAD(&table->list);
107 for (; start <= end; start++) {
108 if (start < end &&
109 start->region_end > (start+1)->region_start) {
110 printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
113 start->region_start += base_addr;
114 start->region_end += base_addr;
118 static void
119 unwind_table_sort(struct unwind_table_entry *start,
120 struct unwind_table_entry *finish)
122 struct unwind_table_entry el, *p, *q;
124 for (p = start + 1; p < finish; ++p) {
125 if (p[0].region_start < p[-1].region_start) {
126 el = *p;
127 q = p;
128 do {
129 q[0] = q[-1];
130 --q;
131 } while (q > start &&
132 el.region_start < q[-1].region_start);
133 *q = el;
138 struct unwind_table *
139 unwind_table_add(const char *name, unsigned long base_addr,
140 unsigned long gp,
141 void *start, void *end)
143 struct unwind_table *table;
144 unsigned long flags;
145 struct unwind_table_entry *s = (struct unwind_table_entry *)start;
146 struct unwind_table_entry *e = (struct unwind_table_entry *)end;
148 unwind_table_sort(s, e);
150 table = kmalloc(sizeof(struct unwind_table), GFP_USER);
151 if (table == NULL)
152 return NULL;
153 unwind_table_init(table, name, base_addr, gp, start, end);
154 spin_lock_irqsave(&unwind_lock, flags);
155 list_add_tail(&table->list, &unwind_tables);
156 spin_unlock_irqrestore(&unwind_lock, flags);
158 return table;
161 void unwind_table_remove(struct unwind_table *table)
163 unsigned long flags;
165 spin_lock_irqsave(&unwind_lock, flags);
166 list_del(&table->list);
167 spin_unlock_irqrestore(&unwind_lock, flags);
169 kfree(table);
172 /* Called from setup_arch to import the kernel unwind info */
173 int unwind_init(void)
175 long start, stop;
176 register unsigned long gp __asm__ ("r27");
178 start = (long)&__start___unwind[0];
179 stop = (long)&__stop___unwind[0];
181 spin_lock_init(&unwind_lock);
183 printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
184 start, stop,
185 (stop - start) / sizeof(struct unwind_table_entry));
187 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
188 gp,
189 &__start___unwind[0], &__stop___unwind[0]);
190 #if 0
192 int i;
193 for (i = 0; i < 10; i++)
195 printk("region 0x%x-0x%x\n",
196 __start___unwind[i].region_start,
197 __start___unwind[i].region_end);
200 #endif
201 return 0;
/*
 * Extract a function's entry address from a function pointer: on
 * 64-bit parisc a function pointer refers to a descriptor and the
 * entry address is read from word 2; on 32-bit it is the pointer's
 * first word.
 */
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
210 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
212 extern void handle_interruption(int, struct pt_regs *);
213 static unsigned long *hi = (unsigned long *)&handle_interruption;
215 if (pc == get_func_addr(hi)) {
216 struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
217 dbg("Unwinding through handle_interruption()\n");
218 info->prev_sp = regs->gr[30];
219 info->prev_ip = regs->iaoq[0];
221 return 1;
224 return 0;
227 static void unwind_frame_regs(struct unwind_frame_info *info)
229 const struct unwind_table_entry *e;
230 unsigned long npc;
231 unsigned int insn;
232 long frame_size = 0;
233 int looking_for_rp, rpoffset = 0;
235 e = find_unwind_entry(info->ip);
236 if (e == NULL) {
237 unsigned long sp;
238 extern char _stext[], _etext[];
240 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
242 #ifdef CONFIG_KALLSYMS
243 /* Handle some frequent special cases.... */
245 char symname[KSYM_NAME_LEN];
246 char *modname;
248 kallsyms_lookup(info->ip, NULL, NULL, &modname,
249 symname);
251 dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
253 if (strcmp(symname, "_switch_to_ret") == 0) {
254 info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
255 info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
256 dbg("_switch_to_ret @ %lx - setting "
257 "prev_sp=%lx prev_ip=%lx\n",
258 info->ip, info->prev_sp,
259 info->prev_ip);
260 return;
261 } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
262 strcmp(symname, "syscall_exit") == 0) {
263 info->prev_ip = info->prev_sp = 0;
264 return;
267 #endif
269 /* Since we are doing the unwinding blind, we don't know if
270 we are adjusting the stack correctly or extracting the rp
271 correctly. The rp is checked to see if it belongs to the
272 kernel text section, if not we assume we don't have a
273 correct stack frame and we continue to unwind the stack.
274 This is not quite correct, and will fail for loadable
275 modules. */
276 sp = info->sp & ~63;
277 do {
278 unsigned long tmp;
280 info->prev_sp = sp - 64;
281 info->prev_ip = 0;
282 if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
283 break;
284 info->prev_ip = tmp;
285 sp = info->prev_sp;
286 } while (info->prev_ip < (unsigned long)_stext ||
287 info->prev_ip > (unsigned long)_etext);
289 info->rp = 0;
291 dbg("analyzing func @ %lx with no unwind info, setting "
292 "prev_sp=%lx prev_ip=%lx\n", info->ip,
293 info->prev_sp, info->prev_ip);
294 } else {
295 dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
296 "Save_RP = %d, Millicode = %d size = %u\n",
297 e->region_start, e->region_end, e->Save_SP, e->Save_RP,
298 e->Millicode, e->Total_frame_size);
300 looking_for_rp = e->Save_RP;
302 for (npc = e->region_start;
303 (frame_size < (e->Total_frame_size << 3) ||
304 looking_for_rp) &&
305 npc < info->ip;
306 npc += 4) {
308 insn = *(unsigned int *)npc;
310 if ((insn & 0xffffc000) == 0x37de0000 ||
311 (insn & 0xffe00000) == 0x6fc00000) {
312 /* ldo X(sp), sp, or stwm X,D(sp) */
313 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
314 ((insn & 0x3fff) >> 1);
315 dbg("analyzing func @ %lx, insn=%08x @ "
316 "%lx, frame_size = %ld\n", info->ip,
317 insn, npc, frame_size);
318 } else if ((insn & 0xffe00008) == 0x73c00008) {
319 /* std,ma X,D(sp) */
320 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
321 (((insn >> 4) & 0x3ff) << 3);
322 dbg("analyzing func @ %lx, insn=%08x @ "
323 "%lx, frame_size = %ld\n", info->ip,
324 insn, npc, frame_size);
325 } else if (insn == 0x6bc23fd9) {
326 /* stw rp,-20(sp) */
327 rpoffset = 20;
328 looking_for_rp = 0;
329 dbg("analyzing func @ %lx, insn=stw rp,"
330 "-20(sp) @ %lx\n", info->ip, npc);
331 } else if (insn == 0x0fc212c1) {
332 /* std rp,-16(sr0,sp) */
333 rpoffset = 16;
334 looking_for_rp = 0;
335 dbg("analyzing func @ %lx, insn=std rp,"
336 "-16(sp) @ %lx\n", info->ip, npc);
340 if (!unwind_special(info, e->region_start, frame_size)) {
341 info->prev_sp = info->sp - frame_size;
342 if (e->Millicode)
343 info->rp = info->r31;
344 else if (rpoffset)
345 info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
346 info->prev_ip = info->rp;
347 info->rp = 0;
350 dbg("analyzing func @ %lx, setting prev_sp=%lx "
351 "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
352 info->prev_ip, npc);
356 void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
357 struct pt_regs *regs)
359 memset(info, 0, sizeof(struct unwind_frame_info));
360 info->t = t;
361 info->sp = regs->gr[30];
362 info->ip = regs->iaoq[0];
363 info->rp = regs->gr[2];
364 info->r31 = regs->gr[31];
366 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
367 t ? (int)t->pid : -1, info->sp, info->ip);
370 void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
372 struct pt_regs *r = &t->thread.regs;
373 struct pt_regs *r2;
375 r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
376 if (!r2)
377 return;
378 *r2 = *r;
379 r2->gr[30] = r->ksp;
380 r2->iaoq[0] = r->kpc;
381 unwind_frame_init(info, t, r2);
382 kfree(r2);
385 void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
387 unwind_frame_init(info, current, regs);
390 int unwind_once(struct unwind_frame_info *next_frame)
392 unwind_frame_regs(next_frame);
394 if (next_frame->prev_sp == 0 ||
395 next_frame->prev_ip == 0)
396 return -1;
398 next_frame->sp = next_frame->prev_sp;
399 next_frame->ip = next_frame->prev_ip;
400 next_frame->prev_sp = 0;
401 next_frame->prev_ip = 0;
403 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
404 next_frame->t ? (int)next_frame->t->pid : -1,
405 next_frame->sp, next_frame->ip);
407 return 0;
410 int unwind_to_user(struct unwind_frame_info *info)
412 int ret;
414 do {
415 ret = unwind_once(info);
416 } while (!ret && !(info->ip & 3));
418 return ret;