arch/parisc/kernel/stacktrace.c
/*
 * Stack trace management functions
 *
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 * based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
 * and parisc unwind functions by Randolph Chung <tausq@debian.org>
 *
 * TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
 */
#include <linux/module.h>
#include <linux/stacktrace.h>

#include <asm/unwind.h>
static void dump_trace(struct task_struct *task, struct stack_trace *trace)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	if (task == current) {
		unsigned long sp;
		struct pt_regs r;
HERE:
		/* %r30 is the PA-RISC stack pointer */
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		memset(&r, 0, sizeof(struct pt_regs));
		r.iaoq[0] = (unsigned long)&&HERE;	/* current instruction address */
		r.gr[2] = (unsigned long)__builtin_return_address(0); /* return pointer (rp) */
		r.gr[30] = sp;				/* stack pointer */
		unwind_frame_init(&info, task, &r);
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	/* unwind stack and save entries in stack_trace struct */
	trace->nr_entries = 0;
	while (trace->nr_entries < trace->max_entries) {
		if (unwind_once(&info) < 0 || info.ip == 0)
			break;

		if (__kernel_text_address(info.ip))
			trace->entries[trace->nr_entries++] = info.ip;
	}
}
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
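
For reference, a minimal sketch of how a caller might use this interface, assuming the classic struct stack_trace API from <linux/stacktrace.h> of that era; the function name and buffer size below are illustrative only:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

/* Illustrative caller sketch, not part of stacktrace.c. */
static void example_report_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries     = entries,
		.max_entries = ARRAY_SIZE(entries),
		.nr_entries  = 0,
	};

	/* Fill entries[] with the current kernel call chain. */
	save_stack_trace(&trace);

	/* Print the captured addresses via the generic helper. */
	print_stack_trace(&trace, 0);
}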