linux/fpc-iii.git: arch/s390/kernel/dumpstack.c (blob 1b6081c0aff9238641f995c973560d2f8d33b686)

/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *  - the panic stack which is used if the kernel stack has overflowed
 *  - the asynchronous interrupt stack (cpu related)
 *  - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
             unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                /* Follow the backchain. */
                while (1) {
                        if (func(data, sf->gprs[8]))
                                return sp;
                        low = sp;
                        sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                if (!user_mode(regs)) {
                        if (func(data, regs->psw.addr))
                                return sp;
                }
                low = sp;
                sp = regs->gprs[15];
        }
}

void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
                unsigned long sp)
{
        unsigned long frame_size;

        frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.panic_stack + frame_size - 4096,
                          S390_lowcore.panic_stack + frame_size);
#endif
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
                          S390_lowcore.async_stack + frame_size);
        if (task)
                __dump_trace(func, data, sp,
                             (unsigned long)task_stack_page(task),
                             (unsigned long)task_stack_page(task) + THREAD_SIZE);
        else
                __dump_trace(func, data, sp,
                             S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);

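/*
 * Illustrative sketch (not part of the original file): dump_trace() invokes
 * the supplied dump_trace_func_t callback once for every return address it
 * finds while walking the panic, async and kernel stacks; a non-zero return
 * value from the callback stops the walk.  The struct and function names
 * below are made up for the example.
 *
 *	struct addr_counter {
 *		unsigned int entries;
 *		unsigned int limit;
 *	};
 *
 *	static int count_address(void *data, unsigned long address)
 *	{
 *		struct addr_counter *ac = data;
 *
 *		ac->entries++;
 *		return ac->entries >= ac->limit;  (non-zero stops the walk)
 *	}
 *
 * A caller would then do something like:
 *	dump_trace(count_address, &ac, NULL, current_stack_pointer());
 */
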
static int show_address(void *data, unsigned long address)
{
        printk("([<%016lx>] %pSR)\n", address, (void *)address);
        return 0;
}

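/*
 * Print a "Call Trace:" listing for the given task, starting at sp; if sp is
 * zero, fall back to the task's saved stack pointer or the current one.
 */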
static void show_trace(struct task_struct *task, unsigned long sp)
{
        if (!sp)
                sp = task ? task->thread.ksp : current_stack_pointer();
        printk("Call Trace:\n");
        dump_trace(show_address, NULL, task, sp);
        if (!task)
                task = current;
        debug_show_held_locks(task);
}

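/*
 * Dump up to 20 raw stack words starting at sp (or at the appropriate stack
 * pointer if sp is NULL), then print the call trace.
 */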
void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        stack = sp;
        if (!stack) {
                if (!task)
                        stack = (unsigned long *)current_stack_pointer();
                else
                        stack = (unsigned long *)task->thread.ksp;
        }
        for (i = 0; i < 20; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if ((i * sizeof(long) % 32) == 0)
                        printk("%s       ", i == 0 ? "" : "\n");
                printk("%016lx ", *stack++);
        }
        printk("\n");
        show_trace(task, (unsigned long)sp);
}

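/* Print the last breaking-event address, which is passed in regs->args[0]. */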
static void show_last_breaking_event(struct pt_regs *regs)
{
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

void show_registers(struct pt_regs *regs)
{
        struct psw_bits *psw = &psw_bits(regs->psw);
        char *mode;

        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        if (!user_mode(regs))
                printk(" (%pSR)", (void *)regs->psw.addr);
        printk("\n");
        printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
               psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
        printk(" RI:%x EA:%x", psw->ri, psw->eaba);
        printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
        show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!user_mode(regs))
                show_trace(NULL, regs->gprs[15]);
        show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

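/*
 * Common oops path: print the oops banner, modules and register/stack state,
 * then kill the current task, or panic if the oops happened in interrupt
 * context or panic_on_oops is set.
 */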
void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        lgr_info_log();
        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
               regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
        if (debug_pagealloc_enabled())
                printk("DEBUG_PAGEALLOC");
        printk("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        print_modules();
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}