Linux 4.13.16
[linux/fpc-iii.git] / arch/s390/kernel/dumpstack.c
/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *  - the panic stack which is used if the kernel stack has overflowed
 *  - the asynchronous interrupt stack (cpu related)
 *  - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
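/*
 * Walk one stack area bounded by low/high: report the frame at sp, follow
 * the back_chain until it becomes zero, then look for a pt_regs (interrupt
 * frame) above the last frame and continue on the stack it points to.
 * The return address of each frame is read from sf->gprs[8], the slot in
 * which the s390 stack frame layout saves r14.
 */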
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		if (func(data, sf->gprs[8], 0))
			return sp;
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			if (func(data, sf->gprs[8], 1))
				return sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		if (!user_mode(regs)) {
			if (func(data, regs->psw.addr, 1))
				return sp;
		}
		low = sp;
		sp = regs->gprs[15];
	}
}
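
/*
 * Entry point for stack dumping: walk the panic stack (only with
 * CONFIG_CHECK_STACK), then the asynchronous interrupt stack, and finally
 * the kernel stack of the given task (or of current if task is NULL),
 * calling func for every entry found.
 */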
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
		unsigned long sp)
{
	unsigned long frame_size;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	task = task ?: current;
	__dump_trace(func, data, sp,
		     (unsigned long)task_stack_page(task),
		     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
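
/*
 * dump_trace() callback: print a single backtrace entry. Entries flagged
 * as unreliable are put in parentheses.
 */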
static int show_address(void *data, unsigned long address, int reliable)
{
	if (reliable)
		printk(" [<%016lx>] %pSR \n", address, (void *)address);
	else
		printk("([<%016lx>] %pSR)\n", address, (void *)address);
	return 0;
}
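
/*
 * Print a call trace for the given task. If no stack pointer is supplied,
 * fall back to the task's saved kernel stack pointer, or to the current
 * stack pointer when dumping the running task.
 */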
void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long sp = (unsigned long) stack;

	if (!sp)
		sp = task ? task->thread.ksp : current_stack_pointer();
	printk("Call Trace:\n");
	dump_trace(show_address, NULL, task, sp);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
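
/*
 * Print the last breaking-event address recorded in regs->args[0],
 * resolved to a symbol where possible.
 */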
static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}
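
/*
 * Dump the PSW (with its bits decoded) and all 16 general purpose
 * registers; for kernel mode the PSW address is also resolved to a symbol.
 */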
void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
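
/*
 * Full register dump: print process info, the registers, a stack
 * backtrace for kernel-mode pt_regs, and the last breaking event.
 */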
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);
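
/*
 * Terminal oops handler: serialized by die_lock, it prints an oops banner
 * with the interruption code and ILC, notifies the die chain, dumps the
 * registers and ends in do_exit() or panic().
 */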
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	pr_cont("SMP ");
#endif
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}