/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;
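
/*
 * On s390 general register 15 is the stack pointer by convention; the
 * stack_pointer macro below simply copies r15 into a C variable with a
 * "load address" instruction.
 */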
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
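/*
 * Each kernel stack frame begins with a back chain pointer to the caller's
 * frame. In struct stack_frame the callee-saved registers r6-r15 live in
 * gprs[], so gprs[8] is the saved r14, i.e. the return address that gets
 * printed below.
 */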
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
        if (!task)
                task = current;
        debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long *__r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if ((i * sizeof(long) % 32) == 0)
                        printk("%s       ", i == 0 ? "" : "\n");
                printk(LONG, *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
        print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
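
/*
 * Extract a bit field from the PSW mask: "(~bits + 1) & bits" isolates the
 * lowest set bit of the field, so the division shifts the selected bits
 * down to the least significant position.
 */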
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
        return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
void show_registers(struct pt_regs *regs)
{
        char *mode;

        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
               mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
               mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
               mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
               mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
               mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
        printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
        printk("\n%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
}
void show_regs(struct pt_regs *regs)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!user_mode(regs))
                show_trace(NULL, (unsigned long *) regs->gprs[15]);
        show_last_breaking_event(regs);
}
static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk("User process fault: interruption code 0x%X ", regs->int_code);
        print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
        printk("\n");
        show_regs(regs);
}
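
/*
 * report_bug() uses this to sanity-check a potential BUG() address;
 * on s390 any address is accepted.
 */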
int is_valid_bugaddr(unsigned long addr)
{
        return 1;
}
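
/*
 * psw.addr already points past the instruction that raised the program
 * check; the upper halfword of int_code holds the instruction length, so
 * subtracting it yields the address of the faulting instruction.
 */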
static inline void __user *get_psw_address(struct pt_regs *regs)
{
        return (void __user *)
                ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}
static void __kprobes do_trap(struct pt_regs *regs,
                              int si_signo, int si_code, char *str)
{
        siginfo_t info;

        if (notify_die(DIE_TRAP, str, regs, 0,
                       regs->int_code, si_signo) == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                info.si_signo = si_signo;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = get_psw_address(regs);
                force_sig_info(si_signo, &info, current);
                report_user_fault(regs, si_signo);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else {
                        enum bug_trap_type btt;

                        btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(regs, str);
                }
        }
}
void __kprobes do_per_trap(struct pt_regs *regs)
{
        siginfo_t info;

        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
                return;
        if (!current->ptrace)
                return;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr =
                (void __force __user *) current->thread.per_event.address;
        force_sig_info(SIGTRAP, &info, current);
}
static void default_trap_handler(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                report_user_fault(regs, SIGSEGV);
                do_exit(SIGSEGV); /* We are not prepared to handle it right now. */
        } else
                die(regs, "Unknown program exception");
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
        do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
              "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
              "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
              "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
              "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
              "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
              "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
              "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
              "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
              "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
              "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
              "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
              "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
              "translation exception")
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
        int si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si_code = FPE_FLTRES;
        }
        do_trap(regs, SIGFPE, si_code, "floating point exception");
}
static void __kprobes illegal_op(struct pt_regs *regs)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 __user *location;
        int signal = 0;

        location = get_psw_address(regs);

        if (user_mode(regs)) {
                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
                        return;
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace) {
                                info.si_signo = SIGTRAP;
                                info.si_errno = 0;
                                info.si_code = TRAP_BRKPT;
                                info.si_addr = location;
                                force_sig_info(SIGTRAP, &info, current);
                        } else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        if (get_user(*((__u32 *) (opcode+2)),
                                     (__u32 __user *)(location+1)))
                                return;
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else {
                /*
                 * If we get an illegal op in kernel mode, send it through the
                 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
                 */
                if (notify_die(DIE_BPT, "bpt", regs, 0,
                               3, SIGTRAP) != NOTIFY_STOP)
                        signal = SIGILL;
        }

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal == SIGSEGV)
                do_trap(regs, signal, SEGV_MAPERR, "user address fault");
        else
#endif
        if (signal)
                do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
        __u8 opcode[6];
        __u16 __user *location = NULL;
        int signal = 0;

        location = (__u16 __user *) get_psw_address(regs);

        if (user_mode(regs)) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
              "specification exception");
#endif
static void data_exception(struct pt_regs *regs)
{
        __u16 __user *location;
        int signal = 0;

        location = get_psw_address(regs);

        if (MACHINE_HAS_IEEE)
                asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (user_mode(regs)) {
                __u8 opcode[8];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 __user *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
static void space_switch_exception(struct pt_regs *regs)
{
        /* Set user psw back to home space mode. */
        if (user_mode(regs))
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}
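
/*
 * pgm_check_table[] is indexed by the low 7 bits of the program
 * interruption code delivered by the hardware; trap_init() fills in the
 * handlers for the codes we care about and leaves the rest pointing at
 * default_trap_handler().
 */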
/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x38] = &do_asce_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        /* Enable machine checks early. */
        local_mcck_enable();
}