/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
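
/*
 * Usage sketch (illustrative, not part of this file): a module can hook
 * the die chain to log fatal traps.  The callback gets the notifier
 * event in 'val' and a struct die_args pointer in 'data':
 *
 *	static int my_die_handler(struct notifier_block *self,
 *				  unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		if (val == DIE_OOPS)
 *			printk(KERN_ERR "oops: %s at rip %lx\n",
 *			       args->str, args->regs->rip);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_die_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *
 *	... register_die_notifier(&my_die_nb); ...
 */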
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_disable();
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_disable();
        /* Make sure to not schedule here because we could be running
           on an exception stack. */
        preempt_enable_no_resched();
}
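
/*
 * Note: the preempt_conditional_{sti,cli} pair brackets the handlers that
 * run on IST stacks (do_int3, do_debug, do_stack_segment below): interrupts
 * are re-enabled only if the interrupted context had them enabled, and
 * preemption stays disabled throughout because the handler is still on an
 * exception stack.
 */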
static int kstack_depth_to_print = 12;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
#define call_trace (-1)
#endif
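
/*
 * call_trace selects the backtrace method (see call_trace_setup() at the
 * bottom of this file): -1 = old inexact stack scan only, 0 = both,
 * 1 = DWARF2 unwinder with fallback to the old scan, 2 = unwinder only.
 */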
#ifdef CONFIG_KALLSYMS
# include <linux/kallsyms.h>
void printk_address(unsigned long address)
{
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[128];

        symname = kallsyms_lookup(address, &symsize, &offset,
                                  &modname, namebuf);
        if (!symname) {
                printk(" [<%016lx>]\n", address);
                return;
        }
        if (!modname)
                modname = delim = "";
        printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
               address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
        printk(" [<%016lx>]\n", address);
}
#endif
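
/*
 * Example output (illustrative): with CONFIG_KALLSYMS the delimiters wrap
 * the module name, so lines look like
 *
 *	 [<ffffffff8011186b>] show_trace+0x8b/0x320        (core kernel)
 *	 [<ffffffff880214d0>] :e1000:e1000_intr+0x30/0xe0  (module symbol)
 *
 * without CONFIG_KALLSYMS only the raw address is printed.
 */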
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                         unsigned *usedp, const char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
        unsigned k;

        /*
         * Iterate over all exception stacks, and figure out whether
         * 'stack' is in one of them:
         */
        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end;

                /*
                 * set 'end' to the end of the exception stack.
                 */
                switch (k + 1) {
                /*
                 * TODO: this block is not needed i think, because
                 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
                 * properly too.
                 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                case DEBUG_STACK:
                        end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
                        break;
#endif
                default:
                        end = per_cpu(orig_ist, cpu).ist[k];
                        break;
                }
                /*
                 * Is 'stack' above this exception frame's end?
                 * If yes then skip to the next frame.
                 */
                if (stack >= end)
                        continue;
                /*
                 * Is 'stack' above this exception frame's start address?
                 * If yes then we found the right frame.
                 */
                if (stack >= end - EXCEPTION_STKSZ) {
                        /*
                         * Make sure we only iterate through an exception
                         * stack once. If it comes up for the second time
                         * then there's something wrong going on - just
                         * break out and return NULL:
                         */
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
                /*
                 * If this is a debug stack, and if it has a larger size than
                 * the usual exception stacks, then 'stack' might still
                 * be within the lower portion of the debug stack:
                 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        /*
                         * Black magic. A large debug stack is composed of
                         * multiple exception stack entries, which we
                         * iterate through now. Dont look:
                         */
                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}
static int show_trace_unwind(struct unwind_frame_info *info, void *context)
{
        int n = 0;

        while (unwind(info) == 0 && UNW_PC(info)) {
                n++;
                printk_address(UNW_PC(info));
                if (arch_unw_user_mode(info))
                        break;
        }
        return n;
}
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
{
        const unsigned cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
        unsigned used = 0;

        printk("\nCall Trace:\n");

        if (!tsk)
                tsk = current;

        if (call_trace >= 0) {
                int unw_ret = 0;
                struct unwind_frame_info info;

                if (regs) {
                        if (unwind_init_frame_info(&info, tsk, regs) == 0)
                                unw_ret = show_trace_unwind(&info, NULL);
                } else if (tsk == current)
                        unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
                else {
                        if (unwind_init_blocked(&info, tsk) == 0)
                                unw_ret = show_trace_unwind(&info, NULL);
                }
                if (unw_ret > 0) {
                        if (call_trace == 1 && !arch_unw_user_mode(&info)) {
                                print_symbol("DWARF2 unwinder stuck at %s\n",
                                             UNW_PC(&info));
                                if ((long)UNW_SP(&info) < 0) {
                                        printk("Leftover inexact backtrace:\n");
                                        stack = (unsigned long *)UNW_SP(&info);
                                } else
                                        printk("Full inexact backtrace again:\n");
                        } else if (call_trace >= 1)
                                return;
                        else
                                printk("Full inexact backtrace again:\n");
                } else
                        printk("Inexact backtrace:\n");
        }
        /*
         * Print function call entries within a stack. 'cond' is the
         * "end of stackframe" condition, that the 'stack++'
         * iteration will eventually trigger.
         */
#define HANDLE_STACK(cond) \
        do while (cond) { \
                unsigned long addr = *stack++; \
                if (kernel_text_address(addr)) { \
                        /* \
                         * If the address is either in the text segment of the \
                         * kernel, or in the region which contains vmalloc'ed \
                         * memory, it *may* be the address of a calling \
                         * routine; if so, print it so that someone tracing \
                         * down the cause of the crash will be able to figure \
                         * out the call path that was taken. \
                         */ \
                        printk_address(addr); \
                } \
        } while (0)
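
        /*
         * For illustration, HANDLE_STACK(stack < estack_end) expands to:
         *
         *	do while (stack < estack_end) {
         *		unsigned long addr = *stack++;
         *		if (kernel_text_address(addr))
         *			printk_address(addr);
         *	} while (0);
         *
         * i.e. it walks 'stack' upwards, printing every word that looks
         * like a kernel text address until the end-of-stack condition
         * fails.
         */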
        /*
         * Print function call entries in all stacks, starting at the
         * current stack address. If the stacks consist of nested
         * exceptions:
         */
        for (;;) {
                const char *id;
                unsigned long *estack_end;
                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);

                if (estack_end) {
                        printk(" <%s>", id);
                        HANDLE_STACK (stack < estack_end);
                        printk(" <EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                printk(" <IRQ>");
                                HANDLE_STACK (stack < irqstack_end);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
                                stack = (unsigned long *) (irqstack_end[-1]);
                                irqstack_end = NULL;
                                printk(" <EOI>");
                                continue;
                        }
                }
                break;
        }

        /*
         * This prints the process stack:
         */
        HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK

        printk("\n");
}
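
/*
 * Example of the resulting output (illustrative):
 *
 *	Call Trace:
 *	 <IRQ>  [<ffffffff8010be16>] timer_interrupt+0x36/0x3a0
 *	 <EOI>  [<ffffffff80110d40>] default_idle+0x40/0x90
 *
 * where <IRQ>/<EOI> bracket the interrupt stack and <EOE> marks the end
 * of an exception stack.
 */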
static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
{
        unsigned long *stack;
        int i;
        const int cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        // debugging aid: "show_stack(NULL, NULL);" prints the
        // back trace for this cpu.

        if (rsp == NULL) {
                if (tsk)
                        rsp = (unsigned long *)tsk->thread.rsp;
                else
                        rsp = (unsigned long *)&rsp;
        }

        stack = rsp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *) (irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long) stack & (THREAD_SIZE-1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n");
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
        show_trace(tsk, regs, rsp);
}
void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
        _show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
        show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);
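
/*
 * Usage sketch (illustrative): any kernel code may call dump_stack() to
 * log its own call chain without stopping the machine, e.g.
 *
 *	if (suspicious_condition)
 *		dump_stack();	(non-fatal: just prints the backtrace)
 */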
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = !user_mode(regs);
        unsigned long rsp;
        const int cpu = safe_smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;

        rsp = regs->rsp;

        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
                cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("Stack: ");
                _show_stack(NULL, regs, (unsigned long*)rsp);

                printk("\nCode: ");
                if (regs->rip < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
                                printk(" Bad RIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}
void handle_BUG(struct pt_regs *regs)
{
        struct bug_frame f;
        long len;
        const char *prefix = "";

        if (user_mode(regs))
                return;
        if (__copy_from_user(&f, (const void __user *) regs->rip,
                             sizeof(struct bug_frame)))
                return;
        if (f.filename >= 0 ||
            f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
                return;
        len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
        if (len < 0 || len >= PATH_MAX)
                f.filename = (int)(long)"unmapped filename";
        else if (len > 50) {
                f.filename += len - 50;
                prefix = "...";
        }
        printk("----------- [cut here ] --------- [please bite here ] ---------\n");
        printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n",
               prefix, (char *)(long)f.filename, f.line);
}
void out_of_line_bug(void)
{
        BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        int cpu = safe_smp_processor_id();
        unsigned long flags;

        /* racy, but better than risking deadlock. */
        local_irq_save(flags);
        if (!spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
void __kprobes oops_end(unsigned long flags)
{
        die_owner = -1;
        bust_spinlocks(0);
        die_nest_count--;
        if (die_nest_count)
                /* We still own the lock */
                local_irq_restore(flags);
        else
                /* Nest count reaches zero, release the lock. */
                spin_unlock_irqrestore(&die_lock, flags);
        if (panic_on_oops)
                panic("Fatal exception");
}
void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
        show_registers(regs);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->rip);
        printk(" RSP <%016lx>\n", regs->rsp);
        if (kexec_should_crash(current))
                crash_kexec(regs);
}
void die(const char * str, struct pt_regs * regs, long err)
{
        unsigned long flags = oops_begin();

        handle_BUG(regs);
        __die(str, regs, err);
        oops_end(flags);
        do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, lets at least try
         * to get a message out.
         */
        printk(str, safe_smp_processor_id());
        show_registers(regs);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (panic_on_timeout || panic_on_oops)
                panic("nmi watchdog");
        printk("console shuts up ...\n");
        oops_end(flags);
        nmi_exit();
        local_irq_enable();
        do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, signr))
                        printk(KERN_INFO
                               "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid, str,
                               regs->rip, regs->rsp, error_code);

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        /* kernel trap */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup)
                        regs->rip = fixup->fixup;
                else
                        die(str, regs, error_code);
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}
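
/*
 * For illustration, DO_ERROR( 4, SIGSEGV, "overflow", overflow) below
 * expands to:
 *
 *	asmlinkage void do_overflow(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *						4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */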
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, SIGSEGV))
                        printk(KERN_INFO
                               "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid,
                               regs->rip, regs->rsp, error_code);

                force_sig(SIGSEGV, tsk);
                return;
        }

        /* kernel gp */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup) {
                        regs->rip = fixup->fixup;
                        return;
                }
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog > 0) {
                        nmi_watchdog_tick(regs, reason);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */

        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}
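
/*
 * Background (summary): 'reason' is the NMI status byte from I/O port
 * 0x61; bit 7 (0x80) signals a memory parity/ECC error and bit 6 (0x40)
 * an IOCHK error from a device, which is why the code above tests 0x80
 * and 0x40 and the handlers write the same port to clear and re-enable
 * the lines.
 */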
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Already synced */
        if (eregs == (struct pt_regs *)eregs->rsp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->eflags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
                                   unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

        tsk->thread.debugreg6 = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
                /*
                 * Was the TF flag set by a debugger? If so, clear it now,
                 * so that register information is correct.
                 */
                if (tsk->ptrace & PT_DTRACE) {
                        regs->eflags &= ~TF_MASK;
                        tsk->ptrace &= ~PT_DTRACE;
                }
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        const struct exception_table_entry *fixup;
        fixup = search_exception_tables(regs->rip);
        if (fixup) {
                regs->rip = fixup->fixup;
                return 1;
        }
        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
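
/*
 * Worked example (illustrative): with the zero-divide exception unmasked
 * in the control word (ZM, bit 2, cleared: cwd = 0x037b) and the
 * zero-divide flag pending in the status word (swd = 0x0004):
 *
 *	swd & ~cwd & 0x3f  ==  0x0004 & ~0x037b & 0x3f  ==  0x004
 *
 * which selects the "Zero Divide" case above and sends FPE_FLTDIV.
 */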
asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
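
/*
 * Worked example (illustrative): MXCSR keeps the mask bits at 0x1f80 and
 * the sticky exception flags at 0x3f in a single register.  With the
 * overflow mask (OM, bit 10) cleared and the overflow flag (OE, bit 3)
 * set, mxcsr = 0x1b88:
 *
 *	~((0x1b88 & 0x1f80) >> 7) & (0x1b88 & 0x3f)  ==  ~0x37 & 0x08  ==  0x08
 *
 * which selects the "Overflow" case above and sends FPE_FLTOVF.
 */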
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;

        clts();                 /* Allow maths ops (or we recurse) */
        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
}
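
/*
 * Note: clts() clears CR0.TS, which is what stops FPU instructions from
 * faulting; without it the first FPU op inside this function would raise
 * device-not-available again and recurse (hence the comment above).
 */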
void __init trap_init(void)
{
        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4, &overflow);              /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}
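
/*
 * Gate flavours used above (summary): set_intr_gate() installs a DPL-0
 * interrupt gate, set_system_gate() a DPL-3 gate so the vector can be
 * raised from user space (int3, into, the IA32 syscall vector), and the
 * *_ist() variants additionally make the CPU switch to the named IST
 * exception stack on entry.
 */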
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
        panic_on_oops = 1;
        return 1;
}
__setup("oops=", oops_dummy);
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);
#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
        if (strcmp(s, "old") == 0)
                call_trace = -1;
        else if (strcmp(s, "both") == 0)
                call_trace = 0;
        else if (strcmp(s, "newfallback") == 0)
                call_trace = 1;
        else if (strcmp(s, "new") == 0)
                call_trace = 2;
        return 1;
}
__setup("call_trace=", call_trace_setup);
#endif
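
/*
 * Boot parameter examples (illustrative): "kstack=32" raises the number
 * of raw stack words printed in an oops; "call_trace=old" forces the
 * legacy inexact backtrace scan.
 */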