/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <mach_traps.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
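/*
 * The symbols declared above are the low-level entry stubs in entry.S;
 * each saves register state and then calls the matching do_*() C handler
 * defined later in this file.
 */

/*
 * code_bytes (below) controls how many bytes of machine code around the
 * faulting RIP show_registers() dumps in an oops; it is tunable at boot
 * via the "code_bytes=" parameter (see code_bytes_setup() at the bottom
 * of this file).
 */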
static unsigned int code_bytes = 64;
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	dec_preempt_count();
}
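/*
 * Note the pairing: the preempt_conditional_sti()/preempt_conditional_cli()
 * variants bracket handlers that run on an IST (exception) stack. They
 * raise the preempt count first so that, even with interrupts re-enabled,
 * nothing can schedule away from the per-cpu exception stack.
 */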
int kstack_depth_to_print = 12;
void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[KSYM_NAME_LEN];
	char reliab[4] = "";

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!reliable)
		strcpy(reliab, "? ");
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
		address, reliab, delim, modname, delim, symname, offset, symsize);
#else
	printk(" [<%016lx>]\n", address);
#endif
}
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ...
		 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up a second time then
			 * there's something wrong going on - just break
			 * out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
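/*
 * in_exception_stack() thus returns the top of the IST stack containing
 * 'stack' (reporting its name through *idp), or NULL if 'stack' is not on
 * any exception stack; *usedp remembers which stacks were already visited
 * so a corrupted chain cannot loop forever.
 */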
#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
static inline int valid_stack_ptr(struct thread_info *tinfo,
				void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end - THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}
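/*
 * valid_stack_ptr() works in two modes: with a non-NULL 'end' it bounds the
 * walk to the THREAD_SIZE window below 'end' (used for the IRQ and exception
 * stacks), otherwise it checks that 'p' lies within the process stack that
 * starts at 'tinfo'.
 */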
/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long bp,
				const struct stacktrace_ops *ops, void *data,
				unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + 8) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
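/*
 * A text address found exactly at bp + 8 is a return address in a
 * frame-pointer-linked frame, so it is reported as reliable and the walk
 * follows the saved rbp chain. Other text addresses are marked unreliable
 * ("?" in the trace) only when a frame chain exists at all (bp != 0);
 * without frame pointers there is nothing to compare against.
 */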
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;
	tinfo = task_thread_info(tsk);

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (tsk == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) tsk->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions:
	 */
	while (1) {
		char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = print_context_stack(tinfo, stack, bp, ops,
							data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
						ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}
static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}
static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_address(addr, reliable);
}
static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};
void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
	   unsigned long bp)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
	printk("\n");
}
static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
		unsigned long bp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *)(cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *)(cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL) {
		if (tsk)
			sp = (unsigned long *)tsk->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace(tsk, regs, sp, bp);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	_show_stack(tsk, NULL, sp, 0);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movq %%rbp, %0" : "=r" (bp) :);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &dummy, bp);
}
EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
	u8 *ip;
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;

	sp = regs->sp;
	ip = (u8 *) regs->ip - code_prologue;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned char c;
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
		printk("\n");

		printk(KERN_EMERG "Code: ");
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at RIP */
			ip = (u8 *) regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
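/*
 * 0x0b0f is the ud2 instruction (opcode bytes 0x0f 0x0b) read as a
 * little-endian 16-bit value; BUG() plants a ud2, so a trapping ip that
 * points at one is a valid BUG address.
 */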
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (!regs) {
		oops_exit();
		return;
	}
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;
	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}
notrace __kprobes void
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) ==
	    NOTIFY_STOP)
		return;

	flags = oops_begin();
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs *regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (user_mode(regs)) {
		/*
		 * We want error_code and trap_no set for userspace
		 * faults and kernelspace faults which result in
		 * die(), but not kernelspace faults which are fixed
		 * up. die() gives the process no chance to handle
		 * the signal and notice the kernel fault information,
		 * so that won't result in polluting the information
		 * about previously queued, but not yet delivered,
		 * faults. See also do_general_protection below.
		 */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;

		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
		    printk_ratelimit()) {
			printk(KERN_INFO
			       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			       tsk->comm, tsk->pid, str,
			       regs->ip, regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
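/*
 * Each DO_ERROR*() use below expands to a complete do_<name>() handler,
 * e.g. DO_ERROR(4, SIGSEGV, "overflow", overflow) defines do_overflow(),
 * which the entry.S stub for vector 4 dispatches to.
 */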
DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(KERN_INFO
			       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
			       tsk->comm, tsk->pid,
			       regs->ip, regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		force_sig(SIGSEGV, tsk);
		return;
	}

	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
	    NOTIFY_STOP)
		return;
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
/* Runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* Runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	if (!used_math()) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(me)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();			/* Allow maths ops (or we recurse) */
	restore_fpu_checking(&me->thread.xstate->fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
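/*
 * Gate setup notes for trap_init() below: set_intr_gate() installs a DPL-0
 * interrupt gate, set_system_gate() a DPL-3 gate so userspace may issue the
 * trap directly (int3, into), and the *_ist() variants make the CPU switch
 * to the named IST stack on entry.
 */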
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * initialize the per thread extended state:
	 */
	init_thread_xstate();
	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);
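/*
 * Usage sketch for the boot parameters handled above (values are
 * illustrative): "oops=panic" makes any oops fatal, "kstack=24" prints 24
 * words of raw stack in traces, and "code_bytes=128" widens the Code: dump
 * to 128 bytes (capped at 8192).
 */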