/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>

#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
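/*
 * Background note (a sketch, not wording from the original source): keeping
 * the IDT page-aligned lets the F0 0F workaround map it on its own read-only
 * page, so the erratum's lockup surfaces as a page fault the kernel can
 * decode and turn into the intended trap, instead of hanging the machine.
 */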

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	/* Re-enable interrupts only if the interrupted context had them on */
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
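
/*
 * For reference, an instantiation such as DO_ERROR(4, SIGSEGV, "overflow",
 * overflow) expands to roughly the following handler (illustrative sketch
 * only, assuming the macro bodies above):
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4,
 *			       SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */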

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
							SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs,
				error_code, 1);
		preempt_conditional_cli(regs);
		return;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_no = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_no = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == 16) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap 16
		 * implementations, it's possible we get a spurious trap...
		 */
		return;		/* Spurious trap, no error */
	}
	force_sig_info(SIGFPE, &info, task);
}
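
/*
 * Worked example for the masking above (an illustrative sketch, not text
 * from the original file): with the default x87 control word 0x037f,
 * unmasking only divide-by-zero gives cwd = 0x037b. A 1.0/0.0 then sets ZE
 * in the status word, swd = 0x0004, so err = swd & ~cwd = 0x0004, which the
 * err-decoding chain in math_error() maps to FPE_FLTDIV.
 */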

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#endif

	math_error(regs, error_code, 16);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	math_error(regs, error_code, 19);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use.  Used during context switch.
 */
void __math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}

	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */

	__math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	set_intr_gate(14, &page_fault);
	load_idt(&idt_descr);
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();
}