arch/x86/kernel/traps.c (Linux 3.8-rc7)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/context_tracking.h>

#include <asm/mach_traps.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif
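/*
 * Bitmap of interrupt vectors that are already in use (the architectural
 * exception vectors and the system call vectors are reserved in trap_init()).
 */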
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
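/*
 * Re-enable (or re-disable) interrupts only if they were enabled in the
 * interrupted context, i.e. if X86_EFLAGS_IF is set in the saved flags.
 * The preempt_ variants additionally hold off preemption around the
 * window in which interrupts may be enabled.
 */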
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
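/*
 * Common handling that does not involve sending a signal: returns 0 when the
 * trap has been dealt with here (vm86 trap handled, kernel fault fixed up or
 * died), and -1 when the caller should go on and deliver a signal to the task.
 */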
static int __kprobes
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}
#endif
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
}
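/*
 * DO_ERROR() and DO_ERROR_INFO() generate the simple do_<name>() exception
 * entry points: run the die notifier chain, conditionally re-enable
 * interrupts and hand off to do_trap().  DO_ERROR_INFO() additionally fills
 * in the siginfo delivered with the signal.
 */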
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	exception_enter(regs);						\
	if (notify_die(DIE_TRAP, str, regs, error_code,			\
			trapnr, signr) == NOTIFY_STOP) {		\
		exception_exit(regs);					\
		return;							\
	}								\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
	exception_exit(regs);						\
}
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)	\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	exception_enter(regs);						\
	if (notify_die(DIE_TRAP, str, regs, error_code,			\
			trapnr, signr) == NOTIFY_STOP) {		\
		exception_exit(regs);					\
		return;							\
	}								\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
	exception_exit(regs);						\
}
DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
		regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
		regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
		coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
		BUS_ADRALN, 0)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
		preempt_conditional_sti(regs);
		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
		preempt_conditional_cli(regs);
	}
	exception_exit(regs);
}
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	exception_enter(regs);
	/* Return value not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
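/*
 * #GP handler: kernel-mode faults get a chance to be fixed up via the
 * exception tables (or die), while user-mode faults result in SIGSEGV,
 * optionally with a rate-limited diagnostic line.
 */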
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	exception_enter(regs);
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
#endif

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV, tsk);
exit:
	exception_exit(regs);
}
/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	exception_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	exception_exit(regs);
}
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	exception_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	exception_exit(regs);
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
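/*
 * Entry points for the x87 (#MF) and SIMD (#XM) floating point exceptions;
 * both funnel into math_error() above with the appropriate trap number.
 */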
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(regs);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(regs);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
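/*
 * Weak, empty fallbacks: configurations that provide real thermal and
 * threshold interrupt handlers override these definitions.
 */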
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);

	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
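/*
 * #NM (device not available) is raised on the first FPU instruction executed
 * while CR0.TS is set.  With math emulation and CR0.EM set the instruction is
 * emulated; otherwise the task's FPU state is restored lazily here.
 */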
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(regs);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(regs);
}
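/*
 * Taken when the iret in the kernel exit path itself faults (32-bit only);
 * the offending task is sent SIGILL with si_code ILL_BADSTK.
 */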
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	exception_enter(regs);
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(regs);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
	set_intr_gate(X86_TRAP_PF, &page_fault);
	load_idt(&idt_descr);
}
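/*
 * Install the IDT entries for all architectural exception vectors and mark
 * the exception and system call vectors as used.
 */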
void __init trap_init(void)
{
	int i;

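	/* Probe the BIOS area for the "EISA" signature so EISA_bus is set early. */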
#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif
	set_intr_gate(X86_TRAP_DE, &divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, &bounds);
	set_intr_gate(X86_TRAP_UD, &invalid_op);
	set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
	set_intr_gate(X86_TRAP_NP, &segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, &general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
	set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}