/*
 * include/asm-i386/i387.h
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#ifndef __ASM_I386_I387_H
#define __ASM_I386_I387_H

#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>

extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *);

/*
 * FPU lazy state save handling...
 */

/*
 * The "nop" is needed to make the instructions the same
 * length.
 */
#define restore_fpu(tsk)			\
	alternative_input(			\
		"nop ; frstor %1",		\
		"fxrstor %1",			\
		X86_FEATURE_FXSR,		\
		"m" ((tsk)->thread.i387.fxsave))

extern void kernel_fpu_begin(void);
#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)

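/*
 * Illustrative usage sketch, not part of this header: kernel code that
 * wants to use FPU/SSE/MMX instructions brackets the region with these
 * calls.  kernel_fpu_begin() leaves preemption disabled, so the region
 * must not sleep or schedule:
 *
 *	kernel_fpu_begin();
 *	// ... issue FPU/SSE/MMX instructions here ...
 *	kernel_fpu_end();
 */
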
/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void __save_init_fpu( struct task_struct *tsk )
{
	/* Use more nops than strictly needed in case the compiler
	   varies code */
	alternative_input(
		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
		"fxsave %[fx]\n"
		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
		X86_FEATURE_FXSR,
		[fx] "m" (tsk->thread.i387.fxsave),
		[fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory");
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
	alternative_input(
		GENERIC_NOP8 GENERIC_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#define __unlazy_fpu( tsk ) do {				\
	if (task_thread_info(tsk)->status & TS_USEDFPU) {	\
		__save_init_fpu(tsk);				\
		stts();						\
	} else							\
		tsk->fpu_counter = 0;				\
} while (0)

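/*
 * Illustrative sketch, not part of this header: __unlazy_fpu() is the
 * lazy-FPU half of the context switch.  The switch path, which already
 * runs with preemption disabled, typically does something like:
 *
 *	__unlazy_fpu(prev_p);	// save prev's FPU state and set TS, or
 *				// reset fpu_counter if it never used the FPU
 */
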
#define __clear_fpu( tsk )					\
do {								\
	if (task_thread_info(tsk)->status & TS_USEDFPU) {	\
		asm volatile("fnclex ; fwait");			\
		task_thread_info(tsk)->status &= ~TS_USEDFPU;	\
		stts();						\
	}							\
} while (0)

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu( struct task_struct *tsk )
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

#define unlazy_fpu( tsk ) do {	\
	preempt_disable();	\
	__unlazy_fpu(tsk);	\
	preempt_enable();	\
} while (0)

#define clear_fpu( tsk ) do {	\
	preempt_disable();	\
	__clear_fpu( tsk );	\
	preempt_enable();	\
} while (0)

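/*
 * Illustrative note, not part of this header: code outside the context
 * switch path that only needs a task's FPU image in memory (ptrace, core
 * dumps) uses the preempt-safe variants above, e.g.:
 *
 *	if (tsk_used_math(tsk)) {
 *		unlazy_fpu(tsk);	// flush live registers to tsk->thread.i387
 *		// ... read tsk->thread.i387.fxsave or .fsave ...
 *	}
 */
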
/*
 * FPU state interaction...
 */
extern unsigned short get_fpu_cwd( struct task_struct *tsk );
extern unsigned short get_fpu_swd( struct task_struct *tsk );
extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
extern asmlinkage void math_state_restore(void);

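/*
 * Illustrative sketch, not part of this header: the x87 exception handler
 * uses the control/status word accessors to work out which unmasked
 * exception fired, roughly like:
 *
 *	unsigned short cwd = get_fpu_cwd(task);
 *	unsigned short swd = get_fpu_swd(task);
 *	switch (swd & ~cwd & 0x3f) {
 *	case 0x001: ...		// invalid operation
 *	case 0x004: ...		// divide by zero
 *	}
 */
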
/*
 * Signal frame handlers...
 */
extern int save_i387( struct _fpstate __user *buf );
extern int restore_i387( struct _fpstate __user *buf );

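/*
 * Illustrative sketch, not part of this header (field names hypothetical):
 * the signal code uses this pair when building and tearing down a signal
 * frame, roughly:
 *
 *	err = save_i387(frame->fpstate);	// copy FPU state to the user frame
 *	...
 *	err = restore_i387(buf);		// reload it on sigreturn
 */
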
/*
 * ptrace request handlers...
 */
extern int get_fpregs( struct user_i387_struct __user *buf,
		       struct task_struct *tsk );
extern int set_fpregs( struct task_struct *tsk,
		       struct user_i387_struct __user *buf );

extern int get_fpxregs( struct user_fxsr_struct __user *buf,
			struct task_struct *tsk );
extern int set_fpxregs( struct task_struct *tsk,
			struct user_fxsr_struct __user *buf );

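/*
 * Illustrative sketch, not part of this header: these back the
 * PTRACE_GETFPREGS/PTRACE_SETFPREGS requests (and the FXSR variants).
 * A ptrace request arm looks roughly like:
 *
 *	case PTRACE_GETFPREGS:
 *		if (!tsk_used_math(child))
 *			init_fpu(child);
 *		ret = get_fpregs((struct user_i387_struct __user *)data, child);
 *		break;
 */
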
/*
 * FPU state for core dumps...
 */
extern int dump_fpu( struct pt_regs *regs,
		     struct user_i387_struct *fpu );

#endif /* __ASM_I386_I387_H */