/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
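
/*
 * Note: the !CONFIG_X86_DEBUG_FPU variant above still evaluates its
 * argument (for any side effects) and yields 0, so usage like the
 * following sketch compiles the same way in both configurations:
 *
 *	if (WARN_ON_FPU(err))
 *		handle_bad_state();
 *
 * (handle_bad_state() is a made-up placeholder here.)
 */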

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
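
/*
 * For example, copy_fregs_to_user() below expands (roughly) to:
 *
 *	int err;
 *	asm volatile(ASM_STAC "\n"
 *		     "1: fnsave %[fx]; fwait\n\t"
 *		     "2: " ASM_CLAC "\n"
 *		     ...
 *		     : [err] "=r" (err), [fx] "=m" (*fx)
 *		     : "0"(0), "m" (*fx));
 *
 * so @err starts out as 0 and is only set to -1 by the .fixup code if
 * the instruction faults. This is a simplified sketch, not the exact
 * preprocessor output.
 */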

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (config_enabled(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (config_enabled(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
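
/*
 * Decoding sketch: REX_PREFIX is "0x48, " on 64-bit builds and empty on
 * 32-bit ones, so e.g. XSAVE emits 0x48 0x0f 0xae 0x27, i.e.
 * "xsave64 (%rdi)": ModRM 0x27 selects /4 (XSAVE) with (%rdi) as the
 * single memory operand. The .byte form keeps the file buildable with
 * assemblers that don't know these instructions yet.
 */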

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted xsave area format, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved on
 * context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
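
/*
 * In other words: the registers on this CPU still hold @fpu's state from
 * the last time the task ran here, and nothing has invalidated them, so
 * a full restore can be skipped. Caller sketch:
 *
 *	if (fpu_want_lazy_restore(new_fpu, cpu))
 *		fpu.preload = 0;
 *
 * (this is how switch_fpu_prepare() below uses it).
 */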

/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}

/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * do try to avoid using these on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 *
 * (A usage sketch follows switch_fpu_finish() below.)
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU/xsave processors, or in lazy mode if the past 5
	 * consecutive context switches used math.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * Misc helper functions:
 */

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(&new_fpu->state);
}
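
/*
 * Usage sketch (roughly how the context switch path pairs the two
 * stages; simplified, not the verbatim scheduler code):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	... switch stacks, segments, etc. ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */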

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the saved state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
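
/*
 * For example, the currently enabled xfeature bits (XCR0) can be read
 * back with:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *
 * Both instructions raise #UD if CR4.OSXSAVE is clear, and XSETBV is
 * additionally privileged (CPL 0 only).
 */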

#endif /* _ASM_X86_FPU_INTERNAL_H */