#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d
#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2
#define XSTATE_YMM	0x4

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE		64
#define XSAVE_HDR_OFFSET	FXSAVE_SIZE

#define XSAVE_YMM_SIZE		256
#define XSAVE_YMM_OFFSET	(XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
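/*
 * Layout implied by the constants above: the legacy FXSAVE/FXRSTOR
 * image fills the first FXSAVE_SIZE (512) bytes, the 64-byte xsave
 * header follows at XSAVE_HDR_OFFSET (512), and the 256-byte YMM save
 * area therefore starts at XSAVE_YMM_OFFSET = 64 + 512 = 576.
 */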
/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif
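/*
 * Note on the hand-assembled opcodes used below: 0x0f,0xae is the
 * opcode group containing the xsave family, with the ModRM byte
 * selecting both the operation and (%rdi) as the memory operand --
 * 0x27 encodes xsave, 0x2f xrstor and 0x37 xsaveopt.  On 64-bit,
 * REX_PREFIX prepends the REX.W byte (0x48) so the 64-bit forms of
 * the instructions are emitted.
 */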
extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf;
extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}
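/*
 * Illustrative caller sketch (hypothetical; "tsk" and the surrounding
 * context are assumptions, not taken from this header):
 *
 *	err = fpu_xrstor_checking(&tsk->thread.fpu.state->xsave);
 *
 * A non-zero err means the xrstor faulted (e.g. on a bad save image)
 * and the fixup path stored -1 in it.
 */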
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2: " ASM_CLAC "\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
}
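/*
 * Minimal usage sketch (hypothetical signal-delivery style caller;
 * the buffer and error handling are assumptions):
 *
 *	struct xsave_struct __user *buf;	// 64-byte aligned user area
 *
 *	if (xsave_user(buf))
 *		return -EFAULT;
 *
 * The all-ones eax/edx mask asks the CPU to save every enabled state
 * component, and xsave requires the destination to be 64-byte aligned.
 */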
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2: " ASM_CLAC "\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
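/*
 * Minimal usage sketch (hypothetical sigreturn-style caller; names are
 * assumptions):
 *
 *	if (xrestore_user(buf, pcntxt_mask))
 *		return -EFAULT;
 *
 * The mask (passed in edx:eax) limits which state components get
 * restored; pcntxt_mask covers everything the OS has enabled.
 */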
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}
static inline void xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}
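/*
 * Kernel-buffer save/restore sketch (illustrative only; "xs" is an
 * assumed, suitably aligned struct xsave_struct pointer):
 *
 *	xsave_state(xs, pcntxt_mask);	// save all OS-enabled components
 *	...
 *	xrstor_state(xs, pcntxt_mask);	// put them back
 *
 * Unlike the *_user() helpers above, these have no fault fixup and
 * must only be used on kernel memory.
 */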
static inline void fpu_xsave(struct fpu *fpu)
{
	/* This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	alternative_input(
		".byte " REX_PREFIX "0x0f,0xae,0x27",
		".byte " REX_PREFIX "0x0f,0xae,0x37",
		X86_FEATURE_XSAVEOPT,
		[fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
		"memory");
}
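/*
 * How the alternative above works: at boot, alternative_input() patches
 * the plain xsave opcode (ModRM 0x27) over to xsaveopt (ModRM 0x37)
 * when the CPU advertises X86_FEATURE_XSAVEOPT.  The "D" constraint
 * pins the buffer address in %rdi, matching the (%rdi) addressing mode
 * hard-coded in the ModRM bytes, and eax/edx are both -1 so every
 * enabled state component is saved.
 */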