/*
 * x86 FPU boot time init code:
 */
4 #include <asm/fpu/internal.h>
5 #include <asm/tlbflush.h>
7 #include <linux/sched.h>
/*
 * Initialize the TS bit in CR0 according to the style of context-switch
 * in use: per the strategy comment further down, 'lazy' switching keeps
 * CR0::TS set so the first FPU use faults, 'eager' switching does not.
 *
 * NOTE(review): the extraction dropped this function's braces and both
 * branch bodies (presumably stts()/clts()) -- verify against upstream.
 */
13 static void fpu__init_cpu_ctx_switch(void)
15 if (!boot_cpu_has(X86_FEATURE_EAGER_FPU
))
/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 * set the OSFXSR/OSXMMEXCPT bits in cr4_mask, clear TS and EM in CR0,
 * then flush any pending x87 state with FNINIT (or initialize the
 * soft-FPU state instead when math emulation is configured).
 *
 * NOTE(review): the extraction dropped the conditionals guarding each
 * cr4_mask update, the read_cr0()/write_cr0() calls around the CR0
 * update, and the braces -- verify against upstream.
 */
24 static void fpu__init_cpu_generic(void)
27 unsigned long cr4_mask
= 0;
30 cr4_mask
|= X86_CR4_OSFXSR
;
32 cr4_mask
|= X86_CR4_OSXMMEXCPT
;
34 cr4_set_bits(cr4_mask
);
37 cr0
&= ~(X86_CR0_TS
|X86_CR0_EM
); /* clear TS and EM */
42 /* Flush out any pending x87 state: */
43 #ifdef CONFIG_MATH_EMULATION
/*
 * NOTE(review): the '¤t' below is a mis-encoding of '&current'
 * ('&curr' was interpreted as the HTML entity '&curren;') -- the
 * argument should read '&current->thread.fpu.state.soft'.
 */
45 fpstate_init_soft(¤t
->thread
.fpu
.state
.soft
);
48 asm volatile ("fninit");
/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 * runs the generic CR0/CR4 setup, then xstate CPU setup, then picks the
 * context-switch TS handling.
 *
 * NOTE(review): the function's braces were dropped by the extraction.
 */
54 void fpu__init_cpu(void)
56 fpu__init_cpu_generic();
57 fpu__init_cpu_xstate();
58 fpu__init_cpu_ctx_switch();
/*
 * The earliest FPU detection code.
 *
 * Set the X86_FEATURE_FPU CPU-capability bit based on trying to execute
 * an actual sequence of FPU instructions: FNINIT then FNSTSW/FNSTCW,
 * accepting the FPU as present when the status word reads 0 and the
 * control-word bits masked by 0x103f read back as 0x003f.
 *
 * NOTE(review): the declarations and pre-initialization of cr0/fsw/fcw,
 * the read_cr0()/write_cr0() calls, the 'else' before clear_cpu_cap(),
 * the condition guarding the pr_emerg() branch, and the braces are
 * missing from the extraction -- verify against upstream.
 */
67 static void fpu__init_system_early_generic(struct cpuinfo_x86
*c
)
75 cr0
&= ~(X86_CR0_TS
| X86_CR0_EM
);
78 asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
79 : "+m" (fsw
), "+m" (fcw
));
81 if (fsw
== 0 && (fcw
& 0x103f) == 0x003f)
82 set_cpu_cap(c
, X86_FEATURE_FPU
);
84 clear_cpu_cap(c
, X86_FEATURE_FPU
);
86 #ifndef CONFIG_MATH_EMULATION
88 pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
/*
 * Boot time FPU feature detection code:
 *
 * Mask of MXCSR bits supported by the CPU; starts out all-ones and is
 * narrowed by fpu__init_system_mxcsr() below.
 */
98 unsigned int mxcsr_feature_mask __read_mostly
= 0xffffffffu
;
/*
 * Derive mxcsr_feature_mask from the mxcsr_mask the CPU reports via
 * an FXSAVE probe.
 *
 * NOTE(review): the extraction dropped the feature guard around the
 * FXSAVE probe, the 'if (mask == 0) use the default mask' fallback
 * that the interior comment refers to, and the braces -- verify
 * against upstream.
 */
100 static void __init
fpu__init_system_mxcsr(void)
102 unsigned int mask
= 0;
105 /* Static because GCC does not get 16-byte stack alignment right: */
106 static struct fxregs_state fxregs __initdata
;
108 asm volatile("fxsave %0" : "+m" (fxregs
));
110 mask
= fxregs
.mxcsr_mask
;
/*
 * If zero then use the default features mask, which has all
 * features set, except the denormals-are-zero feature bit:
 */
120 mxcsr_feature_mask
&= mask
;
/*
 * Once per bootup FPU initialization sequences that will run on most
 * x86 CPUs.
 *
 * NOTE(review): the function's braces were dropped by the extraction.
 */
126 static void __init
fpu__init_system_generic(void)
/*
 * Set up the legacy init FPU context. (xstate init might overwrite this
 * with a more modern format, if the CPU supports it.)
 */
132 fpstate_init_fxstate(&init_fpstate
.fxsave
);
134 fpu__init_system_mxcsr();
/*
 * Size of the FPU context state. All tasks in the system use the
 * same context size, regardless of what portion they use.
 * This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
 */
143 unsigned int xstate_size
;
144 EXPORT_SYMBOL_GPL(xstate_size
);
146 /* Get alignment of the TYPE. */
147 #define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs.
 *
 * NOTE(review): the macro's final continuation line (presumably
 * "TYPE_ALIGN(TYPE)))") is missing from the extraction -- verify
 * against upstream.
 */
155 #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
156 BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
/*
 * We append the 'struct fpu' to the task_struct:
 * compute the dynamic task_struct size by subtracting the static size
 * of the register-state member and adding back the runtime-calculated
 * xstate_size, then publish it via arch_task_struct_size.
 *
 * NOTE(review): the function's braces were dropped by the extraction.
 */
162 static void __init
fpu__init_task_struct_size(void)
164 int task_size
= sizeof(struct task_struct
);
/*
 * Subtract off the static size of the register state.
 * It potentially has a bunch of padding.
 */
170 task_size
-= sizeof(((struct task_struct
*)0)->thread
.fpu
.state
);
/*
 * Add back the dynamically-calculated register state size.
 */
176 task_size
+= xstate_size
;
/*
 * We dynamically size 'struct fpu', so we require that
 * it be at the end of 'thread_struct' and that
 * 'thread_struct' be at the end of 'task_struct'. If
 * you hit a compile error here, check the structure to
 * see if something got added to the end.
 */
185 CHECK_MEMBER_AT_END_OF(struct fpu
, state
);
186 CHECK_MEMBER_AT_END_OF(struct thread_struct
, fpu
);
187 CHECK_MEMBER_AT_END_OF(struct task_struct
, thread
);
189 arch_task_struct_size
= task_size
;
/*
 * Set up the xstate_size based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 *
 * NOTE(review): the extraction dropped the 'on_boot_cpu = 0' update,
 * the conditionals selecting which of the three xstate_size
 * assignments runs (soft-FPU vs. FXSR vs. plain FSAVE register
 * formats), and the braces -- verify against upstream.
 */
198 static void __init
fpu__init_system_xstate_size_legacy(void)
/* One-shot guard: this should only ever run on the boot CPU. */
200 static int on_boot_cpu __initdata
= 1;
202 WARN_ON_FPU(!on_boot_cpu
);
/*
 * Note that xstate_size might be overwritten later during
 * fpu__init_system_xstate().
 */
/*
 * Disable xsave as we do not support it if i387
 * emulation is enabled.
 */
215 setup_clear_cpu_cap(X86_FEATURE_XSAVE
);
216 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT
);
217 xstate_size
= sizeof(struct swregs_state
);
220 xstate_size
= sizeof(struct fxregs_state
);
222 xstate_size
= sizeof(struct fregs_state
);
/*
 * Quirk: we don't yet handle the XSAVES* instructions
 * correctly, as we don't correctly convert between
 * standard and compacted format when interfacing
 * with user-space - so disable it for now.
 *
 * The difference is small: with recent CPUs the
 * compacted format is only marginally smaller than
 * the standard FPU state format.
 *
 * ( This is easy to backport while we are fixing
 *   [NOTE(review): rest of sentence missing in extraction] )
 */
237 setup_clear_cpu_cap(X86_FEATURE_XSAVES
);
/*
 * FPU context switching strategies:
 *
 * Against popular belief, we don't do lazy FPU saves, due to the
 * task migration complications it brings on SMP - we only do
 * [NOTE(review): rest of sentence missing in extraction]
 *
 * 'lazy' is the traditional strategy, which is based on setting
 * CR0::TS to 1 during context-switch (instead of doing a full
 * restore of the FPU state), which causes the first FPU instruction
 * after the context switch (whenever it is executed) to fault - at
 * which point we lazily restore the FPU state into FPU registers.
 *
 * Tasks are of course under no obligation to execute FPU instructions,
 * so it can easily happen that another context-switch occurs without
 * a single FPU instruction being executed. If we eventually switch
 * back to the original task (that still owns the FPU) then we have
 * not only saved the restores along the way, but we also have the
 * FPU ready to be used for the original task.
 *
 * 'eager' switching is used on modern CPUs, there we switch the FPU
 * state during every context switch, regardless of whether the task
 * has used FPU instructions in that time slice or not. This is done
 * because modern FPU context saving instructions are able to optimize
 * state saving and restoration in hardware: they can detect both
 * unused and untouched FPU state and optimize accordingly.
 *
 * [ Note that even in 'lazy' mode we might optimize context switches
 *   to use 'eager' restores, if we detect that a task is using the FPU
 *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
 */
/* Boot-selected eager-FPU mode; AUTO unless "eagerfpu=" overrides it. */
271 static enum { AUTO
, ENABLE
, DISABLE
} eagerfpu
= AUTO
;
/*
 * Parse the "eagerfpu=" boot option: accepts "on", "off" or "auto".
 *
 * NOTE(review): the assignments to 'eagerfpu' performed by each strcmp
 * branch, the handler's return statement, and the braces are missing
 * from the extraction -- verify against upstream.
 */
273 static int __init
eager_fpu_setup(char *s
)
275 if (!strcmp(s
, "on"))
277 else if (!strcmp(s
, "off"))
279 else if (!strcmp(s
, "auto"))
283 __setup("eagerfpu=", eager_fpu_setup
);
/*
 * Pick the FPU context switching strategy:
 * auto-enable eager switching when XSAVEOPT is present, reconcile
 * eager-only xstate features with an "eagerfpu=off" request, and
 * force the EAGER_FPU capability when eager mode was selected.
 *
 * NOTE(review): the extraction dropped the 'on_boot_cpu = 0' update,
 * the statement executed by the XSAVEOPT auto-enable branch, any
 * 'else' of the eagerfpu == DISABLE branch, and several closing
 * braces -- verify against upstream.  Also: 'on_boot_cpu' is declared
 * 'bool' here but 'int' in fpu__init_system_xstate_size_legacy();
 * consider making the two consistent.
 */
288 static void __init
fpu__init_system_ctx_switch(void)
/* One-shot guard: this should only ever run on the boot CPU. */
290 static bool on_boot_cpu __initdata
= 1;
292 WARN_ON_FPU(!on_boot_cpu
);
/* Warn if any task already has an active FPU context at this point. */
295 WARN_ON_FPU(current
->thread
.fpu
.fpstate_active
);
296 current_thread_info()->status
= 0;
298 /* Auto enable eagerfpu for xsaveopt */
299 if (boot_cpu_has(X86_FEATURE_XSAVEOPT
) && eagerfpu
!= DISABLE
)
/*
 * Eager-only xstate features conflict with "eagerfpu=off": report
 * and drop those features from xfeatures_mask in that case.
 */
302 if (xfeatures_mask
& XFEATURE_MASK_EAGER
) {
303 if (eagerfpu
== DISABLE
) {
304 pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
305 xfeatures_mask
& XFEATURE_MASK_EAGER
);
306 xfeatures_mask
&= ~XFEATURE_MASK_EAGER
;
312 if (eagerfpu
== ENABLE
)
313 setup_force_cpu_cap(X86_FEATURE_EAGER_FPU
);
/* Report which strategy ended up selected. */
315 printk(KERN_INFO
"x86/fpu: Using '%s' FPU context switches.\n", eagerfpu
== ENABLE
? "eager" : "lazy");
/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes: early detection
 * first, then the generic/legacy/xstate setup, then task-struct sizing
 * and the context-switch strategy.
 *
 * NOTE(review): braces and some setup lines between the early
 * detection call and the later init calls are missing from the
 * extraction -- verify against upstream.
 */
322 void __init
fpu__init_system(struct cpuinfo_x86
*c
)
324 fpu__init_system_early_generic(c
);
/*
 * The FPU has to be operational for some of the
 * later FPU init activities:
 *
 * But don't leave CR0::TS set yet, as some of the FPU setup
 * methods depend on being able to execute FPU instructions
 * that will fault on a set TS, such as the FXSAVE in
 * fpu__init_system_mxcsr().
 */
340 fpu__init_system_generic();
341 fpu__init_system_xstate_size_legacy();
342 fpu__init_system_xstate();
343 fpu__init_task_struct_size();
345 fpu__init_system_ctx_switch();
/*
 * Boot parameter to turn off FPU support and fall back to math-emu:
 * "no387" clears the X86_FEATURE_FPU capability.
 *
 * NOTE(review): the handler's braces and return statement are missing
 * from the extraction; __setup handlers conventionally return 1.
 */
351 static int __init
no_387(char *s
)
353 setup_clear_cpu_cap(X86_FEATURE_FPU
);
356 __setup("no387", no_387
);
/*
 * Disable all xstate CPU features: the "noxsave" boot option clears
 * every xstate-related capability via fpu__xstate_clear_all_cpu_caps().
 *
 * NOTE(review): braces and return statement missing from the
 * extraction; __setup handlers conventionally return 1.
 */
361 static int __init
x86_noxsave_setup(char *s
)
366 fpu__xstate_clear_all_cpu_caps();
370 __setup("noxsave", x86_noxsave_setup
);
/*
 * Disable the XSAVEOPT instruction specifically: the "noxsaveopt" boot
 * option clears only the XSAVEOPT capability.
 *
 * NOTE(review): braces and return statement missing from the
 * extraction; __setup handlers conventionally return 1.
 */
375 static int __init
x86_noxsaveopt_setup(char *s
)
377 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT
);
381 __setup("noxsaveopt", x86_noxsaveopt_setup
);
/*
 * Disable the XSAVES instruction: the "noxsaves" boot option clears
 * only the XSAVES capability.
 *
 * NOTE(review): braces and return statement missing from the
 * extraction; __setup handlers conventionally return 1.
 */
386 static int __init
x86_noxsaves_setup(char *s
)
388 setup_clear_cpu_cap(X86_FEATURE_XSAVES
);
392 __setup("noxsaves", x86_noxsaves_setup
);
/*
 * Disable FX save/restore and SSE support: the "nofxsr" boot option
 * clears the FXSR, FXSR_OPT and XMM capabilities together.
 *
 * NOTE(review): braces and return statement missing from the
 * extraction; __setup handlers conventionally return 1.
 */
397 static int __init
x86_nofxsr_setup(char *s
)
399 setup_clear_cpu_cap(X86_FEATURE_FXSR
);
400 setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT
);
401 setup_clear_cpu_cap(X86_FEATURE_XMM
);
405 __setup("nofxsr", x86_nofxsr_setup
);