arch/x86/kernel/fpu/core.c
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/fpu/xstate.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		__cpu_invalidate_fpregs_state();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
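
/*
 * Illustrative usage sketch (not part of the original file): code that
 * wants to execute FPU/SIMD instructions in the kernel brackets them
 * with the begin/end pair, and from interrupt context it must check
 * irq_fpu_usable() first.  crc_simd()/crc_scalar() are hypothetical
 * placeholder names:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		crc_simd(buf, len);	// may clobber FPU/SIMD registers
 *		kernel_fpu_end();
 *	} else {
 *		crc_scalar(buf, len);	// integer-only fallback
 *	}
 */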
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;		/* all exceptions masked, 64-bit precision, round to nearest */
	fp->swd = 0xffff0000u;		/* status word: no exceptions pending, TOS == 0 */
	fp->twd = 0xffffffffu;		/* tag word: all eight registers empty */
	fp->fos = 0xffff0000u;
}
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	/*
	 * XRSTORS requires that this bit is set in xcomp_bv, or
	 * it will #GP. Make sure it is replaced after the memset().
	 */
	if (static_cpu_has(X86_FEATURE_XSAVES))
		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
					       xfeatures_mask;
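	/*
	 * Worked example (illustrative): XCOMP_BV_COMPACTED_FORMAT is bit 63
	 * of xcomp_bv, so with e.g. xfeatures_mask == 0x7 (FP | SSE | YMM)
	 * the value written above is 0x8000000000000007.
	 */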
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		copy_kernel_to_fpregs(&src_fpu->state);
	}
	preempt_enable();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}
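
/*
 * Caller sketch (hedged, not part of the original file): in kernels of
 * this vintage fpu__copy() is invoked on the fork() path, roughly:
 *
 *	int arch_dup_task_struct(struct task_struct *dst,
 *				 struct task_struct *src)
 *	{
 *		...
 *		return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 *
 * so the child's fpstate is filled straight from the parent's live
 * registers, with no intermediate buffer.
 */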
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}
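
/*
 * Usage sketch (hedged): the ptrace/regset handlers are the typical
 * callers -- e.g. xfpregs_get() in arch/x86/kernel/fpu/regset.c calls
 * fpu__activate_fpstate_read() before copying a traced task's fpstate
 * out, and the matching *_set() handlers call
 * fpu__activate_fpstate_write() below before patching it.
 */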
/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		__fpu_invalidate_fpregs_state(fpu);
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}
/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'.  Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'.  Ensure that no
	 * CPU thinks that its fpregs match the fpstate.  This
	 * ensures we will not be lazy and skip an XRSTOR in the
	 * future.
	 */
	__fpu_invalidate_fpregs_state(fpu);
}
/*
 * This function must be paired with fpu__current_fpstate_write_begin().
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date.  Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary.  Context switches can happen again.
	 */
	preempt_enable();
}
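
/*
 * Pairing sketch (illustrative): a caller that wants to edit the
 * current task's fpstate in place would do:
 *
 *	fpu__current_fpstate_write_begin();
 *	current->thread.fpu.state.xsave.<some_field> = ...;	// hypothetical edit
 *	fpu__current_fpstate_write_end();
 *
 * with preemption held off for the whole window by the begin/end pair.
 */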
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}
/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
	if (static_cpu_has(X86_FEATURE_FPU)) {
		fpu__activate_curr(fpu);
		user_fpu_begin();
		copy_init_fpstate_to_fpregs();
	}
}
/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
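		/*
		 * Worked example (illustrative): in MXCSR the exception flags
		 * sit at bits 0-5 (0x3f) and the mask bits at bits 7-12
		 * (0x1f80).  If ZM (0x200) is clear and a divide by zero sets
		 * ZE (0x004), then ~(mxcsr >> 7) has bit 2 set, so
		 * err & 0x004 is non-zero and FPE_FLTDIV is returned below.
		 */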
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
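
/*
 * Caller sketch (hedged): the x86 trap code consumes the return value --
 * math_error() in arch/x86/kernel/traps.c uses it roughly like:
 *
 *	info.si_code = fpu__exception_code(fpu, trapnr);
 *	if (!info.si_code)
 *		return;			// spurious trap: not an error
 *	force_sig_info(SIGFPE, &info, task);
 *
 * the exact surrounding code differs across kernel versions.
 */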