/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>

#include <asm/cputype.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"
/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
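
/*
 * last_VFP_context[cpu] tracks which thread's VFP state, if any, is
 * currently held in that CPU's VFP register bank.  A NULL entry means
 * the hardware holds no thread's state, so the next VFP use on that
 * CPU must trap and reload.
 */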
union vfp_state *last_VFP_context[NR_CPUS];
/*
 * Used in startup: set to non-zero if VFP checks fail
 * After startup, holds VFP architecture
 */
unsigned int VFP_arch;
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu;

        memset(vfp, 0, sizeof(union vfp_state));

        vfp->hard.fpexc = FPEXC_EN;
        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
         * that the modification of last_VFP_context[] and hardware disable
         * are done for the same CPU and without preemption.
         */
        cpu = get_cpu();
        if (last_VFP_context[cpu] == vfp)
                last_VFP_context[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();
}
static void vfp_thread_exit(struct thread_info *thread)
{
        /* release case: Per-thread VFP cleanup. */
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu = get_cpu();

        if (last_VFP_context[cpu] == vfp)
                last_VFP_context[cpu] = NULL;
        put_cpu();
}
static void vfp_thread_copy(struct thread_info *thread)
{
        struct thread_info *parent = current_thread_info();

        vfp_sync_hwstate(parent);
        thread->vfpstate = parent->vfpstate;
}
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        struct thread_info *thread = v;

        switch (cmd) {
        case THREAD_NOTIFY_SWITCH: {
                u32 fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
                unsigned int cpu = thread->cpu;

                /*
                 * On SMP, if VFP is enabled, save the old state in
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
                if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
                        vfp_save_state(last_VFP_context[cpu], fpexc);
                        last_VFP_context[cpu]->hard.cpu = cpu;
                }

                /*
                 * Thread migration, just force the reloading of the
                 * state on the new CPU in case the VFP registers
                 * contain stale data.
                 */
                if (thread->vfpstate.hard.cpu != cpu)
                        last_VFP_context[cpu] = NULL;
#endif

                /*
                 * Always disable VFP so we can lazily save/restore the
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
                break;
        }

        case THREAD_NOTIFY_FLUSH:
                vfp_thread_flush(thread);
                break;

        case THREAD_NOTIFY_EXIT:
                vfp_thread_exit(thread);
                break;

        case THREAD_NOTIFY_COPY:
                vfp_thread_copy(thread);
                break;
        }

        return NOTIFY_DONE;
}
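
/*
 * Note on the lazy scheme above: FPEXC_EN is cleared on every thread
 * switch, so the incoming thread's first VFP instruction takes an
 * undefined-instruction trap into vfp_support_entry, which reloads the
 * thread's state only if this CPU does not already hold it.
 */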
static struct notifier_block vfp_notifier_block = {
        .notifier_call  = vfp_notifier,
};
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
        siginfo_t info;

        memset(&info, 0, sizeof(info));

        info.si_signo = SIGFPE;
        info.si_code = sicode;
        info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

        /*
         * This is the same as NWFPE, because it's not clear what
         * this is used for
         */
        current->thread.error_code = 0;
        current->thread.trap_no = 6;

        send_sig_info(SIGFPE, &info, current);
}
static void vfp_panic(char *reason, u32 inst)
{
        int i;

        printk(KERN_ERR "VFP: Error: %s\n", reason);
        printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
                fmrx(FPEXC), fmrx(FPSCR), inst);
        for (i = 0; i < 32; i += 2)
                printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
                       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
        int si_code = 0;

        pr_debug("VFP: raising exceptions %08x\n", exceptions);

        if (exceptions == VFP_EXCEPTION_ERROR) {
                vfp_panic("unhandled bounce", inst);
                vfp_raise_sigfpe(0, regs);
                return;
        }

        /*
         * If any of the status flags are set, update the FPSCR.
         * Comparison instructions always return at least one of
         * these flags set.
         */
        if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
                fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

        fpscr |= exceptions;

        fmxr(FPSCR, fpscr);

#define RAISE(stat, en, sig)                            \
        if (exceptions & stat && fpscr & en)            \
                si_code = sig;

        /*
         * These are arranged in priority order, lowest to highest.
         */
        RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
        RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
        RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
        RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
        RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

        if (si_code)
                vfp_raise_sigfpe(si_code, regs);
}
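
/*
 * The RAISE() tests above run from lowest priority to highest, and each
 * match overwrites si_code, so when several cumulative flags are set the
 * highest-priority trapped exception wins: e.g. FPSCR_OFC together with
 * FPSCR_IXC (both trap-enabled) reports FPE_FLTOVF rather than FPE_FLTRES.
 */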
/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
        u32 exceptions = VFP_EXCEPTION_ERROR;

        pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

        if (INST_CPRTDO(inst)) {
                if (!INST_CPRT(inst)) {
                        /*
                         * CPDO
                         */
                        if (vfp_single(inst)) {
                                exceptions = vfp_single_cpdo(inst, fpscr);
                        } else {
                                exceptions = vfp_double_cpdo(inst, fpscr);
                        }
                } else {
                        /*
                         * A CPRT instruction can not appear in FPINST2, nor
                         * can it cause an exception.  Therefore, we do not
                         * have to emulate it.
                         */
                }
        } else {
                /*
                 * A CPDT instruction can not appear in FPINST2, nor can
                 * it cause an exception.  Therefore, we do not have to
                 * emulate it.
                 */
        }

        return exceptions & ~VFP_NAN_FLAG;
}
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
        u32 fpscr, orig_fpscr, fpsid, exceptions;

        pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

        /*
         * At this point, FPEXC can have the following configuration:
         *
         *  EX DEX IXE
         *  0   1   x   - synchronous exception
         *  1   x   0   - asynchronous exception
         *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
         *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
         *                implementation), undefined otherwise
         *
         * Clear various bits and enable access to the VFP so we can
         * handle the bounce.
         */
        fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

        fpsid = fmrx(FPSID);
        orig_fpscr = fpscr = fmrx(FPSCR);

        /*
         * Check for the special VFP subarch 1 and FPSCR.IXE bit case
         */
        if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
            && (fpscr & FPSCR_IXE)) {
                /*
                 * Synchronous exception, emulate the trigger instruction
                 */
                goto emulate;
        }

        if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
                /*
                 * Asynchronous exception. The instruction is read from FPINST
                 * and the interrupted instruction has to be restarted.
                 */
                trigger = fmrx(FPINST);
                regs->ARM_pc -= 4;
#endif
        } else if (!(fpexc & FPEXC_DEX)) {
                /*
                 * Illegal combination of bits. It can be caused by an
                 * unallocated VFP instruction but with FPSCR.IXE set and not
                 * on VFP subarch 1.
                 */
                vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
                return;
        }

        /*
         * Modify fpscr to indicate the number of iterations remaining.
         * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
         * whether FPEXC.VECITR or FPSCR.LEN is used.
         */
        if (fpexc & (FPEXC_EX | FPEXC_VV)) {
                u32 len;

                len = fpexc + (1 << FPEXC_LENGTH_BIT);

                fpscr &= ~FPSCR_LENGTH_MASK;
                fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
        }

        /*
         * Handle the first FP instruction.  We used to take note of the
         * FPEXC bounce reason, but this appears to be unreliable.
         * Emulate the bounced instruction instead.
         */
        exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

        /*
         * If there isn't a second FP instruction, exit now. Note that
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
        if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
                return;

        /*
         * The barrier() here prevents fpinst2 being read
         * before the condition above.
         */
        barrier();
        trigger = fmrx(FPINST2);

emulate:
        exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}
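
/*
 * Grant full (privileged and user) access to cp10/cp11 in the coprocessor
 * access control register.  Access rights are per-CPU state, so this must
 * run on every CPU: vfp_init() invokes it via smp_call_function() and the
 * hotplug notifier repeats it for CPUs coming online.
 */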
static void vfp_enable(void *unused)
{
        u32 access = get_copro_access();

        /*
         * Enable full access to VFP (cp10 and cp11)
         */
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
#ifdef CONFIG_PM
#include <linux/syscore_ops.h>

static int vfp_pm_suspend(void)
{
        struct thread_info *ti = current_thread_info();
        u32 fpexc = fmrx(FPEXC);

        /* if vfp is on, then save state for resumption */
        if (fpexc & FPEXC_EN) {
                printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
                vfp_save_state(&ti->vfpstate, fpexc);

                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        }

        /* clear any information we had about last context state */
        memset(last_VFP_context, 0, sizeof(last_VFP_context));

        return 0;
}

static void vfp_pm_resume(void)
{
        /* ensure we have access to the vfp */
        vfp_enable(NULL);

        /* and disable it to ensure the next usage restores the state */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static struct syscore_ops vfp_pm_syscore_ops = {
        .suspend        = vfp_pm_suspend,
        .resume         = vfp_pm_resume,
};

static void vfp_pm_init(void)
{
        register_syscore_ops(&vfp_pm_syscore_ops);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
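
/*
 * The VFP register bank does not survive a system suspend: the suspend
 * handler above saves the current thread's live state and disables VFP,
 * and clearing last_VFP_context[] guarantees that the first VFP use on
 * each CPU after resume bounces and reloads from the saved per-thread
 * state.
 */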
void vfp_sync_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        /*
         * If the thread we're interested in is the current owner of the
         * hardware VFP state, then we need to save its state.
         */
        if (last_VFP_context[cpu] == &thread->vfpstate) {
                u32 fpexc = fmrx(FPEXC);

                /*
                 * Save the last VFP state on this CPU.
                 */
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
                fmxr(FPEXC, fpexc);
        }

        put_cpu();
}
void vfp_flush_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        /*
         * If the thread we're interested in is the current owner of the
         * hardware VFP state, then we need to save its state.
         */
        if (last_VFP_context[cpu] == &thread->vfpstate) {
                u32 fpexc = fmrx(FPEXC);

                fmxr(FPEXC, fpexc & ~FPEXC_EN);

                /*
                 * Set the context to NULL to force a reload the next time
                 * the thread uses the VFP.
                 */
                last_VFP_context[cpu] = NULL;
        }

#ifdef CONFIG_SMP
        /*
         * For SMP we still have to take care of the case where the thread
         * migrates to another CPU and then back to the original CPU on which
         * the last VFP user is still the same thread. Mark the thread VFP
         * state as belonging to a non-existent CPU so that the saved one will
         * be reloaded in the above case.
         */
        thread->vfpstate.hard.cpu = NR_CPUS;
#endif
        put_cpu();
}
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        void *hcpu)
{
        if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
                unsigned int cpu = (long)hcpu;
                last_VFP_context[cpu] = NULL;
        } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                vfp_enable(NULL);
        return NOTIFY_OK;
}
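
/*
 * CPU_DYING is delivered on the dying CPU itself with interrupts disabled,
 * so dropping its last_VFP_context[] entry in the notifier above cannot
 * race with a thread switch on that CPU.
 */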
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
        unsigned int vfpsid;
        unsigned int cpu_arch = cpu_architecture();

        if (cpu_arch >= CPU_ARCH_ARMv6)
                vfp_enable(NULL);

        /*
         * First check that there is a VFP that we can use.
         * The handler is already setup to just log calls, so
         * we just need to read the VFPSID register.
         */
        vfp_vector = vfp_testing_entry;
        barrier();
        vfpsid = fmrx(FPSID);
        barrier();
        vfp_vector = vfp_null_entry;

        printk(KERN_INFO "VFP support v0.3: ");
        if (VFP_arch)
                printk("not present\n");
        else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
                hotcpu_notifier(vfp_hotplug, 0);

                smp_call_function(vfp_enable, NULL, 1);

                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
                printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
                        (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
                        (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
                        (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                        (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                        (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

                vfp_vector = vfp_support_entry;

                thread_register_notifier(&vfp_notifier_block);
                vfp_pm_init();

                /*
                 * We detected VFP, and the support code is
                 * in place; report VFP support to userspace.
                 */
                elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
                if (VFP_arch >= 2) {
                        elf_hwcap |= HWCAP_VFPv3;

                        /*
                         * Check for VFPv3 D16. CPUs in this configuration
                         * only have 16 x 64bit registers.
                         */
                        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
                                elf_hwcap |= HWCAP_VFPv3D16;
                }
#endif
#ifdef CONFIG_NEON
                /*
                 * Check for the presence of the Advanced SIMD
                 * load/store instructions, integer and single
                 * precision floating point operations. Only check
                 * for NEON if the hardware has the MVFR registers.
                 */
                if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                        if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
                                elf_hwcap |= HWCAP_NEON;
                }
#endif
        }
        return 0;
}

late_initcall(vfp_init);