/*
 *  FPU support code, moved here from head.S so that it can be used
 *  by chips which use other head-whatever.S files.
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *    Copyright (C) 1996 Paul Mackerras.
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base)                                       \
BEGIN_FTR_SECTION                                                       \
        b       2f;                                                     \
END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                     \
        REST_32FPRS(n,base);                                            \
        b       3f;                                                     \
2:      REST_32VSRS(n,c,base);                                          \
3:

#define __REST_32FPVSRS_TRANSACT(n,c,base)                              \
BEGIN_FTR_SECTION                                                       \
        b       2f;                                                     \
END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                     \
        REST_32FPRS_TRANSACT(n,base);                                   \
        b       3f;                                                     \
2:      REST_32VSRS_TRANSACT(n,c,base);                                 \
3:

#define __SAVE_32FPVSRS(n,c,base)                                       \
BEGIN_FTR_SECTION                                                       \
        b       2f;                                                     \
END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                     \
        SAVE_32FPRS(n,base);                                            \
        b       3f;                                                     \
2:      SAVE_32VSRS(n,c,base);                                          \
3:
#else
#define __REST_32FPVSRS(n,b,base)       REST_32FPRS(n, base)
#define __REST_32FPVSRS_TRANSACT(n,b,base)      REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base)       SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS_TRANSACT(n,c,base) \
        __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
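
/*
 * A rough sketch of one expansion of the wrappers above, assuming the
 * usual "#define __REG_R4 4" style definitions from ppc_asm.h (the
 * token pasting turns a symbolic register name into the bare number
 * that the VSX variants need):
 *
 *   REST_32FPVSRS(0, R4, R5)
 *     -> __REST_32FPVSRS(0, __REG_R4, __REG_R5)
 *     -> __REST_32FPVSRS(0, 4, 5)
 *
 * which emits REST_32FPRS(0, 5) on non-VSX CPUs and REST_32VSRS(0, 4, 5)
 * inside the CPU_FTR_VSX feature section otherwise.
 */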

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Wrapper to call load_up_fpu from C.
 * void do_load_up_fpu(struct pt_regs *regs);
 */
_GLOBAL(do_load_up_fpu)
        mflr    r0
        std     r0, 16(r1)
        stdu    r1, -112(r1)

        subi    r6, r3, STACK_FRAME_OVERHEAD
        /* load_up_fpu expects r12=MSR, r13=PACA, and returns
         * with r12 = new MSR.
         */
        ld      r12,_MSR(r6)
        GET_PACA(r13)

        bl      load_up_fpu
        std     r12,_MSR(r6)

        ld      r0, 112+16(r1)
        addi    r1, r1, 112
        mtlr    r0
        blr
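
/*
 * A minimal C-side sketch of calling this wrapper; the prototype comes
 * from the comment above, while the caller context shown is only an
 * assumption for illustration:
 *
 *   void do_load_up_fpu(struct pt_regs *regs);
 *
 *   // e.g. from an exception path that already holds the pt_regs:
 *   do_load_up_fpu(regs);
 */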

/* void do_load_up_transact_fpu(struct thread_struct *thread)
 *
 * This is similar to load_up_fpu but for the transactional version of the FP
 * register set.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, we don't do lazy FP with TM currently.
 */
_GLOBAL(do_load_up_transact_fpu)
        mfmsr   r6
        ori     r5,r6,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        SYNC
        MTMSRD(r5)

        lfd     fr0,THREAD_TRANSACT_FPSCR(r3)
        MTFSF_L(fr0)
        REST_32FPVSRS_TRANSACT(0, R4, R3)

        /* FP/VSX off again */
        MTMSRD(r6)
        SYNC

        blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
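
/*
 * A hedged C-side sketch for the transactional variant; the prototype is
 * taken from the comment above, and the current->thread argument is an
 * assumption about a plausible caller:
 *
 *   void do_load_up_transact_fpu(struct thread_struct *thread);
 *
 *   do_load_up_transact_fpu(&current->thread);
 */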

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
_GLOBAL(load_up_fpu)
        mfmsr   r5
        ori     r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        SYNC
        MTMSRD(r5)                      /* enable use of fpu now */
        isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
        LOAD_REG_ADDRBASE(r3, last_task_used_math)
        toreal(r3)
        PPC_LL  r4,ADDROFF(last_task_used_math)(r3)
        PPC_LCMPI       0,r4,0
        beq     1f
        toreal(r4)
        addi    r4,r4,THREAD            /* want last_task_used_math->thread */
        SAVE_32FPVSRS(0, R5, R4)
        mffs    fr0
        stfd    fr0,THREAD_FPSCR(r4)
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r10,MSR_FP|MSR_FE0|MSR_FE1
        andc    r4,r4,r10               /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
        /* enable use of FP after return */
#ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r9,r9,MSR_FP            /* enable FP for current */
        or      r9,r9,r4
#else
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r12,r12,MSR_FP
        or      r12,r12,r4
        std     r12,_MSR(r1)
#endif
        lfd     fr0,THREAD_FPSCR(r5)
        MTFSF_L(fr0)
        REST_32FPVSRS(0, R4, R5)
#ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        fromreal(r4)
        PPC_STL r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
        /* restore registers and return */
        /* we haven't used ctr or xer or lr */
        blr
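
/*
 * Rough UP walkthrough of the lazy-switch logic above (illustrative):
 * task A executes an FP instruction, the FP-unavailable exception runs
 * load_up_fpu, A's FPRs/FPSCR are loaded and last_task_used_math = A.
 * When task B later touches FP on the same CPU, the code above first
 * saves A's registers into A's thread_struct and clears MSR_FP in A's
 * saved MSR, then loads B's state and records B as the new owner.
 */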

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
        mfmsr   r5
        ori     r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        SYNC_601
        ISYNC_601
        MTMSRD(r5)                      /* enable use of fpu now */
        SYNC_601
        isync
        PPC_LCMPI       0,r3,0
        beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r5,PT_REGS(r3)
        PPC_LCMPI       0,r5,0
        SAVE_32FPVSRS(0, R4, R3)
        mffs    fr0
        stfd    fr0,THREAD_FPSCR(r3)
        beq     1f
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r3,MSR_FP|MSR_FE0|MSR_FE1
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r3,r3,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        andc    r4,r4,r3                /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
        li      r5,0
        LOAD_REG_ADDRBASE(r4,last_task_used_math)
        PPC_STL r5,ADDROFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
        blr
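
/*
 * A hedged sketch of how the context-switch path might hand state back
 * eagerly on SMP, per the "we call giveup_fpu in switch_to" comment in
 * load_up_fpu above (simplified, not the literal __switch_to code):
 *
 *   if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
 *           giveup_fpu(prev);   // save FPRs/FPSCR, clear MSR_FP
 */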

/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 */

_GLOBAL(cvt_fd)
        lfs     0,0(r3)
        stfd    0,0(r4)
        blr

_GLOBAL(cvt_df)
        lfd     0,0(r3)
        stfs    0,0(r4)
        blr
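
/*
 * A hedged C-side sketch of how the alignment handler could use these
 * helpers; the prototypes are assumptions matching the register usage
 * above (r3 = source pointer, r4 = destination pointer):
 *
 *   void cvt_fd(float *from, double *to);
 *   void cvt_df(double *from, float *to);
 *
 *   float f;
 *   double d;
 *   cvt_fd(&f, &d);   // widen for an emulated single-precision load
 *   cvt_df(&d, &f);   // narrow for an emulated single-precision store
 */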