/*
 * arch/powerpc/math-emu/math_efp.c
 *
 * Copyright (C) 2006-2008, 2010 Freescale Semiconductor, Inc.
 *
 * Author: Ebony Zhu, <ebony.zhu@freescale.com>
 *         Yu Liu, <yu.liu@freescale.com>
 *
 * Derived from arch/alpha/math-emu/math.c
 *              arch/powerpc/math-emu/math.c
 *
 * This file is the exception handler that makes E500 SPE instructions
 * fully comply with the IEEE-754 floating point standard.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>

#define FP_EX_BOOKE_E500_SPE
#include <asm/sfp-machine.h>

#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#define EFSCMPGT	0x2cc
#define EFSCMPLT	0x2cd
#define EFSCMPEQ	0x2ce

#define EFSCTUIZ	0x2d8
#define EFSCTSIZ	0x2da

#define EVFSNABS	0x285

#define EVFSCMPGT	0x28c
#define EVFSCMPLT	0x28d
#define EVFSCMPEQ	0x28e
#define EVFSCTUI	0x294
#define EVFSCTSI	0x295
#define EVFSCTUF	0x296
#define EVFSCTSF	0x297
#define EVFSCTUIZ	0x298
#define EVFSCTSIZ	0x29a

#define EFDCTUIDZ	0x2ea
#define EFDCTSIDZ	0x2eb
#define EFDCMPGT	0x2ec
#define EFDCMPLT	0x2ed
#define EFDCMPEQ	0x2ee

#define EFDCTUIZ	0x2f8
#define EFDCTSIZ	0x2fa

#define SIGN_BIT_S	(1UL << 31)
#define SIGN_BIT_D	(1ULL << 63)
#define FP_EX_MASK	(FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
			 FP_EX_UNDERFLOW | FP_EX_OVERFLOW)

static int have_e500_cpu_a005_erratum;
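
/*
 * The emulation below views a 64-bit SPE register either as one 64-bit
 * double word or as two 32-bit words; dw_union (reproduced here from the
 * full source for readability of this excerpt) provides both views.
 */
union dw_union {
	u64 dp[1];
	u32 wp[2];
};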

static unsigned long insn_type(unsigned long speinsn)
{
	unsigned long ret = NOTYPE;

	switch (speinsn & 0x7ff) {
	case EFSABS:	ret = XA;	break;
	case EFSADD:	ret = AB;	break;
	case EFSCFD:	ret = XB;	break;
	case EFSCMPEQ:	ret = XCR;	break;
	case EFSCMPGT:	ret = XCR;	break;
	case EFSCMPLT:	ret = XCR;	break;
	case EFSCTSF:	ret = XB;	break;
	case EFSCTSI:	ret = XB;	break;
	case EFSCTSIZ:	ret = XB;	break;
	case EFSCTUF:	ret = XB;	break;
	case EFSCTUI:	ret = XB;	break;
	case EFSCTUIZ:	ret = XB;	break;
	case EFSDIV:	ret = AB;	break;
	case EFSMUL:	ret = AB;	break;
	case EFSNABS:	ret = XA;	break;
	case EFSNEG:	ret = XA;	break;
	case EFSSUB:	ret = AB;	break;
	case EFSCFSI:	ret = XB;	break;

	case EVFSABS:	ret = XA;	break;
	case EVFSADD:	ret = AB;	break;
	case EVFSCMPEQ:	ret = XCR;	break;
	case EVFSCMPGT:	ret = XCR;	break;
	case EVFSCMPLT:	ret = XCR;	break;
	case EVFSCTSF:	ret = XB;	break;
	case EVFSCTSI:	ret = XB;	break;
	case EVFSCTSIZ:	ret = XB;	break;
	case EVFSCTUF:	ret = XB;	break;
	case EVFSCTUI:	ret = XB;	break;
	case EVFSCTUIZ:	ret = XB;	break;
	case EVFSDIV:	ret = AB;	break;
	case EVFSMUL:	ret = AB;	break;
	case EVFSNABS:	ret = XA;	break;
	case EVFSNEG:	ret = XA;	break;
	case EVFSSUB:	ret = AB;	break;

	case EFDABS:	ret = XA;	break;
	case EFDADD:	ret = AB;	break;
	case EFDCFS:	ret = XB;	break;
	case EFDCMPEQ:	ret = XCR;	break;
	case EFDCMPGT:	ret = XCR;	break;
	case EFDCMPLT:	ret = XCR;	break;
	case EFDCTSF:	ret = XB;	break;
	case EFDCTSI:	ret = XB;	break;
	case EFDCTSIDZ:	ret = XB;	break;
	case EFDCTSIZ:	ret = XB;	break;
	case EFDCTUF:	ret = XB;	break;
	case EFDCTUI:	ret = XB;	break;
	case EFDCTUIDZ:	ret = XB;	break;
	case EFDCTUIZ:	ret = XB;	break;
	case EFDDIV:	ret = AB;	break;
	case EFDMUL:	ret = AB;	break;
	case EFDNABS:	ret = XA;	break;
	case EFDNEG:	ret = XA;	break;
	case EFDSUB:	ret = AB;	break;
	}

	return ret;
}

int do_spe_mathemu(struct pt_regs *regs)
{
	unsigned long type, func, fc, fa, fb, src, speinsn;
	union dw_union vc, va, vb;
	if (get_user(speinsn, (unsigned int __user *) regs->nip))
		return -EFAULT;
	if ((speinsn >> 26) != EFAPU)
		return -EINVAL;		/* not an spe instruction */
	type = insn_type(speinsn);

	func = speinsn & 0x7ff;
	fc = (speinsn >> 21) & 0x1f;
	fa = (speinsn >> 16) & 0x1f;
	fb = (speinsn >> 11) & 0x1f;
	src = (speinsn >> 5) & 0x7;
	vc.wp[0] = current->thread.evr[fc];
	vc.wp[1] = regs->gpr[fc];
	va.wp[0] = current->thread.evr[fa];
	va.wp[1] = regs->gpr[fa];
	vb.wp[0] = current->thread.evr[fb];
	vb.wp[1] = regs->gpr[fb];
	__FPU_FPSCR = mfspr(SPRN_SPEFSCR);

	pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
	pr_debug("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
	pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
	pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);

	/* single-precision (SPFP) operations */
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	/* unpack the operands required by this instruction form */
	FP_UNPACK_SP(SA, va.wp + 1);	/* AB and compare forms */
	FP_UNPACK_SP(SB, vb.wp + 1);	/* AB, compare and XB forms */
	FP_UNPACK_SP(SA, va.wp + 1);	/* XA forms */
	pr_debug("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c);
	pr_debug("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c);
	vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;	/* EFSABS */

	vc.wp[1] = va.wp[1] | SIGN_BIT_S;	/* EFSNABS */

	vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;	/* EFSNEG */

	FP_ADD_S(SR, SA, SB);			/* EFSADD */

	FP_SUB_S(SR, SA, SB);			/* EFSSUB */

	FP_MUL_S(SR, SA, SB);			/* EFSMUL */

	FP_DIV_S(SR, SA, SB);			/* EFSDIV */

	/* EFSCTSF / EFSCTUF */
	if (SB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		SB_e += (func == EFSCTSF ? 31 : 32);
		FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
				(func == EFSCTSF));
	}
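	/*
	 * Note: EFSCTSF/EFSCTUF produce fractional fixed-point results, so
	 * the exponent bias of 31 (signed) or 32 (unsigned) above scales
	 * the value by 2^31 or 2^32 before the integer conversion.
	 */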

	/* EFSCFD: convert the double-precision operand to single precision */
	FP_UNPACK_DP(DB, vb.dp);

	pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
			DB_s, DB_f1, DB_f0, DB_e, DB_c);

	FP_CONV(S, D, 1, 2, SR, DB);

	/* EFSCTUI / EFSCTSI: convert to integer in the current rounding mode */
	if (SB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
				((func & 0x3) != 0));
	}

	/* EFSCTUIZ / EFSCTSIZ: truncating conversion to integer */
	if (SB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_S(vc.wp[1], SB, 32,
				((func & 0x3) != 0));
	}

	pr_debug("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c);

	FP_PACK_SP(vc.wp + 1, SR);

	FP_CMP_S(IR, SA, SB, 3);
	if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB)))
		FP_SET_EXCEPTION(FP_EX_INVALID);

	/* double-precision (DPFP) operations */
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	/* unpack the operands required by this instruction form */
	FP_UNPACK_DP(DA, va.dp);	/* AB and compare forms */
	FP_UNPACK_DP(DB, vb.dp);	/* AB, compare and XB forms */
	FP_UNPACK_DP(DA, va.dp);	/* XA forms */
	pr_debug("DA: %ld %08lx %08lx %ld (%ld)\n",
			DA_s, DA_f1, DA_f0, DA_e, DA_c);
	pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
			DB_s, DB_f1, DB_f0, DB_e, DB_c);
	vc.dp[0] = va.dp[0] & ~SIGN_BIT_D;	/* EFDABS */

	vc.dp[0] = va.dp[0] | SIGN_BIT_D;	/* EFDNABS */

	vc.dp[0] = va.dp[0] ^ SIGN_BIT_D;	/* EFDNEG */

	FP_ADD_D(DR, DA, DB);			/* EFDADD */

	FP_SUB_D(DR, DA, DB);			/* EFDSUB */

	FP_MUL_D(DR, DA, DB);			/* EFDMUL */

	FP_DIV_D(DR, DA, DB);			/* EFDDIV */

	/* EFDCTSF / EFDCTUF */
	if (DB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		DB_e += (func == EFDCTSF ? 31 : 32);
		FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
				(func == EFDCTSF));
	}

	/* EFDCFS: convert the single-precision operand to double precision */
	FP_UNPACK_SP(SB, vb.wp + 1);

	pr_debug("SB: %ld %08lx %ld (%ld)\n",
			SB_s, SB_f, SB_e, SB_c);

	FP_CONV(D, S, 2, 1, DR, SB);

	/* EFDCTUIDZ / EFDCTSIDZ: truncating conversion to a 64-bit integer */
	if (DB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_D(vc.dp[0], DB, 64,
				((func & 0x1) == 0));
	}

	/* EFDCTUI / EFDCTSI: convert to integer in the current rounding mode */
	if (DB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
				((func & 0x3) != 0));
	}

	/* EFDCTUIZ / EFDCTSIZ: truncating conversion to a 32-bit integer */
	if (DB_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_D(vc.wp[1], DB, 32,
				((func & 0x3) != 0));
	}

	pr_debug("DR: %ld %08lx %08lx %ld (%ld)\n",
			DR_s, DR_f1, DR_f0, DR_e, DR_c);

	FP_PACK_DP(vc.dp, DR);

	FP_CMP_D(IR, DA, DB, 3);
	if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB)))
		FP_SET_EXCEPTION(FP_EX_INVALID);

	/* vector single-precision (VCT) operations, one per 32-bit half */
	FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0);
	FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1);
	/* unpack the operands required by this instruction form */
	FP_UNPACK_SP(SA0, va.wp);	/* AB and compare forms */
	FP_UNPACK_SP(SA1, va.wp + 1);

	FP_UNPACK_SP(SB0, vb.wp);	/* AB, compare and XB forms */
	FP_UNPACK_SP(SB1, vb.wp + 1);

	FP_UNPACK_SP(SA0, va.wp);	/* XA forms */
	FP_UNPACK_SP(SA1, va.wp + 1);
	pr_debug("SA0: %ld %08lx %ld (%ld)\n",
			SA0_s, SA0_f, SA0_e, SA0_c);
	pr_debug("SA1: %ld %08lx %ld (%ld)\n",
			SA1_s, SA1_f, SA1_e, SA1_c);
	pr_debug("SB0: %ld %08lx %ld (%ld)\n",
			SB0_s, SB0_f, SB0_e, SB0_c);
	pr_debug("SB1: %ld %08lx %ld (%ld)\n",
			SB1_s, SB1_f, SB1_e, SB1_c);
	vc.wp[0] = va.wp[0] & ~SIGN_BIT_S;	/* EVFSABS */
	vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;

	vc.wp[0] = va.wp[0] | SIGN_BIT_S;	/* EVFSNABS */
	vc.wp[1] = va.wp[1] | SIGN_BIT_S;

	vc.wp[0] = va.wp[0] ^ SIGN_BIT_S;	/* EVFSNEG */
	vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
	FP_ADD_S(SR0, SA0, SB0);	/* EVFSADD */
	FP_ADD_S(SR1, SA1, SB1);

	FP_SUB_S(SR0, SA0, SB0);	/* EVFSSUB */
	FP_SUB_S(SR1, SA1, SB1);

	FP_MUL_S(SR0, SA0, SB0);	/* EVFSMUL */
	FP_MUL_S(SR1, SA1, SB1);

	FP_DIV_S(SR0, SA0, SB0);	/* EVFSDIV */
	FP_DIV_S(SR1, SA1, SB1);

	/* EVFSCTSF / EVFSCTUF */
	if (SB0_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		SB0_e += (func == EVFSCTSF ? 31 : 32);
		FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
				(func == EVFSCTSF));
	}
	if (SB1_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		SB1_e += (func == EVFSCTSF ? 31 : 32);
		FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
				(func == EVFSCTSF));
	}

	/* EVFSCTUI / EVFSCTSI */
	if (SB0_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
				((func & 0x3) != 0));
	}
	if (SB1_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
				((func & 0x3) != 0));
	}

	/* EVFSCTUIZ / EVFSCTSIZ */
	if (SB0_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_S(vc.wp[0], SB0, 32,
				((func & 0x3) != 0));
	}
	if (SB1_c == FP_CLS_NAN) {
		FP_SET_EXCEPTION(FP_EX_INVALID);
	} else {
		FP_TO_INT_S(vc.wp[1], SB1, 32,
				((func & 0x3) != 0));
	}

	pr_debug("SR0: %ld %08lx %ld (%ld)\n",
			SR0_s, SR0_f, SR0_e, SR0_c);
	pr_debug("SR1: %ld %08lx %ld (%ld)\n",
			SR1_s, SR1_f, SR1_e, SR1_c);

	FP_PACK_SP(vc.wp, SR0);
	FP_PACK_SP(vc.wp + 1, SR1);

	FP_CMP_S(IR0, SA0, SB0, 3);
	FP_CMP_S(IR1, SA1, SB1, 3);
	if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0)))
		FP_SET_EXCEPTION(FP_EX_INVALID);
	if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1)))
		FP_SET_EXCEPTION(FP_EX_INVALID);
	ch = (IR0 == cmp) ? 1 : 0;
	cl = (IR1 == cmp) ? 1 : 0;
	IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
		((ch & cl) << 0);

	regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2));
	regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));

	/*
	 * If the "invalid" exception sticky bit was set by the
	 * processor for non-finite input, but was not set before the
	 * instruction being emulated, clear it.  Likewise for the
	 * "underflow" bit, which may have been set by the processor
	 * for exact underflow, not just inexact underflow when the
	 * flag should be set for IEEE 754 semantics.  Other sticky
	 * exceptions will only be set by the processor when they are
	 * correct according to IEEE 754 semantics, and we must not
	 * clear sticky bits that were already set before the emulated
	 * instruction as they represent the user-visible sticky
	 * exception status.  "inexact" traps to kernel are not
	 * required for IEEE semantics and are not enabled by default,
	 * so the "inexact" sticky bit may have been set by a previous
	 * instruction without the kernel being aware of it.
	 */
	__FPU_FPSCR &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) |
			current->thread.spefscr_last;
	__FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
	mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
	current->thread.spefscr_last = __FPU_FPSCR;
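	/*
	 * At this point SPEFSCR holds the hardware-set sticky bits, minus
	 * any "invalid"/"underflow" bits that were not already sticky
	 * before the trapped instruction, plus the exceptions raised by
	 * the software emulation itself.
	 */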

	current->thread.evr[fc] = vc.wp[0];
	regs->gpr[fc] = vc.wp[1];

	pr_debug("ccr = %08lx\n", regs->ccr);
	pr_debug("cur exceptions = %08x spefscr = %08lx\n",
			FP_CUR_EXCEPTIONS, __FPU_FPSCR);
	pr_debug("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
	pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
	pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);

	if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
		if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO)
		    && (current->thread.fpexc_mode & PR_FP_EXC_DIV))
			return 1;
		if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW)
		    && (current->thread.fpexc_mode & PR_FP_EXC_OVF))
			return 1;
		if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW)
		    && (current->thread.fpexc_mode & PR_FP_EXC_UND))
			return 1;
		if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT)
		    && (current->thread.fpexc_mode & PR_FP_EXC_RES))
			return 1;
		if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID)
		    && (current->thread.fpexc_mode & PR_FP_EXC_INV))
			return 1;
	}

	if (have_e500_cpu_a005_erratum) {
		/* per the e500 cpu A005 erratum, reissue the efp instruction */
		pr_debug("re-issue efp inst: %08lx\n", speinsn);
		return 0;
	}

	printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered an unsupported instruction.\ninst code: %08lx\n", speinsn);
	return -ENOSYS;
}

int speround_handler(struct pt_regs *regs)
{
	int lo_inexact, hi_inexact;
	unsigned long speinsn, type, fb, fc, fptype, func;
	if (get_user(speinsn, (unsigned int __user *) regs->nip))
		return -EFAULT;
	if ((speinsn >> 26) != 4)
		return -EINVAL;		/* not an spe instruction */
	func = speinsn & 0x7ff;
	type = insn_type(func);
	if (type == XCR) return -ENOSYS;
	__FPU_FPSCR = mfspr(SPRN_SPEFSCR);
	pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
	fptype = (speinsn >> 5) & 0x7;

	/* No need to round if the result is exact */
	lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX);
	hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH);
	if (!(lo_inexact || (hi_inexact && fptype == VCT)))
		return 0;

	fc = (speinsn >> 21) & 0x1f;
	s_lo = regs->gpr[fc] & SIGN_BIT_S;
	s_hi = current->thread.evr[fc] & SIGN_BIT_S;
	fgpr.wp[0] = current->thread.evr[fc];
	fgpr.wp[1] = regs->gpr[fc];

	fb = (speinsn >> 11) & 0x1f;

	/*
	 * These instructions always round to zero,
	 * independent of the rounding mode.
	 */

	/* single-precision convert-from instructions:
	   recover the sign of a zero result if possible. */
	s_lo = regs->gpr[fb] & SIGN_BIT_S;

	/* vector convert-from instructions:
	   recover the sign of a zero result if possible. */
	s_lo = regs->gpr[fb] & SIGN_BIT_S;
	s_hi = current->thread.evr[fb] & SIGN_BIT_S;

	/* double-precision convert-from instructions:
	   recover the sign of a zero result if possible. */
	s_hi = current->thread.evr[fb] & SIGN_BIT_S;

	pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);

	/* Since the SPE instructions on the e500 core handle round-to-nearest
	 * and round-toward-zero in an IEEE-754-compliant way, only round
	 * toward +Inf and round toward -Inf need to be handled in software.
	 */
	/* SPFP */
	if ((FP_ROUNDMODE) == FP_RND_PINF) {
		if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
	} else { /* round to -Inf */
			fgpr.wp[1]++; /* Z < 0, choose Z2 */
			fgpr.wp[1]--; /* Z < 0, choose Z2 */
	}

	/* DPFP */
	if (FP_ROUNDMODE == FP_RND_PINF) {
			fgpr.dp[0]++; /* Z > 0, choose Z1 */
			fgpr.wp[1]++; /* Z > 0, choose Z1 */
	} else { /* round to -Inf */
			fgpr.dp[0]++; /* Z < 0, choose Z2 */
			fgpr.wp[1]--; /* Z < 0, choose Z2 */
	}

	/* VCT */
	if (FP_ROUNDMODE == FP_RND_PINF) {
		if (lo_inexact && !s_lo)
			fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
		if (hi_inexact && !s_hi)
			fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
	} else { /* round to -Inf */
		if (lo_inexact && s_lo) {
			fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
			fgpr.wp[1]--; /* Z_low < 0, choose Z2 */
		}
		if (hi_inexact && s_hi) {
			fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
			fgpr.wp[0]--; /* Z_high < 0, choose Z2 */
		}
	}
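
	/*
	 * Worked example (assuming the hardware left the truncated,
	 * round-toward-zero result in the register): a positive
	 * single-precision result 0x3f800001 flagged as inexact is bumped
	 * to 0x3f800002, the next representable value toward +Inf, when
	 * FP_RND_PINF is selected; when rounding toward -Inf, the truncated
	 * value is already correct for a positive result and is left
	 * unchanged.
	 */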

	current->thread.evr[fc] = fgpr.wp[0];
	regs->gpr[fc] = fgpr.wp[1];

	pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);

	if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
		return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;
	return 0;
}

int __init spe_mathemu_init(void)
{
	u32 pvr;

	pvr = mfspr(SPRN_PVR);

	if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
	    (PVR_VER(pvr) == PVR_VER_E500V2)) {
		/*
		 * E500 revisions below 1.1, 2.3, 3.1, 4.1 and 5.1 need the
		 * cpu A005 erratum workaround.
		 */
			have_e500_cpu_a005_erratum = 1;	/* major rev 1, minor < 1 */

			have_e500_cpu_a005_erratum = 1;	/* major rev 2, minor < 3 */

			have_e500_cpu_a005_erratum = 1;	/* major rev 3/4/5, minor < 1 */
	}

	return 0;
}

module_init(spe_mathemu_init);