// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/math-emu/math_efp.c
 *
 * Copyright (C) 2006-2008, 2010 Freescale Semiconductor, Inc.
 *
 * Author: Ebony Zhu, <ebony.zhu@freescale.com>
 *         Yu Liu, <yu.liu@freescale.com>
 *
 * Derived from arch/alpha/math-emu/math.c
 *              arch/powerpc/math-emu/math.c
 *
 * This file is the exception handler to make E500 SPE instructions
 * fully comply with IEEE-754 floating point standard.
 */

#include <linux/types.h>
#include <linux/prctl.h>

#include <linux/uaccess.h>

#define FP_EX_BOOKE_E500_SPE
#include <asm/sfp-machine.h>

#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
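
/*
 * Minor opcodes of the SPE FP instructions handled here (the low 11 bits
 * of the instruction word, speinsn & 0x7ff).  EFS* are scalar single
 * precision, EVFS* are vector single precision and EFD* are scalar
 * double precision operations.
 */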
#define EFSCMPGT        0x2cc
#define EFSCMPLT        0x2cd
#define EFSCMPEQ        0x2ce
#define EFSCTUIZ        0x2d8
#define EFSCTSIZ        0x2da
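
/* Vector single precision */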
#define EVFSNABS        0x285
#define EVFSCMPGT       0x28c
#define EVFSCMPLT       0x28d
#define EVFSCMPEQ       0x28e
#define EVFSCTUI        0x294
#define EVFSCTSI        0x295
#define EVFSCTUF        0x296
#define EVFSCTSF        0x297
#define EVFSCTUIZ       0x298
#define EVFSCTSIZ       0x29a
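
/* Double precision */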
#define EFDCTUIDZ       0x2ea
#define EFDCTSIDZ       0x2eb
#define EFDCMPGT        0x2ec
#define EFDCMPLT        0x2ed
#define EFDCMPEQ        0x2ee
#define EFDCTUIZ        0x2f8
#define EFDCTSIZ        0x2fa

#define SIGN_BIT_S      (1UL << 31)
#define SIGN_BIT_D      (1ULL << 63)
#define FP_EX_MASK      (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
                         FP_EX_UNDERFLOW | FP_EX_OVERFLOW)
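
/*
 * Set at init time when the running core needs the e500 cpu a005 erratum
 * workaround; affected instructions are then re-issued rather than
 * reported as unsupported.
 */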
static int have_e500_cpu_a005_erratum;

static unsigned long insn_type(unsigned long speinsn)
{
        unsigned long ret = NOTYPE;

        switch (speinsn & 0x7ff) {
        case EFSABS:    ret = XA;  break;
        case EFSADD:    ret = AB;  break;
        case EFSCFD:    ret = XB;  break;
        case EFSCMPEQ:  ret = XCR; break;
        case EFSCMPGT:  ret = XCR; break;
        case EFSCMPLT:  ret = XCR; break;
        case EFSCTSF:   ret = XB;  break;
        case EFSCTSI:   ret = XB;  break;
        case EFSCTSIZ:  ret = XB;  break;
        case EFSCTUF:   ret = XB;  break;
        case EFSCTUI:   ret = XB;  break;
        case EFSCTUIZ:  ret = XB;  break;
        case EFSDIV:    ret = AB;  break;
        case EFSMUL:    ret = AB;  break;
        case EFSNABS:   ret = XA;  break;
        case EFSNEG:    ret = XA;  break;
        case EFSSUB:    ret = AB;  break;
        case EFSCFSI:   ret = XB;  break;

        case EVFSABS:   ret = XA;  break;
        case EVFSADD:   ret = AB;  break;
        case EVFSCMPEQ: ret = XCR; break;
        case EVFSCMPGT: ret = XCR; break;
        case EVFSCMPLT: ret = XCR; break;
        case EVFSCTSF:  ret = XB;  break;
        case EVFSCTSI:  ret = XB;  break;
        case EVFSCTSIZ: ret = XB;  break;
        case EVFSCTUF:  ret = XB;  break;
        case EVFSCTUI:  ret = XB;  break;
        case EVFSCTUIZ: ret = XB;  break;
        case EVFSDIV:   ret = AB;  break;
        case EVFSMUL:   ret = AB;  break;
        case EVFSNABS:  ret = XA;  break;
        case EVFSNEG:   ret = XA;  break;
        case EVFSSUB:   ret = AB;  break;

        case EFDABS:    ret = XA;  break;
        case EFDADD:    ret = AB;  break;
        case EFDCFS:    ret = XB;  break;
        case EFDCMPEQ:  ret = XCR; break;
        case EFDCMPGT:  ret = XCR; break;
        case EFDCMPLT:  ret = XCR; break;
        case EFDCTSF:   ret = XB;  break;
        case EFDCTSI:   ret = XB;  break;
        case EFDCTSIDZ: ret = XB;  break;
        case EFDCTSIZ:  ret = XB;  break;
        case EFDCTUF:   ret = XB;  break;
        case EFDCTUI:   ret = XB;  break;
        case EFDCTUIDZ: ret = XB;  break;
        case EFDCTUIZ:  ret = XB;  break;
        case EFDDIV:    ret = AB;  break;
        case EFDMUL:    ret = AB;  break;
        case EFDNABS:   ret = XA;  break;
        case EFDNEG:    ret = XA;  break;
        case EFDSUB:    ret = AB;  break;
        }

        return ret;
}
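
/*
 * Emulate the SPE floating-point instruction at regs->nip so that its
 * result and the SPEFSCR exception bits comply with IEEE-754, which the
 * hardware alone does not guarantee.
 */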
int do_spe_mathemu(struct pt_regs *regs)
{
        FP_DECL_EX;
        int IR, cmp;

        unsigned long type, func, fc, fa, fb, src, speinsn;
        union dw_union vc, va, vb;

        if (get_user(speinsn, (unsigned int __user *) regs->nip))
                return -EFAULT;
        if ((speinsn >> 26) != EFAPU)
                return -EINVAL;         /* not an spe instruction */

        type = insn_type(speinsn);
        if (type == NOTYPE)
                goto illegal;
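
        /*
         * Layout of an SPE FP instruction word: the primary opcode is in
         * the top six bits (speinsn >> 26 == EFAPU), rD/rA/rB take five
         * bits each at shifts 21/16/11, and the minor opcode fills the
         * low eleven bits.  For example, efsadd r3,r4,r5 encodes as
         * 0x10642ac0: 0x10642ac0 >> 26 == 4 (EFAPU), the low 11 bits are
         * 0x2c0 (EFSADD), and the fields extracted below give fc = 3,
         * fa = 4 and fb = 5.
         */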
        func = speinsn & 0x7ff;
        fc = (speinsn >> 21) & 0x1f;
        fa = (speinsn >> 16) & 0x1f;
        fb = (speinsn >> 11) & 0x1f;
        src = (speinsn >> 5) & 0x7;
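
        /*
         * Each 64-bit SPE register is held in two places: the lower 32
         * bits live in the architected GPR (regs->gpr[]), the upper 32
         * bits are saved in current->thread.evr[].  In dw_union, wp[0]
         * is the high word, wp[1] the low word and dp[0] the whole
         * 64-bit image.
         */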
        vc.wp[0] = current->thread.evr[fc];
        vc.wp[1] = regs->gpr[fc];
        va.wp[0] = current->thread.evr[fa];
        va.wp[1] = regs->gpr[fa];
        vb.wp[0] = current->thread.evr[fb];
        vb.wp[1] = regs->gpr[fb];

        __FPU_FPSCR = mfspr(SPRN_SPEFSCR);

        pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
        pr_debug("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
        pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
        pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
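
        /*
         * Scalar single-precision (EFS*) operations: declare the soft-fp
         * working values and unpack whichever of the A and B operands
         * the decoded instruction form uses.
         */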
        FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);

        FP_UNPACK_SP(SA, va.wp + 1);
        FP_UNPACK_SP(SB, vb.wp + 1);
        FP_UNPACK_SP(SA, va.wp + 1);
229 pr_debug("SA: %ld %08lx %ld (%ld)\n", SA_s
, SA_f
, SA_e
, SA_c
);
230 pr_debug("SB: %ld %08lx %ld (%ld)\n", SB_s
, SB_f
, SB_e
, SB_c
);
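
        /* EFSABS / EFSNABS / EFSNEG operate directly on the sign bit. */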
        vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
        vc.wp[1] = va.wp[1] | SIGN_BIT_S;
        vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
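
        /* EFSADD / EFSSUB / EFSMUL / EFSDIV */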
        FP_ADD_S(SR, SA, SB);
        FP_SUB_S(SR, SA, SB);
        FP_MUL_S(SR, SA, SB);
        FP_DIV_S(SR, SA, SB);
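
        /* EFSCTUF / EFSCTSF: convert to a 32-bit unsigned/signed fraction. */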
        if (SB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                SB_e += (func == EFSCTSF ? 31 : 32);
                FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
                                  (func == EFSCTSF) ? 1 : 0);
        }
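
        /* EFSCFD: convert the double-precision operand B to single precision. */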
        FP_UNPACK_DP(DB, vb.dp);

        pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
                 DB_s, DB_f1, DB_f0, DB_e, DB_c);

        FP_CONV(S, D, 1, 2, SR, DB);
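
        /* EFSCTUI / EFSCTSI: convert to a 32-bit integer in the current rounding mode. */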
        if (SB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
                                  ((func & 0x3) != 0));
        }
        if (SB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_S(vc.wp[1], SB, 32,
                            ((func & 0x3) != 0));
        }
325 pr_debug("SR: %ld %08lx %ld (%ld)\n", SR_s
, SR_f
, SR_e
, SR_c
);
327 FP_PACK_SP(vc
.wp
+ 1, SR
);
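
        /*
         * EFSCMPEQ / EFSCMPGT / EFSCMPLT: IR == 3 marks an unordered
         * result; a signaling NaN operand raises "invalid".
         */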
        FP_CMP_S(IR, SA, SB, 3);
        if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB)))
                FP_SET_EXCEPTION(FP_EX_INVALID);
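
        /* Scalar double-precision (EFD*) operations. */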
        FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);

        FP_UNPACK_DP(DA, va.dp);
        FP_UNPACK_DP(DB, vb.dp);
        FP_UNPACK_DP(DA, va.dp);
357 pr_debug("DA: %ld %08lx %08lx %ld (%ld)\n",
358 DA_s
, DA_f1
, DA_f0
, DA_e
, DA_c
);
359 pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
360 DB_s
, DB_f1
, DB_f0
, DB_e
, DB_c
);
        vc.dp[0] = va.dp[0] & ~SIGN_BIT_D;
        vc.dp[0] = va.dp[0] | SIGN_BIT_D;
        vc.dp[0] = va.dp[0] ^ SIGN_BIT_D;
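
        /* EFDADD / EFDSUB / EFDMUL / EFDDIV */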
        FP_ADD_D(DR, DA, DB);
        FP_SUB_D(DR, DA, DB);
        FP_MUL_D(DR, DA, DB);
        FP_DIV_D(DR, DA, DB);
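
        /* EFDCTUF / EFDCTSF: convert to a 32-bit unsigned/signed fraction. */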
        if (DB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                DB_e += (func == EFDCTSF ? 31 : 32);
                FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
                                  (func == EFDCTSF) ? 1 : 0);
        }
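
        /* EFDCFS: convert the single-precision operand B to double precision. */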
        FP_UNPACK_SP(SB, vb.wp + 1);

        pr_debug("SB: %ld %08lx %ld (%ld)\n",
                 SB_s, SB_f, SB_e, SB_c);

        FP_CONV(D, S, 2, 1, DR, SB);
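
        /* EFDCTUIDZ / EFDCTSIDZ: convert to a 64-bit integer, rounding toward zero. */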
        if (DB_c == FP_CLS_NAN) {
                vc.dp[0] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_D(vc.dp[0], DB, 64,
                            ((func & 0x1) == 0));
        }
        if (DB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
                                  ((func & 0x3) != 0));
        }
        if (DB_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_D(vc.wp[1], DB, 32,
                            ((func & 0x3) != 0));
        }
466 pr_debug("DR: %ld %08lx %08lx %ld (%ld)\n",
467 DR_s
, DR_f1
, DR_f0
, DR_e
, DR_c
);
469 FP_PACK_DP(vc
.dp
, DR
);

        FP_CMP_D(IR, DA, DB, 3);
        if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB)))
                FP_SET_EXCEPTION(FP_EX_INVALID);
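
        /*
         * Vector single-precision (EVFS*) operations: element 0 lives in
         * the high word of the 64-bit register, element 1 in the low word.
         */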
        FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0);
        FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1);

        FP_UNPACK_SP(SA0, va.wp);
        FP_UNPACK_SP(SA1, va.wp + 1);
        FP_UNPACK_SP(SB0, vb.wp);
        FP_UNPACK_SP(SB1, vb.wp + 1);
        FP_UNPACK_SP(SA0, va.wp);
        FP_UNPACK_SP(SA1, va.wp + 1);
505 pr_debug("SA0: %ld %08lx %ld (%ld)\n",
506 SA0_s
, SA0_f
, SA0_e
, SA0_c
);
507 pr_debug("SA1: %ld %08lx %ld (%ld)\n",
508 SA1_s
, SA1_f
, SA1_e
, SA1_c
);
509 pr_debug("SB0: %ld %08lx %ld (%ld)\n",
510 SB0_s
, SB0_f
, SB0_e
, SB0_c
);
511 pr_debug("SB1: %ld %08lx %ld (%ld)\n",
512 SB1_s
, SB1_f
, SB1_e
, SB1_c
);
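
        /* EVFSABS / EVFSNABS / EVFSNEG: sign-bit operations on both elements. */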
        vc.wp[0] = va.wp[0] & ~SIGN_BIT_S;
        vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
        vc.wp[0] = va.wp[0] | SIGN_BIT_S;
        vc.wp[1] = va.wp[1] | SIGN_BIT_S;
        vc.wp[0] = va.wp[0] ^ SIGN_BIT_S;
        vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
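
        /* EVFSADD / EVFSSUB / EVFSMUL / EVFSDIV: element-wise arithmetic. */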
        FP_ADD_S(SR0, SA0, SB0);
        FP_ADD_S(SR1, SA1, SB1);
        FP_SUB_S(SR0, SA0, SB0);
        FP_SUB_S(SR1, SA1, SB1);
        FP_MUL_S(SR0, SA0, SB0);
        FP_MUL_S(SR1, SA1, SB1);
        FP_DIV_S(SR0, SA0, SB0);
        FP_DIV_S(SR1, SA1, SB1);
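
        /* EVFSCTUF / EVFSCTSF: per-element conversion to a 32-bit fraction. */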
        if (SB0_c == FP_CLS_NAN) {
                vc.wp[0] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                SB0_e += (func == EVFSCTSF ? 31 : 32);
                FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
                                  (func == EVFSCTSF) ? 1 : 0);
        }
        if (SB1_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                SB1_e += (func == EVFSCTSF ? 31 : 32);
                FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
                                  (func == EVFSCTSF) ? 1 : 0);
        }
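
        /* EVFSCTUI / EVFSCTSI: per-element conversion in the current rounding mode. */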
        if (SB0_c == FP_CLS_NAN) {
                vc.wp[0] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
                                  ((func & 0x3) != 0));
        }
        if (SB1_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
                                  ((func & 0x3) != 0));
        }
        if (SB0_c == FP_CLS_NAN) {
                vc.wp[0] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_S(vc.wp[0], SB0, 32,
                            ((func & 0x3) != 0));
        }
        if (SB1_c == FP_CLS_NAN) {
                vc.wp[1] = 0;
                FP_SET_EXCEPTION(FP_EX_INVALID);
        } else {
                FP_TO_INT_S(vc.wp[1], SB1, 32,
                            ((func & 0x3) != 0));
        }
624 pr_debug("SR0: %ld %08lx %ld (%ld)\n",
625 SR0_s
, SR0_f
, SR0_e
, SR0_c
);
626 pr_debug("SR1: %ld %08lx %ld (%ld)\n",
627 SR1_s
, SR1_f
, SR1_e
, SR1_c
);
629 FP_PACK_SP(vc
.wp
, SR0
);
630 FP_PACK_SP(vc
.wp
+ 1, SR1
);
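
        /*
         * EVFSCMP*: ch/cl record whether the high/low element satisfied
         * the comparison; IR packs them into the 4-bit CR field image.
         */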
        FP_CMP_S(IR0, SA0, SB0, 3);
        FP_CMP_S(IR1, SA1, SB1, 3);
        if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0)))
                FP_SET_EXCEPTION(FP_EX_INVALID);
        if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1)))
                FP_SET_EXCEPTION(FP_EX_INVALID);
        ch = (IR0 == cmp) ? 1 : 0;
        cl = (IR1 == cmp) ? 1 : 0;
        IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
             (ch & cl);
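
        /*
         * Comparison results are written to the CR field selected by
         * (speinsn >> 23) & 0x7, i.e. the instruction's crD field.
         */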
        regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2));
        regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));

        /*
         * If the "invalid" exception sticky bit was set by the
         * processor for non-finite input, but was not set before the
         * instruction being emulated, clear it.  Likewise for the
         * "underflow" bit, which may have been set by the processor
         * for exact underflow, not just inexact underflow when the
         * flag should be set for IEEE 754 semantics.  Other sticky
         * exceptions will only be set by the processor when they are
         * correct according to IEEE 754 semantics, and we must not
         * clear sticky bits that were already set before the emulated
         * instruction as they represent the user-visible sticky
         * exception status.  "inexact" traps to kernel are not
         * required for IEEE semantics and are not enabled by default,
         * so the "inexact" sticky bit may have been set by a previous
         * instruction without the kernel being aware of it.
         */
        __FPU_FPSCR &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last;
        __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
        mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
        current->thread.spefscr_last = __FPU_FPSCR;

        current->thread.evr[fc] = vc.wp[0];
        regs->gpr[fc] = vc.wp[1];
684 pr_debug("ccr = %08lx\n", regs
->ccr
);
685 pr_debug("cur exceptions = %08x spefscr = %08lx\n",
686 FP_CUR_EXCEPTIONS
, __FPU_FPSCR
);
687 pr_debug("vc: %08x %08x\n", vc
.wp
[0], vc
.wp
[1]);
688 pr_debug("va: %08x %08x\n", va
.wp
[0], va
.wp
[1]);
689 pr_debug("vb: %08x %08x\n", vb
.wp
[0], vb
.wp
[1]);

        if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
                if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO)
                    && (current->thread.fpexc_mode & PR_FP_EXC_DIV))
                        return 1;
                if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW)
                    && (current->thread.fpexc_mode & PR_FP_EXC_OVF))
                        return 1;
                if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW)
                    && (current->thread.fpexc_mode & PR_FP_EXC_UND))
                        return 1;
                if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT)
                    && (current->thread.fpexc_mode & PR_FP_EXC_RES))
                        return 1;
                if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID)
                    && (current->thread.fpexc_mode & PR_FP_EXC_INV))
                        return 1;
        }

        return 0;

illegal:
        if (have_e500_cpu_a005_erratum) {
                /* according to e500 cpu a005 erratum, reissue efp inst */
                pr_debug("re-issue efp inst: %08lx\n", speinsn);
                return 0;
        }

        printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn);
        return -ENOSYS;
}
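
/*
 * Round an inexact result after the fact: the hardware implements
 * round-to-nearest and round-toward-zero itself, so round toward +Inf
 * and round toward -Inf are finished here in software.
 */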
int speround_handler(struct pt_regs *regs)
{
        union dw_union fgpr;
        int s_lo, s_hi;
        int lo_inexact, hi_inexact;
        unsigned long speinsn, type, fb, fc, fptype, func;

        if (get_user(speinsn, (unsigned int __user *) regs->nip))
                return -EFAULT;
        if ((speinsn >> 26) != 4)
                return -EINVAL;         /* not an spe instruction */

        func = speinsn & 0x7ff;
        type = insn_type(func);
        if (type == XCR) return -ENOSYS;

        __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
        pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);

        fptype = (speinsn >> 5) & 0x7;
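
        /*
         * SPEFSCR FG/FX are the guard and sticky bits the hardware left
         * behind for the low-word result, FGH/FXH the same for the high
         * word; if none of them is set the truncated result was exact.
         */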
        /* No need to round if the result is exact */
        lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX);
        hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH);
        if (!(lo_inexact || (hi_inexact && fptype == VCT)))
                return 0;

        fc = (speinsn >> 21) & 0x1f;
        s_lo = regs->gpr[fc] & SIGN_BIT_S;
        s_hi = current->thread.evr[fc] & SIGN_BIT_S;
        fgpr.wp[0] = current->thread.evr[fc];
        fgpr.wp[1] = regs->gpr[fc];

        fb = (speinsn >> 11) & 0x1f;

        /*
         * These instructions always round to zero,
         * independent of the rounding mode.
         */

        /* Recover the sign of a zero result if possible. */
        s_lo = regs->gpr[fb] & SIGN_BIT_S;

        /* Recover the sign of a zero result if possible. */
        s_lo = regs->gpr[fb] & SIGN_BIT_S;
        s_hi = current->thread.evr[fb] & SIGN_BIT_S;

        /* Recover the sign of a zero result if possible. */
        s_hi = current->thread.evr[fb] & SIGN_BIT_S;
815 pr_debug("round fgpr: %08x %08x\n", fgpr
.wp
[0], fgpr
.wp
[1]);

        /* Since SPE instructions on E500 core can handle round to nearest
         * and round toward zero in an IEEE-754-compliant way, we only need
         * to handle round toward +Inf and round toward -Inf in software.
         */
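
        /*
         * The truncated result Z lies between the representable values Z1
         * (toward zero) and Z2 (away from zero); nudging the raw result
         * word by one selects whichever neighbour the rounding mode asks
         * for.
         */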
        if ((FP_ROUNDMODE) == FP_RND_PINF) {
                if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
        } else { /* round to -Inf */
                fgpr.wp[1]++; /* Z < 0, choose Z2 */
                fgpr.wp[1]--; /* Z < 0, choose Z2 */

        if (FP_ROUNDMODE == FP_RND_PINF) {
                fgpr.dp[0]++; /* Z > 0, choose Z1 */
                fgpr.wp[1]++; /* Z > 0, choose Z1 */
        } else { /* round to -Inf */
                fgpr.dp[0]++; /* Z < 0, choose Z2 */
                fgpr.wp[1]--; /* Z < 0, choose Z2 */

        if (FP_ROUNDMODE == FP_RND_PINF) {
                if (lo_inexact && !s_lo)
                        fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
                if (hi_inexact && !s_hi)
                        fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
        } else { /* round to -Inf */
                if (lo_inexact && s_lo) {
                        fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
                        fgpr.wp[1]--; /* Z_low < 0, choose Z2 */
                if (hi_inexact && s_hi) {
                        fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
                        fgpr.wp[0]--; /* Z_high < 0, choose Z2 */

        current->thread.evr[fc] = fgpr.wp[0];
        regs->gpr[fc] = fgpr.wp[1];
882 pr_debug(" to fgpr: %08x %08x\n", fgpr
.wp
[0], fgpr
.wp
[1]);

        if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
                return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;

        return 0;
}
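
/*
 * Probe the PVR at boot to see whether this e500v1/e500v2 revision needs
 * the cpu a005 erratum workaround.
 */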
int __init spe_mathemu_init(void)
{
        u32 pvr;

        pvr = mfspr(SPRN_PVR);

        if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
            (PVR_VER(pvr) == PVR_VER_E500V2)) {
                /*
                 * E500 revisions below 1.1, 2.3, 3.1, 4.1 and 5.1 need
                 * the cpu a005 errata workaround.
                 */
                have_e500_cpu_a005_erratum = 1;
                have_e500_cpu_a005_erratum = 1;
                have_e500_cpu_a005_erratum = 1;
        }

        return 0;
}

module_init(spe_mathemu_init);
);