1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994-2020 Free Software Foundation, Inc.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
38 #include "sim-basics.h"
42 #include "sim-assert.h"
49 If digits is -1, then print all digits. */
print_bits (unsigned64 x,
            int msbit,
            int digits,
            sim_fpu_print_func print,
            void *arg)
{
  unsigned64 bit = LSBIT64 (msbit);
79 /* Quick and dirty conversion between a host double and host 64bit int. */
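/* The conversion relies on type punning through a union so that the same 64
   bits of storage can be viewed either as a host double or as an unsigned64.
   A minimal sketch of the kind of overlay assumed here (the type and field
   names are illustrative, not necessarily the ones used elsewhere):

     typedef union {
       double d;
       unsigned64 i;
     } sim_fpu_map;

   so that val.d and val.i alias the same bit pattern.  */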
/* A packed IEEE floating point number.

   Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
   32 and 64 bit numbers.  This number is interpreted as:

   Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
   (sign ? "-" : "+") 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)

   Denormalized (0 == BIASEDEXP && FRAC != 0):
   (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)

   Zero (0 == BIASEDEXP && FRAC == 0):
   (sign ? "-" : "+") 0.0

   Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
   (sign ? "-" : "+") "infinity"

   SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
   (sign ? "-" : "+") "signaling NaN"

   QuietNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC >= QUIET_NAN):
   (sign ? "-" : "+") "quiet NaN"  */
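/* Worked example (illustrative only): the single precision pattern
   0x3FC00000 has SIGN 0, BIASEDEXP 0x7F (127) and FRAC 0x400000, so it is
   read as +1.1b x 2 ^ (127 - 127) = +1.5.  */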
#define NR_EXPBITS  (is_double ? 11 : 8)
#define NR_FRACBITS (is_double ? 52 : 23)
#define SIGNBIT     (is_double ? MSBIT64 (0) : MSBIT64 (32))

#define EXPMAX32 (255)
#define EXPMAX64 (2047)
#define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))

#define EXPBIAS32 (127)
#define EXPBIAS64 (1023)
#define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)

#define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
129 /* An unpacked floating point number.
131 When unpacked, the fraction of both a 32 and 64 bit floating point
132 number is stored using the same format:
134 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
135 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
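/* For example (illustrative): unpacking single precision +1.5 leaves
   normal_exp == 0 and fraction == IMPLICIT_1 | (IMPLICIT_1 >> 1), i.e.
   0x1800000000000000 - the leading implicit 1 at bit 60, the 0.5 bit at
   bit 59, and the low GUARD and PAD bits zero.  */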
137 #define NR_PAD32 (30)
139 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
140 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
142 #define NR_GUARDS32 (7 + NR_PAD32)
143 #define NR_GUARDS64 (8 + NR_PAD64)
144 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
145 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
147 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
148 #define GUARDLSB LSBIT64 (NR_PAD)
149 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
151 #define NR_FRAC_GUARD (60)
152 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
153 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
154 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
157 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
159 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
161 #define NORMAL_EXPMAX32 (EXPBIAS32)
162 #define NORMAL_EXPMAX64 (EXPBIAS64)
163 #define NORMAL_EXPMAX (EXPBIAS)
166 /* Integer constants */
168 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
169 #define MAX_UINT32 LSMASK64 (31, 0)
170 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
172 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
173 #define MAX_UINT64 LSMASK64 (63, 0)
174 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
176 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
177 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
178 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
179 #define NR_INTBITS (is_64bit ? 64 : 32)
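/* For reference (illustrative values): MAX_INT32 is 0x7fffffff, MIN_INT32 is
   the sign extended 0xffffffff80000000, and MAX_UINT32 is 0xffffffff;
   similarly MAX_INT64 is 0x7fffffffffffffff and MIN_INT64 is
   0x8000000000000000.  */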
181 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer. */
STATIC_INLINE_SIM_FPU (unsigned64)
pack_fpu (const sim_fpu *src, int is_double)
    case sim_fpu_class_qnan:
      /* Force fraction to correct class.  */
      fraction = src->fraction;
      fraction >>= NR_GUARDS;
#ifdef SIM_QUIET_NAN_NEGATED
      fraction |= QUIET_NAN - 1;
#else
      fraction |= QUIET_NAN;
#endif
    case sim_fpu_class_snan:
      /* Force fraction to correct class.  */
      fraction = src->fraction;
      fraction >>= NR_GUARDS;
#ifdef SIM_QUIET_NAN_NEGATED
      fraction |= QUIET_NAN;
#else
      fraction &= ~QUIET_NAN;
#endif
    case sim_fpu_class_infinity:
    case sim_fpu_class_zero:
    case sim_fpu_class_number:
    case sim_fpu_class_denorm:
      ASSERT (src->fraction >= IMPLICIT_1);
      ASSERT (src->fraction < IMPLICIT_2);
      if (src->normal_exp < NORMAL_EXPMIN)
          /* This number's exponent is too low to fit into the bits
             available in the number.  We'll denormalize the number by
             storing zero in the exponent and shift the fraction to
             the right to make up for it.  */
          int nr_shift = NORMAL_EXPMIN - src->normal_exp;
          if (nr_shift > NR_FRACBITS)
              /* Underflow, just make the number zero.  */
              /* Shift by the value.  */
              fraction = src->fraction;
              fraction >>= NR_GUARDS;
              fraction >>= nr_shift;
      else if (src->normal_exp > NORMAL_EXPMAX)
          exp = (src->normal_exp + EXPBIAS);
          fraction = src->fraction;
          /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING.  */
          /* Round to nearest (even): if the only guard bit set is the most
             significant one, we're exactly half way between two
             representable numbers; choose the one that makes the LSB of
             the result zero.  */
          if ((fraction & GUARDMASK) == GUARDMSB)
              if ((fraction & (GUARDMSB << 1)))
                fraction += (GUARDMSB << 1);
              /* Add a one to the guards to force round to nearest.  */
              fraction += GUARDROUND;
          if ((fraction & IMPLICIT_2))	/* Rounding resulted in carry.  */
          fraction >>= NR_GUARDS;
          /* When exp == EXPMAX (overflow from carry) fraction must
             have been made zero.  */
          ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
  packed = ((sign ? SIGNBIT : 0)
            | (exp << NR_FRACBITS)
            | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
  /* Trace operation.  */
      printf ("pack_fpu: ");
      printf ("-> %c%0lX.%06lX\n",
              LSMASKED32 (packed, 31, 31) ? '8' : '0',
              (long) LSEXTRACTED32 (packed, 30, 23),
              (long) LSEXTRACTED32 (packed, 23 - 1, 0));
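/* Worked example (illustrative): packing a sim_fpu with sign 0,
   normal_exp 0 and fraction IMPLICIT_1 (i.e. +1.0) as a double gives
   exp = 0 + EXPBIAS64 = 1023 and a zero stored fraction, producing the
   familiar 0x3FF0000000000000.  */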
/* Unpack a 32/64 bit integer into a sim_fpu structure.  */
STATIC_INLINE_SIM_FPU (void)
unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
  unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
  unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
  int sign = (packed & SIGNBIT) != 0;
  /* Hmm.  Looks like 0 */
          /* Tastes like zero.  */
          dst->class = sim_fpu_class_zero;
          /* Zero exponent with non zero fraction - it's denormalized,
             so there isn't a leading implicit one - we'll shift it so
             that there is.  */
          dst->normal_exp = exp - EXPBIAS + 1;
          dst->class = sim_fpu_class_denorm;
          fraction <<= NR_GUARDS;
          while (fraction < IMPLICIT_1)
          dst->fraction = fraction;
  else if (exp == EXPMAX)
          /* Attached to a zero fraction - means infinity.  */
          dst->class = sim_fpu_class_infinity;
          /* dst->normal_exp = EXPBIAS; */
          /* dst->fraction = 0; */
          /* Non zero fraction, means NaN.  */
          dst->fraction = (fraction << NR_GUARDS);
#ifdef SIM_QUIET_NAN_NEGATED
          qnan = (fraction & QUIET_NAN) == 0;
#else
          qnan = fraction >= QUIET_NAN;
#endif
            dst->class = sim_fpu_class_qnan;
            dst->class = sim_fpu_class_snan;
      /* Nothing strange about this number.  */
      dst->class = sim_fpu_class_number;
      dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
      dst->normal_exp = exp - EXPBIAS;
  /* Trace operation.  */
      printf ("unpack_fpu: %c%02lX.%06lX ->\n",
              LSMASKED32 (packed, 31, 31) ? '8' : '0',
              (long) LSEXTRACTED32 (packed, 30, 23),
              (long) LSEXTRACTED32 (packed, 23 - 1, 0));
          val.i = pack_fpu (dst, 1);
          ASSERT (val.i == packed);
          unsigned32 val = pack_fpu (dst, 0);
          unsigned32 org = packed;
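/* Worked example (illustrative): unpack_fpu of the double 0x4008000000000000
   (+3.0) yields class sim_fpu_class_number, sign 0, normal_exp 1 and a
   fraction of 1.1b in the internal layout, i.e.
   IMPLICIT_1 | (IMPLICIT_1 >> 1).  */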
/* Convert a floating point into an integer.  */
STATIC_INLINE_SIM_FPU (int)
fpu2i (signed64 *i, const sim_fpu *s, int is_64bit, sim_fpu_round round)
  if (sim_fpu_is_zero (s))
  if (sim_fpu_is_snan (s))
      *i = MIN_INT;		/* FIXME */
      return sim_fpu_status_invalid_cvi;
  if (sim_fpu_is_qnan (s))
      *i = MIN_INT;		/* FIXME */
      return sim_fpu_status_invalid_cvi;
  /* Map infinity onto MAX_INT...  */
  if (sim_fpu_is_infinity (s))
      *i = s->sign ? MIN_INT : MAX_INT;
      return sim_fpu_status_invalid_cvi;
  /* It is a number, but a small one.  */
  if (s->normal_exp < 0)
      return sim_fpu_status_inexact;
  /* Is the floating point MIN_INT or just close?  */
  if (s->sign && s->normal_exp == (NR_INTBITS - 1))
      ASSERT (s->fraction >= IMPLICIT_1);
      if (s->fraction == IMPLICIT_1)
        return 0; /* exact */
      if (is_64bit) /* can't round */
        return sim_fpu_status_invalid_cvi; /* must be overflow */
      /* For a 32bit with MAX_INT, rounding is possible.  */
      switch (round)
        case sim_fpu_round_default:
        case sim_fpu_round_zero:
          if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
            return sim_fpu_status_invalid_cvi;
            return sim_fpu_status_inexact;
        case sim_fpu_round_near:
          if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
            return sim_fpu_status_invalid_cvi;
          else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
            return sim_fpu_status_invalid_cvi;
            return sim_fpu_status_inexact;
        case sim_fpu_round_up:
          if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
            return sim_fpu_status_inexact;
            return sim_fpu_status_invalid_cvi;
        case sim_fpu_round_down:
          return sim_fpu_status_invalid_cvi;
  /* Would right shifting result in the FRAC being shifted into
     (through) the integer's sign bit?  */
  if (s->normal_exp > (NR_INTBITS - 2))
      *i = s->sign ? MIN_INT : MAX_INT;
      return sim_fpu_status_invalid_cvi;
  /* Normal number, shift it into place.  */
  shift = (s->normal_exp - (NR_FRAC_GUARD));
  if (tmp & ((SIGNED64 (1) << shift) - 1))
    status |= sim_fpu_status_inexact;
  *i = s->sign ? (-tmp) : (tmp);
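/* Worked example (illustrative): converting +2.5 (normal_exp 1, fraction
   1.01b) with fpu2i truncates towards zero, storing 2 in *I and returning
   sim_fpu_status_inexact because the discarded 0.5 bit was non zero.  */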
526 /* Convert an integer into a floating point. */
527 STATIC_INLINE_SIM_FPU (int)
528 i2fpu (sim_fpu
*f
, signed64 i
, int is_64bit
)
533 f
->class = sim_fpu_class_zero
;
539 f
->class = sim_fpu_class_number
;
541 f
->normal_exp
= NR_FRAC_GUARD
;
545 /* Special case for minint, since there is no corresponding
546 +ve integer representation for it. */
549 f
->fraction
= IMPLICIT_1
;
550 f
->normal_exp
= NR_INTBITS
- 1;
558 if (f
->fraction
>= IMPLICIT_2
)
562 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
565 while (f
->fraction
>= IMPLICIT_2
);
567 else if (f
->fraction
< IMPLICIT_1
)
574 while (f
->fraction
< IMPLICIT_1
);
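/* Worked example (illustrative): i2fpu (f, 5, 0) produces sign 0,
   normal_exp 2 and a fraction of 1.01b in the internal layout; the loops
   above only shift until exactly one leading implicit bit remains, so 5
   ends up represented as +1.25 x 2^2.  */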
578 /* trace operation */
581 printf ("i2fpu: 0x%08lX ->\n", (long) i
);
588 fpu2i (&val
, f
, is_64bit
, sim_fpu_round_zero
);
589 if (i
>= MIN_INT32
&& i
<= MAX_INT32
)
/* Convert a floating point into an unsigned integer.  */
STATIC_INLINE_SIM_FPU (int)
fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
  const int is_double = 1;
606 if (sim_fpu_is_zero (s
))
611 if (sim_fpu_is_nan (s
))
616 /* It is a negative number. */
622 /* Get reasonable MAX_USI_INT... */
623 if (sim_fpu_is_infinity (s
))
628 /* It is a number, but a small one. */
629 if (s
->normal_exp
< 0)
635 if (s
->normal_exp
> (NR_INTBITS
- 1))
641 tmp
= (s
->fraction
& ~PADMASK
);
642 shift
= (s
->normal_exp
- (NR_FRACBITS
+ NR_GUARDS
));
656 /* Convert an unsigned integer into a floating point. */
657 STATIC_INLINE_SIM_FPU (int)
658 u2fpu (sim_fpu
*f
, unsigned64 u
, int is_64bit
)
662 f
->class = sim_fpu_class_zero
;
668 f
->class = sim_fpu_class_number
;
670 f
->normal_exp
= NR_FRAC_GUARD
;
673 while (f
->fraction
< IMPLICIT_1
)
683 /* register <-> sim_fpu */
685 INLINE_SIM_FPU (void)
686 sim_fpu_32to (sim_fpu
*f
, unsigned32 s
)
688 unpack_fpu (f
, s
, 0);
692 INLINE_SIM_FPU (void)
693 sim_fpu_232to (sim_fpu
*f
, unsigned32 h
, unsigned32 l
)
697 unpack_fpu (f
, s
, 1);
701 INLINE_SIM_FPU (void)
702 sim_fpu_64to (sim_fpu
*f
, unsigned64 s
)
704 unpack_fpu (f
, s
, 1);
708 INLINE_SIM_FPU (void)
709 sim_fpu_to32 (unsigned32
*s
,
712 *s
= pack_fpu (f
, 0);
716 INLINE_SIM_FPU (void)
717 sim_fpu_to232 (unsigned32
*h
, unsigned32
*l
,
720 unsigned64 s
= pack_fpu (f
, 1);
726 INLINE_SIM_FPU (void)
727 sim_fpu_to64 (unsigned64
*u
,
730 *u
= pack_fpu (f
, 1);
734 INLINE_SIM_FPU (void)
735 sim_fpu_fractionto (sim_fpu
*f
,
741 int shift
= (NR_FRAC_GUARD
- precision
);
742 f
->class = sim_fpu_class_number
;
744 f
->normal_exp
= normal_exp
;
745 /* Shift the fraction to where sim-fpu expects it. */
747 f
->fraction
= (fraction
<< shift
);
749 f
->fraction
= (fraction
>> -shift
);
750 f
->fraction
|= IMPLICIT_1
;
754 INLINE_SIM_FPU (unsigned64
)
755 sim_fpu_tofraction (const sim_fpu
*d
,
758 /* We have NR_FRAC_GUARD bits, we want only PRECISION bits. */
759 int shift
= (NR_FRAC_GUARD
- precision
);
760 unsigned64 fraction
= (d
->fraction
& ~IMPLICIT_1
);
762 return fraction
>> shift
;
764 return fraction
<< -shift
;
770 STATIC_INLINE_SIM_FPU (int)
771 do_normal_overflow (sim_fpu
*f
,
777 case sim_fpu_round_default
:
779 case sim_fpu_round_near
:
780 f
->class = sim_fpu_class_infinity
;
782 case sim_fpu_round_up
:
784 f
->class = sim_fpu_class_infinity
;
786 case sim_fpu_round_down
:
788 f
->class = sim_fpu_class_infinity
;
790 case sim_fpu_round_zero
:
793 f
->normal_exp
= NORMAL_EXPMAX
;
794 f
->fraction
= LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS
);
795 return (sim_fpu_status_overflow
| sim_fpu_status_inexact
);
798 STATIC_INLINE_SIM_FPU (int)
799 do_normal_underflow (sim_fpu
*f
,
805 case sim_fpu_round_default
:
807 case sim_fpu_round_near
:
808 f
->class = sim_fpu_class_zero
;
810 case sim_fpu_round_up
:
812 f
->class = sim_fpu_class_zero
;
814 case sim_fpu_round_down
:
816 f
->class = sim_fpu_class_zero
;
818 case sim_fpu_round_zero
:
819 f
->class = sim_fpu_class_zero
;
822 f
->normal_exp
= NORMAL_EXPMIN
- NR_FRACBITS
;
823 f
->fraction
= IMPLICIT_1
;
824 return (sim_fpu_status_inexact
| sim_fpu_status_underflow
);
/* Round a number using NR_GUARDS.
   Returns the rounded number, or sets F->FRACTION == 0 when it underflows.  */
832 STATIC_INLINE_SIM_FPU (int)
833 do_normal_round (sim_fpu
*f
,
837 unsigned64 guardmask
= LSMASK64 (nr_guards
- 1, 0);
838 unsigned64 guardmsb
= LSBIT64 (nr_guards
- 1);
839 unsigned64 fraclsb
= guardmsb
<< 1;
840 if ((f
->fraction
& guardmask
))
842 int status
= sim_fpu_status_inexact
;
845 case sim_fpu_round_default
:
847 case sim_fpu_round_near
:
848 if ((f
->fraction
& guardmsb
))
850 if ((f
->fraction
& fraclsb
))
852 status
|= sim_fpu_status_rounded
;
854 else if ((f
->fraction
& (guardmask
>> 1)))
856 status
|= sim_fpu_status_rounded
;
860 case sim_fpu_round_up
:
862 status
|= sim_fpu_status_rounded
;
864 case sim_fpu_round_down
:
866 status
|= sim_fpu_status_rounded
;
868 case sim_fpu_round_zero
:
871 f
->fraction
&= ~guardmask
;
872 /* Round if needed, handle resulting overflow. */
873 if ((status
& sim_fpu_status_rounded
))
875 f
->fraction
+= fraclsb
;
876 if ((f
->fraction
& IMPLICIT_2
))
889 STATIC_INLINE_SIM_FPU (int)
890 do_round (sim_fpu
*f
,
893 sim_fpu_denorm denorm
)
897 case sim_fpu_class_qnan
:
898 case sim_fpu_class_zero
:
899 case sim_fpu_class_infinity
:
902 case sim_fpu_class_snan
:
903 /* Quieten a SignalingNaN. */
904 f
->class = sim_fpu_class_qnan
;
905 return sim_fpu_status_invalid_snan
;
907 case sim_fpu_class_number
:
908 case sim_fpu_class_denorm
:
911 ASSERT (f
->fraction
< IMPLICIT_2
);
912 ASSERT (f
->fraction
>= IMPLICIT_1
);
913 if (f
->normal_exp
< NORMAL_EXPMIN
)
          /* This number's exponent is too low to fit into the bits
             available in the number.  Round off any bits that will be
             discarded as a result of denormalization.  Edge case is
             the implicit bit shifted to GUARD0 and then rounded up.  */
          int shift = NORMAL_EXPMIN - f->normal_exp;
921 if (shift
+ NR_GUARDS
<= NR_FRAC_GUARD
+ 1
922 && !(denorm
& sim_fpu_denorm_zero
))
924 status
= do_normal_round (f
, shift
+ NR_GUARDS
, round
);
925 if (f
->fraction
== 0) /* Rounding underflowed. */
927 status
|= do_normal_underflow (f
, is_double
, round
);
929 else if (f
->normal_exp
< NORMAL_EXPMIN
) /* still underflow? */
931 status
|= sim_fpu_status_denorm
;
932 /* Any loss of precision when denormalizing is
933 underflow. Some processors check for underflow
934 before rounding, some after! */
935 if (status
& sim_fpu_status_inexact
)
936 status
|= sim_fpu_status_underflow
;
937 /* Flag that resultant value has been denormalized. */
938 f
->class = sim_fpu_class_denorm
;
940 else if ((denorm
& sim_fpu_denorm_underflow_inexact
))
942 if ((status
& sim_fpu_status_inexact
))
943 status
|= sim_fpu_status_underflow
;
948 status
= do_normal_underflow (f
, is_double
, round
);
951 else if (f
->normal_exp
> NORMAL_EXPMAX
)
954 status
= do_normal_overflow (f
, is_double
, round
);
958 status
= do_normal_round (f
, NR_GUARDS
, round
);
959 if (f
->fraction
== 0)
960 /* f->class = sim_fpu_class_zero; */
961 status
|= do_normal_underflow (f
, is_double
, round
);
962 else if (f
->normal_exp
> NORMAL_EXPMAX
)
963 /* Oops! rounding caused overflow. */
964 status
|= do_normal_overflow (f
, is_double
, round
);
966 ASSERT ((f
->class == sim_fpu_class_number
967 || f
->class == sim_fpu_class_denorm
)
968 <= (f
->fraction
< IMPLICIT_2
&& f
->fraction
>= IMPLICIT_1
));
976 sim_fpu_round_32 (sim_fpu
*f
,
978 sim_fpu_denorm denorm
)
980 return do_round (f
, 0, round
, denorm
);
984 sim_fpu_round_64 (sim_fpu
*f
,
986 sim_fpu_denorm denorm
)
988 return do_round (f
, 1, round
, denorm
);
996 sim_fpu_add (sim_fpu
*f
,
1000 if (sim_fpu_is_snan (l
))
1003 f
->class = sim_fpu_class_qnan
;
1004 return sim_fpu_status_invalid_snan
;
1006 if (sim_fpu_is_snan (r
))
1009 f
->class = sim_fpu_class_qnan
;
1010 return sim_fpu_status_invalid_snan
;
1012 if (sim_fpu_is_qnan (l
))
1017 if (sim_fpu_is_qnan (r
))
1022 if (sim_fpu_is_infinity (l
))
1024 if (sim_fpu_is_infinity (r
)
1025 && l
->sign
!= r
->sign
)
1028 return sim_fpu_status_invalid_isi
;
1033 if (sim_fpu_is_infinity (r
))
1038 if (sim_fpu_is_zero (l
))
1040 if (sim_fpu_is_zero (r
))
1043 f
->sign
= l
->sign
& r
->sign
;
1049 if (sim_fpu_is_zero (r
))
1056 int shift
= l
->normal_exp
- r
->normal_exp
;
1057 unsigned64 lfraction
;
1058 unsigned64 rfraction
;
1059 /* use exp of larger */
1060 if (shift
>= NR_FRAC_GUARD
)
1062 /* left has much bigger magnitude */
1064 return sim_fpu_status_inexact
;
1066 if (shift
<= - NR_FRAC_GUARD
)
1068 /* right has much bigger magnitude */
1070 return sim_fpu_status_inexact
;
1072 lfraction
= l
->fraction
;
1073 rfraction
= r
->fraction
;
1076 f
->normal_exp
= l
->normal_exp
;
1077 if (rfraction
& LSMASK64 (shift
- 1, 0))
1079 status
|= sim_fpu_status_inexact
;
1080 rfraction
|= LSBIT64 (shift
); /* Stick LSBit. */
1082 rfraction
>>= shift
;
1086 f
->normal_exp
= r
->normal_exp
;
1087 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1089 status
|= sim_fpu_status_inexact
;
1090 lfraction
|= LSBIT64 (- shift
); /* Stick LSBit. */
1092 lfraction
>>= -shift
;
1096 f
->normal_exp
= r
->normal_exp
;
1099 /* Perform the addition. */
1101 lfraction
= - lfraction
;
1103 rfraction
= - rfraction
;
1104 f
->fraction
= lfraction
+ rfraction
;
1107 if (f
->fraction
== 0)
1114 f
->class = sim_fpu_class_number
;
1115 if (((signed64
) f
->fraction
) >= 0)
1120 f
->fraction
= - f
->fraction
;
1124 if ((f
->fraction
& IMPLICIT_2
))
1126 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1129 else if (f
->fraction
< IMPLICIT_1
)
1136 while (f
->fraction
< IMPLICIT_1
);
1138 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
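/* A minimal usage sketch (illustrative only, not part of the library).  It
   assumes the rounding and denormal control values (sim_fpu_round_near,
   sim_fpu_denorm_default) declared in sim-fpu.h, and simply adds two packed
   single precision values, rounding and repacking the result:  */
#if 0
static unsigned32
example_add32 (unsigned32 a, unsigned32 b)
{
  sim_fpu A, B, S;
  unsigned32 packed;
  sim_fpu_32to (&A, a);				/* unpack operands */
  sim_fpu_32to (&B, b);
  sim_fpu_add (&S, &A, &B);			/* unrounded sum */
  sim_fpu_round_32 (&S, sim_fpu_round_near, sim_fpu_denorm_default);
  sim_fpu_to32 (&packed, &S);			/* repack */
  return packed;				/* e.g. 3.0f + 1.0f -> 0x40800000 */
}
#endif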
1144 INLINE_SIM_FPU (int)
1145 sim_fpu_sub (sim_fpu
*f
,
1149 if (sim_fpu_is_snan (l
))
1152 f
->class = sim_fpu_class_qnan
;
1153 return sim_fpu_status_invalid_snan
;
1155 if (sim_fpu_is_snan (r
))
1158 f
->class = sim_fpu_class_qnan
;
1159 return sim_fpu_status_invalid_snan
;
1161 if (sim_fpu_is_qnan (l
))
1166 if (sim_fpu_is_qnan (r
))
1171 if (sim_fpu_is_infinity (l
))
1173 if (sim_fpu_is_infinity (r
)
1174 && l
->sign
== r
->sign
)
1177 return sim_fpu_status_invalid_isi
;
1182 if (sim_fpu_is_infinity (r
))
1188 if (sim_fpu_is_zero (l
))
1190 if (sim_fpu_is_zero (r
))
1193 f
->sign
= l
->sign
& !r
->sign
;
1202 if (sim_fpu_is_zero (r
))
1209 int shift
= l
->normal_exp
- r
->normal_exp
;
1210 unsigned64 lfraction
;
1211 unsigned64 rfraction
;
1212 /* use exp of larger */
1213 if (shift
>= NR_FRAC_GUARD
)
1215 /* left has much bigger magnitude */
1217 return sim_fpu_status_inexact
;
1219 if (shift
<= - NR_FRAC_GUARD
)
1221 /* right has much bigger magnitude */
1224 return sim_fpu_status_inexact
;
1226 lfraction
= l
->fraction
;
1227 rfraction
= r
->fraction
;
1230 f
->normal_exp
= l
->normal_exp
;
1231 if (rfraction
& LSMASK64 (shift
- 1, 0))
1233 status
|= sim_fpu_status_inexact
;
1234 rfraction
|= LSBIT64 (shift
); /* Stick LSBit. */
1236 rfraction
>>= shift
;
1240 f
->normal_exp
= r
->normal_exp
;
1241 if (lfraction
& LSMASK64 (- shift
- 1, 0))
1243 status
|= sim_fpu_status_inexact
;
1244 lfraction
|= LSBIT64 (- shift
); /* Stick LSBit. */
1246 lfraction
>>= -shift
;
1250 f
->normal_exp
= r
->normal_exp
;
1253 /* Perform the subtraction. */
1255 lfraction
= - lfraction
;
1257 rfraction
= - rfraction
;
1258 f
->fraction
= lfraction
+ rfraction
;
1261 if (f
->fraction
== 0)
1268 f
->class = sim_fpu_class_number
;
1269 if (((signed64
) f
->fraction
) >= 0)
1274 f
->fraction
= - f
->fraction
;
1278 if ((f
->fraction
& IMPLICIT_2
))
1280 f
->fraction
= (f
->fraction
>> 1) | (f
->fraction
& 1);
1283 else if (f
->fraction
< IMPLICIT_1
)
1290 while (f
->fraction
< IMPLICIT_1
);
1292 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1298 INLINE_SIM_FPU (int)
1299 sim_fpu_mul (sim_fpu
*f
,
1303 if (sim_fpu_is_snan (l
))
1306 f
->class = sim_fpu_class_qnan
;
1307 return sim_fpu_status_invalid_snan
;
1309 if (sim_fpu_is_snan (r
))
1312 f
->class = sim_fpu_class_qnan
;
1313 return sim_fpu_status_invalid_snan
;
1315 if (sim_fpu_is_qnan (l
))
1320 if (sim_fpu_is_qnan (r
))
1325 if (sim_fpu_is_infinity (l
))
1327 if (sim_fpu_is_zero (r
))
1330 return sim_fpu_status_invalid_imz
;
1333 f
->sign
= l
->sign
^ r
->sign
;
1336 if (sim_fpu_is_infinity (r
))
1338 if (sim_fpu_is_zero (l
))
1341 return sim_fpu_status_invalid_imz
;
1344 f
->sign
= l
->sign
^ r
->sign
;
1347 if (sim_fpu_is_zero (l
) || sim_fpu_is_zero (r
))
1350 f
->sign
= l
->sign
^ r
->sign
;
  /* Calculate the mantissa by multiplying both 64bit numbers to get a
     128 bit number.  */
    unsigned64 nl = l->fraction & 0xffffffff;
    unsigned64 nh = l->fraction >> 32;
    unsigned64 ml = r->fraction & 0xffffffff;
    unsigned64 mh = r->fraction >> 32;
    unsigned64 pp_ll = ml * nl;
    unsigned64 pp_hl = mh * nl;
    unsigned64 pp_lh = ml * nh;
    unsigned64 pp_hh = mh * nh;
    unsigned64 res2 = 0;
    unsigned64 res0 = 0;
    unsigned64 ps_hh__ = pp_hl + pp_lh;
    if (ps_hh__ < pp_hl)
      res2 += UNSIGNED64 (0x100000000);
    pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
    res0 = pp_ll + pp_hl;
    res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1379 f
->normal_exp
= l
->normal_exp
+ r
->normal_exp
;
1380 f
->sign
= l
->sign
^ r
->sign
;
1381 f
->class = sim_fpu_class_number
;
1383 /* Input is bounded by [1,2) ; [2^60,2^61)
1384 Output is bounded by [1,4) ; [2^120,2^122) */
1386 /* Adjust the exponent according to where the decimal point ended
1387 up in the high 64 bit word. In the source the decimal point
1388 was at NR_FRAC_GUARD. */
1389 f
->normal_exp
+= NR_FRAC_GUARD
+ 64 - (NR_FRAC_GUARD
* 2);
1391 /* The high word is bounded according to the above. Consequently
1392 it has never overflowed into IMPLICIT_2. */
1393 ASSERT (high
< LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64));
1394 ASSERT (high
>= LSBIT64 ((NR_FRAC_GUARD
* 2) - 64));
1395 ASSERT (LSBIT64 (((NR_FRAC_GUARD
+ 1) * 2) - 64) < IMPLICIT_1
);
1402 if (low
& LSBIT64 (63))
1406 while (high
< IMPLICIT_1
);
1408 ASSERT (high
>= IMPLICIT_1
&& high
< IMPLICIT_2
);
1411 f
->fraction
= (high
| 1); /* sticky */
1412 return sim_fpu_status_inexact
;
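/* The partial product scheme above is ordinary 64x64 -> 128 bit long
   multiplication in 32 bit limbs (illustrative identity):

     n * m = (nh*mh << 64) + ((nh*ml + nl*mh) << 32) + nl*ml

   with carries out of the low 64 bits folded into the high word res2; the
   exponent and normalization steps then move the binary point of the 128
   bit product back to the usual NR_FRAC_GUARD position.  */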
1423 INLINE_SIM_FPU (int)
1424 sim_fpu_div (sim_fpu
*f
,
1428 if (sim_fpu_is_snan (l
))
1431 f
->class = sim_fpu_class_qnan
;
1432 return sim_fpu_status_invalid_snan
;
1434 if (sim_fpu_is_snan (r
))
1437 f
->class = sim_fpu_class_qnan
;
1438 return sim_fpu_status_invalid_snan
;
1440 if (sim_fpu_is_qnan (l
))
1443 f
->class = sim_fpu_class_qnan
;
1446 if (sim_fpu_is_qnan (r
))
1449 f
->class = sim_fpu_class_qnan
;
1452 if (sim_fpu_is_infinity (l
))
1454 if (sim_fpu_is_infinity (r
))
1457 return sim_fpu_status_invalid_idi
;
1462 f
->sign
= l
->sign
^ r
->sign
;
1466 if (sim_fpu_is_zero (l
))
1468 if (sim_fpu_is_zero (r
))
1471 return sim_fpu_status_invalid_zdz
;
1476 f
->sign
= l
->sign
^ r
->sign
;
1480 if (sim_fpu_is_infinity (r
))
1483 f
->sign
= l
->sign
^ r
->sign
;
1486 if (sim_fpu_is_zero (r
))
1488 f
->class = sim_fpu_class_infinity
;
1489 f
->sign
= l
->sign
^ r
->sign
;
1490 return sim_fpu_status_invalid_div0
;
  /* Calculate the quotient mantissa by dividing the two 64 bit fractions.  */
    /* quotient = (numerator / denominator)
                  x 2^(numerator exponent - denominator exponent)  */
    unsigned64 numerator;
    unsigned64 denominator;
    unsigned64 quotient;
1504 f
->class = sim_fpu_class_number
;
1505 f
->sign
= l
->sign
^ r
->sign
;
1506 f
->normal_exp
= l
->normal_exp
- r
->normal_exp
;
1508 numerator
= l
->fraction
;
1509 denominator
= r
->fraction
;
1511 /* Fraction will be less than 1.0 */
1512 if (numerator
< denominator
)
1517 ASSERT (numerator
>= denominator
);
1519 /* Gain extra precision, already used one spare bit. */
1520 numerator
<<= NR_SPARE
;
1521 denominator
<<= NR_SPARE
;
1523 /* Does divide one bit at a time. Optimize??? */
1525 bit
= (IMPLICIT_1
<< NR_SPARE
);
1528 if (numerator
>= denominator
)
1531 numerator
-= denominator
;
1537 /* Discard (but save) the extra bits. */
1538 if ((quotient
& LSMASK64 (NR_SPARE
-1, 0)))
1539 quotient
= (quotient
>> NR_SPARE
) | 1;
1541 quotient
= (quotient
>> NR_SPARE
);
1543 f
->fraction
= quotient
;
1544 ASSERT (f
->fraction
>= IMPLICIT_1
&& f
->fraction
< IMPLICIT_2
);
1547 f
->fraction
|= 1; /* Stick remaining bits. */
1548 return sim_fpu_status_inexact
;
1556 INLINE_SIM_FPU (int)
1557 sim_fpu_rem (sim_fpu
*f
,
1561 if (sim_fpu_is_snan (l
))
1564 f
->class = sim_fpu_class_qnan
;
1565 return sim_fpu_status_invalid_snan
;
1567 if (sim_fpu_is_snan (r
))
1570 f
->class = sim_fpu_class_qnan
;
1571 return sim_fpu_status_invalid_snan
;
1573 if (sim_fpu_is_qnan (l
))
1576 f
->class = sim_fpu_class_qnan
;
1579 if (sim_fpu_is_qnan (r
))
1582 f
->class = sim_fpu_class_qnan
;
1585 if (sim_fpu_is_infinity (l
))
1588 return sim_fpu_status_invalid_irx
;
1590 if (sim_fpu_is_zero (r
))
1593 return sim_fpu_status_invalid_div0
;
1595 if (sim_fpu_is_zero (l
))
1600 if (sim_fpu_is_infinity (r
))
1608 /* Remainder is calculated as l-n*r, where n is l/r rounded to the
1609 nearest integer. The variable n is rounded half even. */
1611 sim_fpu_div (&n
, l
, r
);
1612 sim_fpu_round_64 (&n
, 0, 0);
1614 if (n
.normal_exp
< -1) /* If n looks like zero just return l. */
1619 else if (n
.class == sim_fpu_class_number
1620 && n
.normal_exp
<= (NR_FRAC_GUARD
)) /* If not too large round. */
1621 do_normal_round (&n
, (NR_FRAC_GUARD
) - n
.normal_exp
, sim_fpu_round_near
);
1623 /* Mark 0's as zero so multiply can detect zero. */
1624 if (n
.fraction
== 0)
1625 n
.class = sim_fpu_class_zero
;
1627 /* Calculate n*r. */
1628 sim_fpu_mul (&tmp
, &n
, r
);
1629 sim_fpu_round_64 (&tmp
, 0, 0);
1631 /* Finally calculate l-n*r. */
1632 sim_fpu_sub (f
, l
, &tmp
);
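/* Worked example (illustrative): sim_fpu_rem with l = 7.0 and r = 2.0 first
   forms n = 7/2 = 3.5, rounds it half-even to 4.0, and then returns
   7.0 - 4.0*2.0 = -1.0; this is the IEEE remainder, which can differ in
   sign from a C fmod style result (+1.0).  */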
1639 INLINE_SIM_FPU (int)
1640 sim_fpu_max (sim_fpu
*f
,
1644 if (sim_fpu_is_snan (l
))
1647 f
->class = sim_fpu_class_qnan
;
1648 return sim_fpu_status_invalid_snan
;
1650 if (sim_fpu_is_snan (r
))
1653 f
->class = sim_fpu_class_qnan
;
1654 return sim_fpu_status_invalid_snan
;
1656 if (sim_fpu_is_qnan (l
))
1661 if (sim_fpu_is_qnan (r
))
1666 if (sim_fpu_is_infinity (l
))
1668 if (sim_fpu_is_infinity (r
)
1669 && l
->sign
== r
->sign
)
1672 return sim_fpu_status_invalid_isi
;
1675 *f
= *r
; /* -inf < anything */
1677 *f
= *l
; /* +inf > anything */
1680 if (sim_fpu_is_infinity (r
))
1683 *f
= *l
; /* anything > -inf */
1685 *f
= *r
; /* anything < +inf */
1688 if (l
->sign
> r
->sign
)
1690 *f
= *r
; /* -ve < +ve */
1693 if (l
->sign
< r
->sign
)
1695 *f
= *l
; /* +ve > -ve */
1698 ASSERT (l
->sign
== r
->sign
);
1699 if (l
->normal_exp
> r
->normal_exp
1700 || (l
->normal_exp
== r
->normal_exp
1701 && l
->fraction
> r
->fraction
))
1705 *f
= *r
; /* -ve < -ve */
1707 *f
= *l
; /* +ve > +ve */
1714 *f
= *l
; /* -ve > -ve */
1716 *f
= *r
; /* +ve < +ve */
1722 INLINE_SIM_FPU (int)
1723 sim_fpu_min (sim_fpu
*f
,
1727 if (sim_fpu_is_snan (l
))
1730 f
->class = sim_fpu_class_qnan
;
1731 return sim_fpu_status_invalid_snan
;
1733 if (sim_fpu_is_snan (r
))
1736 f
->class = sim_fpu_class_qnan
;
1737 return sim_fpu_status_invalid_snan
;
1739 if (sim_fpu_is_qnan (l
))
1744 if (sim_fpu_is_qnan (r
))
1749 if (sim_fpu_is_infinity (l
))
1751 if (sim_fpu_is_infinity (r
)
1752 && l
->sign
== r
->sign
)
1755 return sim_fpu_status_invalid_isi
;
1758 *f
= *l
; /* -inf < anything */
	  *f = *r;	/* +inf > anything */
1763 if (sim_fpu_is_infinity (r
))
1766 *f
= *r
; /* anything > -inf */
1768 *f
= *l
; /* anything < +inf */
1771 if (l
->sign
> r
->sign
)
1773 *f
= *l
; /* -ve < +ve */
1776 if (l
->sign
< r
->sign
)
1778 *f
= *r
; /* +ve > -ve */
1781 ASSERT (l
->sign
== r
->sign
);
1782 if (l
->normal_exp
> r
->normal_exp
1783 || (l
->normal_exp
== r
->normal_exp
1784 && l
->fraction
> r
->fraction
))
1788 *f
= *l
; /* -ve < -ve */
1790 *f
= *r
; /* +ve > +ve */
1797 *f
= *r
; /* -ve > -ve */
1799 *f
= *l
; /* +ve < +ve */
1805 INLINE_SIM_FPU (int)
1806 sim_fpu_neg (sim_fpu
*f
,
1809 if (sim_fpu_is_snan (r
))
1812 f
->class = sim_fpu_class_qnan
;
1813 return sim_fpu_status_invalid_snan
;
1815 if (sim_fpu_is_qnan (r
))
1826 INLINE_SIM_FPU (int)
1827 sim_fpu_abs (sim_fpu
*f
,
1832 if (sim_fpu_is_snan (r
))
1834 f
->class = sim_fpu_class_qnan
;
1835 return sim_fpu_status_invalid_snan
;
1841 INLINE_SIM_FPU (int)
1842 sim_fpu_inv (sim_fpu
*f
,
1845 return sim_fpu_div (f
, &sim_fpu_one
, r
);
1849 INLINE_SIM_FPU (int)
1850 sim_fpu_sqrt (sim_fpu
*f
,
1853 if (sim_fpu_is_snan (r
))
1856 return sim_fpu_status_invalid_snan
;
1858 if (sim_fpu_is_qnan (r
))
1863 if (sim_fpu_is_zero (r
))
1865 f
->class = sim_fpu_class_zero
;
1870 if (sim_fpu_is_infinity (r
))
1875 return sim_fpu_status_invalid_sqrt
;
1879 f
->class = sim_fpu_class_infinity
;
1888 return sim_fpu_status_invalid_sqrt
;
1891 /* @(#)e_sqrt.c 5.1 93/09/24 */
1893 * ====================================================
1894 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1896 * Developed at SunPro, a Sun Microsystems, Inc. business.
1897 * Permission to use, copy, modify, and distribute this
1898 * software is freely granted, provided that this notice
1900 * ====================================================
1903 /* __ieee754_sqrt(x)
1904 * Return correctly rounded sqrt.
1905 * ------------------------------------------
1906 * | Use the hardware sqrt if you have one |
1907 * ------------------------------------------
1909 * Bit by bit method using integer arithmetic. (Slow, but portable)
1911 * Scale x to y in [1,4) with even powers of 2:
1912 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1913 * sqrt(x) = 2^k * sqrt(y)
1916 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1917 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1919 - y = ((m even) ? x : 2.x)
1921 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1923 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
 * 2. Bit by bit computation
 *    Let q[i] = sqrt(y) truncated to i bits after the binary point
 *    (q[0] = 1), s[i] = 2*q[i], and y[i] = 2^(i+1) * (y - q[i]^2).	(1)
 *
 *    To compute q[i+1] from q[i], one checks whether
 *
 *        (q[i] + 2^-(i+1))^2 <= y.					(2)
 *
 *    If (2) is false, then q[i+1] = q[i]; otherwise q[i+1] = q[i] + 2^-(i+1).
 *
 *    With some algebraic manipulation, it is not difficult to see
 *    that (2) is equivalent to
 *
 *        s[i] + 2^-(i+1) <= y[i].					(3)
 *
 *    The advantage of (3) is that s[i] and y[i] can be computed by
 *    the following recurrence formula: if (3) is false then
 *
 *        s[i+1] = s[i],  y[i+1] = y[i];				(4)
 *
 *    otherwise
 *
 *        s[i+1] = s[i] + 2^-i,  y[i+1] = y[i] - s[i] - 2^-(i+1).	(5)
 *
 -    NOTE: y[i+1] = 2 * (y[i] - s[i] - 2^-(i+1))
 *
 *    One may easily use induction to prove (4) and (5).
 *    Note.  Since the left hand side of (3) contains only i+2 bits,
 *    it is not necessary to do a full (53-bit) comparison in (3).
 *
 *    After generating the 53 bit result, we compute one more bit.
 *    Together with the remainder, we can decide whether the
 *    result is exact, bigger than 1/2 ulp, or less than 1/2 ulp
 *    (it will never be exactly 1/2 ulp).
 *    The rounding mode can be detected by checking whether
 *    huge + tiny is equal to huge, and whether huge - tiny is
 *    equal to huge, for some floating point numbers "huge" and "tiny".
1984 * sqrt(+-0) = +-0 ... exact
1986 * sqrt(-ve) = NaN ... with invalid signal
1987 * sqrt(NaN) = NaN ... with invalid signal for signalling NaN
1989 * Other methods : see the appended file at the end of the program below.
1994 /* Generate sqrt(x) bit by bit. */
2000 f
->class = sim_fpu_class_number
;
2003 f
->normal_exp
= (r
->normal_exp
>> 1); /* exp = [exp/2] */
2005 /* Odd exp, double x to make it even. */
2006 ASSERT (y
>= IMPLICIT_1
&& y
< IMPLICIT_4
);
2007 if ((r
->normal_exp
& 1))
2011 ASSERT (y
>= IMPLICIT_1
&& y
< (IMPLICIT_2
<< 1));
2013 /* Let loop determine first value of s (either 1 or 2) */
2020 unsigned64 t
= s
+ b
;
2031 ASSERT (q
>= IMPLICIT_1
&& q
< IMPLICIT_2
);
2035 f
->fraction
|= 1; /* Stick remaining bits. */
2036 return sim_fpu_status_inexact
;
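/* Worked example of the recurrence (illustrative): for a scaled y of 2.25
   the loop above peels off one result bit per iteration; q converges on
   1.5, the remainder goes to zero, and the result is exact (no sticky bit,
   status 0).  */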
2044 /* int/long <-> sim_fpu */
2046 INLINE_SIM_FPU (int)
2047 sim_fpu_i32to (sim_fpu
*f
,
2049 sim_fpu_round round
)
2055 INLINE_SIM_FPU (int)
2056 sim_fpu_u32to (sim_fpu
*f
,
2058 sim_fpu_round round
)
2064 INLINE_SIM_FPU (int)
2065 sim_fpu_i64to (sim_fpu
*f
,
2067 sim_fpu_round round
)
2073 INLINE_SIM_FPU (int)
2074 sim_fpu_u64to (sim_fpu
*f
,
2076 sim_fpu_round round
)
2083 INLINE_SIM_FPU (int)
2084 sim_fpu_to32i (signed32
*i
,
2086 sim_fpu_round round
)
2089 int status
= fpu2i (&i64
, f
, 0, round
);
2094 INLINE_SIM_FPU (int)
2095 sim_fpu_to32u (unsigned32
*u
,
2097 sim_fpu_round round
)
2100 int status
= fpu2u (&u64
, f
, 0);
2105 INLINE_SIM_FPU (int)
2106 sim_fpu_to64i (signed64
*i
,
2108 sim_fpu_round round
)
2110 return fpu2i (i
, f
, 1, round
);
2114 INLINE_SIM_FPU (int)
2115 sim_fpu_to64u (unsigned64
*u
,
2117 sim_fpu_round round
)
2119 return fpu2u (u
, f
, 1);
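/* Round trip sketch (illustrative only, not part of the library): converting
   an integer in and back out through the routines above.  The elided middle
   parameters of sim_fpu_i32to / sim_fpu_to32i are assumed here to be the
   32 bit value and the source operand, as the call sites elsewhere suggest;
   for values that fit, the round trip is exact.  */
#if 0
static signed32
example_roundtrip (signed32 v)
{
  sim_fpu tmp;
  signed32 back;
  sim_fpu_i32to (&tmp, v, sim_fpu_round_default);	/* int -> sim_fpu */
  sim_fpu_to32i (&back, &tmp, sim_fpu_round_default);	/* sim_fpu -> int */
  return back;						/* == v for any 32 bit integer */
}
#endif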
2124 /* sim_fpu -> host format */
2127 INLINE_SIM_FPU (float)
2128 sim_fpu_2f (const sim_fpu
*f
)
2135 INLINE_SIM_FPU (double)
2136 sim_fpu_2d (const sim_fpu
*s
)
2139 if (sim_fpu_is_snan (s
))
2143 n
.class = sim_fpu_class_qnan
;
2144 val
.i
= pack_fpu (&n
, 1);
2148 val
.i
= pack_fpu (s
, 1);
2155 INLINE_SIM_FPU (void)
2156 sim_fpu_f2 (sim_fpu
*f
,
2161 unpack_fpu (f
, val
.i
, 1);
2166 INLINE_SIM_FPU (void)
2167 sim_fpu_d2 (sim_fpu
*f
,
2172 unpack_fpu (f
, val
.i
, 1);
2178 INLINE_SIM_FPU (int)
2179 sim_fpu_is_nan (const sim_fpu
*d
)
2183 case sim_fpu_class_qnan
:
2184 case sim_fpu_class_snan
:
2191 INLINE_SIM_FPU (int)
2192 sim_fpu_is_qnan (const sim_fpu
*d
)
2196 case sim_fpu_class_qnan
:
2203 INLINE_SIM_FPU (int)
2204 sim_fpu_is_snan (const sim_fpu
*d
)
2208 case sim_fpu_class_snan
:
2215 INLINE_SIM_FPU (int)
2216 sim_fpu_is_zero (const sim_fpu
*d
)
2220 case sim_fpu_class_zero
:
2227 INLINE_SIM_FPU (int)
2228 sim_fpu_is_infinity (const sim_fpu
*d
)
2232 case sim_fpu_class_infinity
:
2239 INLINE_SIM_FPU (int)
2240 sim_fpu_is_number (const sim_fpu
*d
)
2244 case sim_fpu_class_denorm
:
2245 case sim_fpu_class_number
:
2252 INLINE_SIM_FPU (int)
2253 sim_fpu_is_denorm (const sim_fpu
*d
)
2257 case sim_fpu_class_denorm
:
2265 INLINE_SIM_FPU (int)
2266 sim_fpu_sign (const sim_fpu
*d
)
2272 INLINE_SIM_FPU (int)
2273 sim_fpu_exp (const sim_fpu
*d
)
2275 return d
->normal_exp
;
2279 INLINE_SIM_FPU (unsigned64
)
2280 sim_fpu_fraction (const sim_fpu
*d
)
2286 INLINE_SIM_FPU (unsigned64
)
2287 sim_fpu_guard (const sim_fpu
*d
, int is_double
)
2290 unsigned64 guardmask
= LSMASK64 (NR_GUARDS
- 1, 0);
2291 rv
= (d
->fraction
& guardmask
) >> NR_PAD
;
2296 INLINE_SIM_FPU (int)
2297 sim_fpu_is (const sim_fpu
*d
)
2301 case sim_fpu_class_qnan
:
2302 return SIM_FPU_IS_QNAN
;
2303 case sim_fpu_class_snan
:
2304 return SIM_FPU_IS_SNAN
;
2305 case sim_fpu_class_infinity
:
2307 return SIM_FPU_IS_NINF
;
2309 return SIM_FPU_IS_PINF
;
2310 case sim_fpu_class_number
:
2312 return SIM_FPU_IS_NNUMBER
;
2314 return SIM_FPU_IS_PNUMBER
;
2315 case sim_fpu_class_denorm
:
2317 return SIM_FPU_IS_NDENORM
;
2319 return SIM_FPU_IS_PDENORM
;
2320 case sim_fpu_class_zero
:
2322 return SIM_FPU_IS_NZERO
;
2324 return SIM_FPU_IS_PZERO
;
2331 INLINE_SIM_FPU (int)
2332 sim_fpu_cmp (const sim_fpu
*l
, const sim_fpu
*r
)
2335 sim_fpu_sub (&res
, l
, r
);
2336 return sim_fpu_is (&res
);
2339 INLINE_SIM_FPU (int)
2340 sim_fpu_is_lt (const sim_fpu
*l
, const sim_fpu
*r
)
2343 sim_fpu_lt (&status
, l
, r
);
2347 INLINE_SIM_FPU (int)
2348 sim_fpu_is_le (const sim_fpu
*l
, const sim_fpu
*r
)
2351 sim_fpu_le (&is
, l
, r
);
2355 INLINE_SIM_FPU (int)
2356 sim_fpu_is_eq (const sim_fpu
*l
, const sim_fpu
*r
)
2359 sim_fpu_eq (&is
, l
, r
);
2363 INLINE_SIM_FPU (int)
2364 sim_fpu_is_ne (const sim_fpu
*l
, const sim_fpu
*r
)
2367 sim_fpu_ne (&is
, l
, r
);
2371 INLINE_SIM_FPU (int)
2372 sim_fpu_is_ge (const sim_fpu
*l
, const sim_fpu
*r
)
2375 sim_fpu_ge (&is
, l
, r
);
2379 INLINE_SIM_FPU (int)
2380 sim_fpu_is_gt (const sim_fpu
*l
, const sim_fpu
*r
)
2383 sim_fpu_gt (&is
, l
, r
);
2388 /* Compare operators */
2390 INLINE_SIM_FPU (int)
2391 sim_fpu_lt (int *is
,
2395 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2399 lval
.i
= pack_fpu (l
, 1);
2400 rval
.i
= pack_fpu (r
, 1);
2401 (*is
) = (lval
.d
< rval
.d
);
2404 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2407 return sim_fpu_status_invalid_snan
;
2412 return sim_fpu_status_invalid_qnan
;
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_le (int *is
,
2421 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2425 lval
.i
= pack_fpu (l
, 1);
2426 rval
.i
= pack_fpu (r
, 1);
2427 *is
= (lval
.d
<= rval
.d
);
2430 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2433 return sim_fpu_status_invalid_snan
;
2438 return sim_fpu_status_invalid_qnan
;
2442 INLINE_SIM_FPU (int)
2443 sim_fpu_eq (int *is
,
2447 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2451 lval
.i
= pack_fpu (l
, 1);
2452 rval
.i
= pack_fpu (r
, 1);
2453 (*is
) = (lval
.d
== rval
.d
);
2456 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2459 return sim_fpu_status_invalid_snan
;
2464 return sim_fpu_status_invalid_qnan
;
2468 INLINE_SIM_FPU (int)
2469 sim_fpu_ne (int *is
,
2473 if (!sim_fpu_is_nan (l
) && !sim_fpu_is_nan (r
))
2477 lval
.i
= pack_fpu (l
, 1);
2478 rval
.i
= pack_fpu (r
, 1);
2479 (*is
) = (lval
.d
!= rval
.d
);
2482 else if (sim_fpu_is_snan (l
) || sim_fpu_is_snan (r
))
2485 return sim_fpu_status_invalid_snan
;
2490 return sim_fpu_status_invalid_qnan
;
2494 INLINE_SIM_FPU (int)
2495 sim_fpu_ge (int *is
,
2499 return sim_fpu_le (is
, r
, l
);
2502 INLINE_SIM_FPU (int)
2503 sim_fpu_gt (int *is
,
2507 return sim_fpu_lt (is
, r
, l
);
2511 /* A number of useful constants */
2513 #if EXTERN_SIM_FPU_P
2514 const sim_fpu sim_fpu_zero
= {
2515 sim_fpu_class_zero
, 0, 0, 0
2517 const sim_fpu sim_fpu_qnan
= {
2518 sim_fpu_class_qnan
, 0, 0, 0
2520 const sim_fpu sim_fpu_one
= {
2521 sim_fpu_class_number
, 0, IMPLICIT_1
, 0
2523 const sim_fpu sim_fpu_two
= {
2524 sim_fpu_class_number
, 0, IMPLICIT_1
, 1
2526 const sim_fpu sim_fpu_max32
= {
2527 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS32
), NORMAL_EXPMAX32
2529 const sim_fpu sim_fpu_max64
= {
2530 sim_fpu_class_number
, 0, LSMASK64 (NR_FRAC_GUARD
, NR_GUARDS64
), NORMAL_EXPMAX64
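/* For orientation (illustrative): with the {class, sign, fraction,
   normal_exp} layout used in the initializers above, sim_fpu_one is
   1.0 x 2^0, sim_fpu_two is 1.0 x 2^1, and sim_fpu_max32 / sim_fpu_max64
   are the largest finite single / double precision values (all fraction
   bits set at the maximum normal exponent).  */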
2537 INLINE_SIM_FPU (void)
2538 sim_fpu_print_fpu (const sim_fpu
*f
,
2539 sim_fpu_print_func
*print
,
2542 sim_fpu_printn_fpu (f
, print
, -1, arg
);
2545 INLINE_SIM_FPU (void)
2546 sim_fpu_printn_fpu (const sim_fpu
*f
,
2547 sim_fpu_print_func
*print
,
2551 print (arg
, "%s", f
->sign
? "-" : "+");
2554 case sim_fpu_class_qnan
:
2556 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2557 print (arg
, "*QuietNaN");
2559 case sim_fpu_class_snan
:
2561 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2562 print (arg
, "*SignalNaN");
2564 case sim_fpu_class_zero
:
2567 case sim_fpu_class_infinity
:
2570 case sim_fpu_class_number
:
2571 case sim_fpu_class_denorm
:
2573 print_bits (f
->fraction
, NR_FRAC_GUARD
- 1, digits
, print
, arg
);
2574 print (arg
, "*2^%+d", f
->normal_exp
);
2575 ASSERT (f
->fraction
>= IMPLICIT_1
);
2576 ASSERT (f
->fraction
< IMPLICIT_2
);
2581 INLINE_SIM_FPU (void)
2582 sim_fpu_print_status (int status
,
2583 sim_fpu_print_func
*print
,
2587 const char *prefix
= "";
2590 switch ((sim_fpu_status
) (status
& i
))
2592 case sim_fpu_status_denorm
:
2593 print (arg
, "%sD", prefix
);
2595 case sim_fpu_status_invalid_snan
:
2596 print (arg
, "%sSNaN", prefix
);
2598 case sim_fpu_status_invalid_qnan
:
2599 print (arg
, "%sQNaN", prefix
);
2601 case sim_fpu_status_invalid_isi
:
2602 print (arg
, "%sISI", prefix
);
2604 case sim_fpu_status_invalid_idi
:
2605 print (arg
, "%sIDI", prefix
);
2607 case sim_fpu_status_invalid_zdz
:
2608 print (arg
, "%sZDZ", prefix
);
2610 case sim_fpu_status_invalid_imz
:
2611 print (arg
, "%sIMZ", prefix
);
2613 case sim_fpu_status_invalid_cvi
:
2614 print (arg
, "%sCVI", prefix
);
2616 case sim_fpu_status_invalid_cmp
:
2617 print (arg
, "%sCMP", prefix
);
2619 case sim_fpu_status_invalid_sqrt
:
2620 print (arg
, "%sSQRT", prefix
);
2622 case sim_fpu_status_invalid_irx
:
2623 print (arg
, "%sIRX", prefix
);
2625 case sim_fpu_status_inexact
:
2626 print (arg
, "%sX", prefix
);
2628 case sim_fpu_status_overflow
:
2629 print (arg
, "%sO", prefix
);
2631 case sim_fpu_status_underflow
:
2632 print (arg
, "%sU", prefix
);
2634 case sim_fpu_status_invalid_div0
:
2635 print (arg
, "%s/", prefix
);
2637 case sim_fpu_status_rounded
:
2638 print (arg
, "%sR", prefix
);