sim/common/sim-fpu.c (binutils-gdb.git)
1 /* This is a software floating point library which can be used instead
2 of the floating point routines in libgcc1.c for targets without
3 hardware floating point. */
5 /* Copyright 1994-2020 Free Software Foundation, Inc.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 /* As a special exception, if you link this library with other files,
21 some of which are compiled with GCC, to produce an executable,
22 this library does not by itself cause the resulting executable
23 to be covered by the GNU General Public License.
24 This exception does not however invalidate any other reasons why
25 the executable file might be covered by the GNU General Public License. */
27 /* This implements IEEE 754 format arithmetic, but does not provide a
28 mechanism for setting the rounding mode, or for generating or handling
29 exceptions.
31 The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
32 Wilson, all of Cygnus Support. */
35 #ifndef SIM_FPU_C
36 #define SIM_FPU_C
38 #include "sim-basics.h"
39 #include "sim-fpu.h"
41 #include "sim-io.h"
42 #include "sim-assert.h"
44 #ifdef HAVE_STDLIB_H
45 #include <stdlib.h>
46 #endif
48 /* Debugging support.
49 If digits is -1, then print all digits. */
51 static void
52 print_bits (unsigned64 x,
53 int msbit,
54 int digits,
55 sim_fpu_print_func print,
56 void *arg)
58 unsigned64 bit = LSBIT64 (msbit);
59 int i = 4;
60 while (bit && digits)
62 if (i == 0)
63 print (arg, ",");
65 if ((x & bit))
66 print (arg, "1");
67 else
68 print (arg, "0");
69 bit >>= 1;
71 if (digits > 0)
72 digits--;
73 i = (i + 1) % 4;
79 /* Quick and dirty conversion between a host double and host 64bit int. */
81 typedef union
83 double d;
84 unsigned64 i;
85 } sim_fpu_map;
88 /* A packed IEEE floating point number.
90 Form is <SIGN:1><BIASEDEXP:NR_EXPBITS><FRAC:NR_FRACBITS> for both
91 32 and 64 bit numbers. This number is interpreted as:
93 Normalized (0 < BIASEDEXP && BIASEDEXP < EXPMAX):
94 (sign ? '-' : '+') 1.<FRAC> x 2 ^ (BIASEDEXP - EXPBIAS)
96 Denormalized (0 == BIASEDEXP && FRAC != 0):
97 (sign ? "-" : "+") 0.<FRAC> x 2 ^ (1 - EXPBIAS)
99 Zero (0 == BIASEDEXP && FRAC == 0):
100 (sign ? "-" : "+") 0.0
102 Infinity (BIASEDEXP == EXPMAX && FRAC == 0):
103 (sign ? "-" : "+") "infinity"
105 SignalingNaN (BIASEDEXP == EXPMAX && FRAC > 0 && FRAC < QUIET_NAN):
106 SNaN.FRAC
108 QuietNaN (BIASEDEXP == EXPMAX && FRAC >= QUIET_NAN):
109 QNaN.FRAC
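/* Editorial worked example (not part of the original file): the single
   precision value 1.0 packs as SIGN=0, BIASEDEXP=127, FRAC=0, giving the
   bit pattern 0x3F800000; -2.5 = -1.25 x 2^1 packs as SIGN=1,
   BIASEDEXP=128, FRAC=0b010...0, giving 0xC0200000.  As a double, 1.0
   packs as 0x3FF0000000000000.  */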
113 #define NR_EXPBITS (is_double ? 11 : 8)
114 #define NR_FRACBITS (is_double ? 52 : 23)
115 #define SIGNBIT (is_double ? MSBIT64 (0) : MSBIT64 (32))
117 #define EXPMAX32 (255)
118 #define EXPMAX64 (2047)
119 #define EXPMAX ((unsigned) (is_double ? EXPMAX64 : EXPMAX32))
121 #define EXPBIAS32 (127)
122 #define EXPBIAS64 (1023)
123 #define EXPBIAS (is_double ? EXPBIAS64 : EXPBIAS32)
125 #define QUIET_NAN LSBIT64 (NR_FRACBITS - 1)
129 /* An unpacked floating point number.
131 When unpacked, the fraction of both a 32 and 64 bit floating point
132 number is stored using the same format:
134 64 bit - <IMPLICIT_1:1><FRACBITS:52><GUARDS:8><PAD:00>
135 32 bit - <IMPLICIT_1:1><FRACBITS:23><GUARDS:7><PAD:30> */
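/* Editorial sketch (not part of the original file): the unpacked value is
   FRACTION x 2 ^ (NORMAL_EXP - NR_FRAC_GUARD), with the implicit bit made
   explicit.  For example 1.0 unpacks to class = sim_fpu_class_number,
   sign = 0, fraction = IMPLICIT_1 (i.e. 1 << 60) and normal_exp = 0,
   which is exactly the sim_fpu_one constant defined near the end of this
   file.  */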
137 #define NR_PAD32 (30)
138 #define NR_PAD64 (0)
139 #define NR_PAD (is_double ? NR_PAD64 : NR_PAD32)
140 #define PADMASK (is_double ? 0 : LSMASK64 (NR_PAD32 - 1, 0))
142 #define NR_GUARDS32 (7 + NR_PAD32)
143 #define NR_GUARDS64 (8 + NR_PAD64)
144 #define NR_GUARDS (is_double ? NR_GUARDS64 : NR_GUARDS32)
145 #define GUARDMASK LSMASK64 (NR_GUARDS - 1, 0)
147 #define GUARDMSB LSBIT64 (NR_GUARDS - 1)
148 #define GUARDLSB LSBIT64 (NR_PAD)
149 #define GUARDROUND LSMASK64 (NR_GUARDS - 2, 0)
151 #define NR_FRAC_GUARD (60)
152 #define IMPLICIT_1 LSBIT64 (NR_FRAC_GUARD)
153 #define IMPLICIT_2 LSBIT64 (NR_FRAC_GUARD + 1)
154 #define IMPLICIT_4 LSBIT64 (NR_FRAC_GUARD + 2)
155 #define NR_SPARE 2
157 #define FRAC32MASK LSMASK64 (63, NR_FRAC_GUARD - 32 + 1)
159 #define NORMAL_EXPMIN (-(EXPBIAS)+1)
161 #define NORMAL_EXPMAX32 (EXPBIAS32)
162 #define NORMAL_EXPMAX64 (EXPBIAS64)
163 #define NORMAL_EXPMAX (EXPBIAS)
166 /* Integer constants */
168 #define MAX_INT32 ((signed64) LSMASK64 (30, 0))
169 #define MAX_UINT32 LSMASK64 (31, 0)
170 #define MIN_INT32 ((signed64) LSMASK64 (63, 31))
172 #define MAX_INT64 ((signed64) LSMASK64 (62, 0))
173 #define MAX_UINT64 LSMASK64 (63, 0)
174 #define MIN_INT64 ((signed64) LSMASK64 (63, 63))
176 #define MAX_INT (is_64bit ? MAX_INT64 : MAX_INT32)
177 #define MIN_INT (is_64bit ? MIN_INT64 : MIN_INT32)
178 #define MAX_UINT (is_64bit ? MAX_UINT64 : MAX_UINT32)
179 #define NR_INTBITS (is_64bit ? 64 : 32)
181 /* Squeeze an unpacked sim_fpu struct into a 32/64 bit integer. */
182 STATIC_INLINE_SIM_FPU (unsigned64)
183 pack_fpu (const sim_fpu *src,
184 int is_double)
186 int sign;
187 unsigned64 exp;
188 unsigned64 fraction;
189 unsigned64 packed;
191 switch (src->class)
193 /* Create a NaN. */
194 case sim_fpu_class_qnan:
195 sign = src->sign;
196 exp = EXPMAX;
197 /* Force fraction to correct class. */
198 fraction = src->fraction;
199 fraction >>= NR_GUARDS;
200 #ifdef SIM_QUIET_NAN_NEGATED
201 fraction |= QUIET_NAN - 1;
202 #else
203 fraction |= QUIET_NAN;
204 #endif
205 break;
206 case sim_fpu_class_snan:
207 sign = src->sign;
208 exp = EXPMAX;
209 /* Force fraction to correct class. */
210 fraction = src->fraction;
211 fraction >>= NR_GUARDS;
212 #ifdef SIM_QUIET_NAN_NEGATED
213 fraction |= QUIET_NAN;
214 #else
215 fraction &= ~QUIET_NAN;
216 #endif
217 break;
218 case sim_fpu_class_infinity:
219 sign = src->sign;
220 exp = EXPMAX;
221 fraction = 0;
222 break;
223 case sim_fpu_class_zero:
224 sign = src->sign;
225 exp = 0;
226 fraction = 0;
227 break;
228 case sim_fpu_class_number:
229 case sim_fpu_class_denorm:
230 ASSERT (src->fraction >= IMPLICIT_1);
231 ASSERT (src->fraction < IMPLICIT_2);
232 if (src->normal_exp < NORMAL_EXPMIN)
234 /* This number's exponent is too low to fit into the bits
235 available in the number.  We'll denormalize the number by
236 storing zero in the exponent and shifting the fraction to
237 the right to make up for it. */
238 int nr_shift = NORMAL_EXPMIN - src->normal_exp;
239 if (nr_shift > NR_FRACBITS)
241 /* Underflow, just make the number zero. */
242 sign = src->sign;
243 exp = 0;
244 fraction = 0;
246 else
248 sign = src->sign;
249 exp = 0;
250 /* Shift by the value. */
251 fraction = src->fraction;
252 fraction >>= NR_GUARDS;
253 fraction >>= nr_shift;
256 else if (src->normal_exp > NORMAL_EXPMAX)
258 /* Infinity */
259 sign = src->sign;
260 exp = EXPMAX;
261 fraction = 0;
263 else
265 exp = (src->normal_exp + EXPBIAS);
266 sign = src->sign;
267 fraction = src->fraction;
268 /* FIXME: Need to round according to WITH_SIM_FPU_ROUNDING
269 or some such. */
270 /* Round to nearest: If the most significant guard bit is set and
271 all the other guard bits are zero, then we're exactly half way
272 between two numbers; choose the one which makes the lsb of the answer 0. */
273 if ((fraction & GUARDMASK) == GUARDMSB)
275 if ((fraction & (GUARDMSB << 1)))
276 fraction += (GUARDMSB << 1);
278 else
280 /* Add a one to the guards to force round to nearest. */
281 fraction += GUARDROUND;
283 if ((fraction & IMPLICIT_2)) /* Rounding resulted in carry. */
285 exp += 1;
286 fraction >>= 1;
288 fraction >>= NR_GUARDS;
289 /* When exp == EXPMAX (overflow from carry) fraction must
290 have been made zero. */
291 ASSERT ((exp == EXPMAX) <= ((fraction & ~IMPLICIT_1) == 0));
293 break;
294 default:
295 abort ();
298 packed = ((sign ? SIGNBIT : 0)
299 | (exp << NR_FRACBITS)
300 | LSMASKED64 (fraction, NR_FRACBITS - 1, 0));
302 /* Trace operation. */
303 #if 0
304 if (is_double)
307 else
309 printf ("pack_fpu: ");
310 printf ("-> %c%0lX.%06lX\n",
311 LSMASKED32 (packed, 31, 31) ? '8' : '0',
312 (long) LSEXTRACTED32 (packed, 30, 23),
313 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
315 #endif
317 return packed;
321 /* Unpack a 32/64 bit integer into a sim_fpu structure. */
322 STATIC_INLINE_SIM_FPU (void)
323 unpack_fpu (sim_fpu *dst, unsigned64 packed, int is_double)
325 unsigned64 fraction = LSMASKED64 (packed, NR_FRACBITS - 1, 0);
326 unsigned exp = LSEXTRACTED64 (packed, NR_EXPBITS + NR_FRACBITS - 1, NR_FRACBITS);
327 int sign = (packed & SIGNBIT) != 0;
329 if (exp == 0)
331 /* Hmm. Looks like 0 */
332 if (fraction == 0)
334 /* Tastes like zero. */
335 dst->class = sim_fpu_class_zero;
336 dst->sign = sign;
337 dst->normal_exp = 0;
339 else
341 /* Zero exponent with non zero fraction - it's denormalized,
342 so there isn't a leading implicit one - we'll shift it so
343 it gets one. */
344 dst->normal_exp = exp - EXPBIAS + 1;
345 dst->class = sim_fpu_class_denorm;
346 dst->sign = sign;
347 fraction <<= NR_GUARDS;
348 while (fraction < IMPLICIT_1)
350 fraction <<= 1;
351 dst->normal_exp--;
353 dst->fraction = fraction;
356 else if (exp == EXPMAX)
358 /* Huge exponent. */
359 if (fraction == 0)
361 /* Attached to a zero fraction - means infinity. */
362 dst->class = sim_fpu_class_infinity;
363 dst->sign = sign;
364 /* dst->normal_exp = EXPBIAS; */
365 /* dst->fraction = 0; */
367 else
369 int qnan;
371 /* Non zero fraction, means NaN. */
372 dst->sign = sign;
373 dst->fraction = (fraction << NR_GUARDS);
374 #ifdef SIM_QUIET_NAN_NEGATED
375 qnan = (fraction & QUIET_NAN) == 0;
376 #else
377 qnan = fraction >= QUIET_NAN;
378 #endif
379 if (qnan)
380 dst->class = sim_fpu_class_qnan;
381 else
382 dst->class = sim_fpu_class_snan;
385 else
387 /* Nothing strange about this number. */
388 dst->class = sim_fpu_class_number;
389 dst->sign = sign;
390 dst->fraction = ((fraction << NR_GUARDS) | IMPLICIT_1);
391 dst->normal_exp = exp - EXPBIAS;
394 /* Trace operation. */
395 #if 0
396 if (is_double)
399 else
401 printf ("unpack_fpu: %c%02lX.%06lX ->\n",
402 LSMASKED32 (packed, 31, 31) ? '8' : '0',
403 (long) LSEXTRACTED32 (packed, 30, 23),
404 (long) LSEXTRACTED32 (packed, 23 - 1, 0));
406 #endif
408 /* sanity checks */
410 sim_fpu_map val;
411 val.i = pack_fpu (dst, 1);
412 if (is_double)
414 ASSERT (val.i == packed);
416 else
418 unsigned32 val = pack_fpu (dst, 0);
419 unsigned32 org = packed;
420 ASSERT (val == org);
426 /* Convert a floating point into an integer. */
427 STATIC_INLINE_SIM_FPU (int)
428 fpu2i (signed64 *i,
429 const sim_fpu *s,
430 int is_64bit,
431 sim_fpu_round round)
433 unsigned64 tmp;
434 int shift;
435 int status = 0;
436 if (sim_fpu_is_zero (s))
438 *i = 0;
439 return 0;
441 if (sim_fpu_is_snan (s))
443 *i = MIN_INT; /* FIXME */
444 return sim_fpu_status_invalid_cvi;
446 if (sim_fpu_is_qnan (s))
448 *i = MIN_INT; /* FIXME */
449 return sim_fpu_status_invalid_cvi;
451 /* Map infinity onto MAX_INT... */
452 if (sim_fpu_is_infinity (s))
454 *i = s->sign ? MIN_INT : MAX_INT;
455 return sim_fpu_status_invalid_cvi;
457 /* It is a number, but a small one. */
458 if (s->normal_exp < 0)
460 *i = 0;
461 return sim_fpu_status_inexact;
463 /* Is the floating point MIN_INT or just close? */
464 if (s->sign && s->normal_exp == (NR_INTBITS - 1))
466 *i = MIN_INT;
467 ASSERT (s->fraction >= IMPLICIT_1);
468 if (s->fraction == IMPLICIT_1)
469 return 0; /* exact */
470 if (is_64bit) /* can't round */
471 return sim_fpu_status_invalid_cvi; /* must be overflow */
472 /* For a 32bit conversion, rounding to MIN_INT is still possible. */
473 switch (round)
475 case sim_fpu_round_default:
476 abort ();
477 case sim_fpu_round_zero:
478 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
479 return sim_fpu_status_invalid_cvi;
480 else
481 return sim_fpu_status_inexact;
482 break;
483 case sim_fpu_round_near:
485 if ((s->fraction & FRAC32MASK) != IMPLICIT_1)
486 return sim_fpu_status_invalid_cvi;
487 else if ((s->fraction & ~FRAC32MASK) >= (~FRAC32MASK >> 1))
488 return sim_fpu_status_invalid_cvi;
489 else
490 return sim_fpu_status_inexact;
492 case sim_fpu_round_up:
493 if ((s->fraction & FRAC32MASK) == IMPLICIT_1)
494 return sim_fpu_status_inexact;
495 else
496 return sim_fpu_status_invalid_cvi;
497 case sim_fpu_round_down:
498 return sim_fpu_status_invalid_cvi;
501 /* Would right shifting result in the FRAC being shifted into
502 (through) the integer's sign bit? */
503 if (s->normal_exp > (NR_INTBITS - 2))
505 *i = s->sign ? MIN_INT : MAX_INT;
506 return sim_fpu_status_invalid_cvi;
508 /* Normal number, shift it into place. */
509 tmp = s->fraction;
510 shift = (s->normal_exp - (NR_FRAC_GUARD));
511 if (shift > 0)
513 tmp <<= shift;
515 else
517 shift = -shift;
518 if (tmp & ((SIGNED64 (1) << shift) - 1))
519 status |= sim_fpu_status_inexact;
520 tmp >>= shift;
522 *i = s->sign ? (-tmp) : (tmp);
523 return status;
526 /* Convert an integer into a floating point. */
527 STATIC_INLINE_SIM_FPU (int)
528 i2fpu (sim_fpu *f, signed64 i, int is_64bit)
530 int status = 0;
531 if (i == 0)
533 f->class = sim_fpu_class_zero;
534 f->sign = 0;
535 f->normal_exp = 0;
537 else
539 f->class = sim_fpu_class_number;
540 f->sign = (i < 0);
541 f->normal_exp = NR_FRAC_GUARD;
543 if (f->sign)
545 /* Special case for minint, since there is no corresponding
546 +ve integer representation for it. */
547 if (i == MIN_INT)
549 f->fraction = IMPLICIT_1;
550 f->normal_exp = NR_INTBITS - 1;
552 else
553 f->fraction = (-i);
555 else
556 f->fraction = i;
558 if (f->fraction >= IMPLICIT_2)
562 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
563 f->normal_exp += 1;
565 while (f->fraction >= IMPLICIT_2);
567 else if (f->fraction < IMPLICIT_1)
571 f->fraction <<= 1;
572 f->normal_exp -= 1;
574 while (f->fraction < IMPLICIT_1);
578 /* trace operation */
579 #if 0
581 printf ("i2fpu: 0x%08lX ->\n", (long) i);
583 #endif
585 /* sanity check */
587 signed64 val;
588 fpu2i (&val, f, is_64bit, sim_fpu_round_zero);
589 if (i >= MIN_INT32 && i <= MAX_INT32)
591 ASSERT (val == i);
595 return status;
599 /* Convert a floating point into an unsigned integer. */
600 STATIC_INLINE_SIM_FPU (int)
601 fpu2u (unsigned64 *u, const sim_fpu *s, int is_64bit)
603 const int is_double = 1;
604 unsigned64 tmp;
605 int shift;
606 if (sim_fpu_is_zero (s))
608 *u = 0;
609 return 0;
611 if (sim_fpu_is_nan (s))
613 *u = 0;
614 return 0;
616 /* It is a negative number. */
617 if (s->sign)
619 *u = 0;
620 return 0;
622 /* Map infinity onto MAX_UINT. */
623 if (sim_fpu_is_infinity (s))
625 *u = MAX_UINT;
626 return 0;
628 /* It is a number, but a small one. */
629 if (s->normal_exp < 0)
631 *u = 0;
632 return 0;
634 /* overflow */
635 if (s->normal_exp > (NR_INTBITS - 1))
637 *u = MAX_UINT;
638 return 0;
640 /* normal number */
641 tmp = (s->fraction & ~PADMASK);
642 shift = (s->normal_exp - (NR_FRACBITS + NR_GUARDS));
643 if (shift > 0)
645 tmp <<= shift;
647 else
649 shift = -shift;
650 tmp >>= shift;
652 *u = tmp;
653 return 0;
656 /* Convert an unsigned integer into a floating point. */
657 STATIC_INLINE_SIM_FPU (int)
658 u2fpu (sim_fpu *f, unsigned64 u, int is_64bit)
660 if (u == 0)
662 f->class = sim_fpu_class_zero;
663 f->sign = 0;
664 f->normal_exp = 0;
666 else
668 f->class = sim_fpu_class_number;
669 f->sign = 0;
670 f->normal_exp = NR_FRAC_GUARD;
671 f->fraction = u;
673 while (f->fraction < IMPLICIT_1)
675 f->fraction <<= 1;
676 f->normal_exp -= 1;
679 return 0;
683 /* register <-> sim_fpu */
685 INLINE_SIM_FPU (void)
686 sim_fpu_32to (sim_fpu *f, unsigned32 s)
688 unpack_fpu (f, s, 0);
692 INLINE_SIM_FPU (void)
693 sim_fpu_232to (sim_fpu *f, unsigned32 h, unsigned32 l)
695 unsigned64 s = h;
696 s = (s << 32) | l;
697 unpack_fpu (f, s, 1);
701 INLINE_SIM_FPU (void)
702 sim_fpu_64to (sim_fpu *f, unsigned64 s)
704 unpack_fpu (f, s, 1);
708 INLINE_SIM_FPU (void)
709 sim_fpu_to32 (unsigned32 *s,
710 const sim_fpu *f)
712 *s = pack_fpu (f, 0);
716 INLINE_SIM_FPU (void)
717 sim_fpu_to232 (unsigned32 *h, unsigned32 *l,
718 const sim_fpu *f)
720 unsigned64 s = pack_fpu (f, 1);
721 *l = s;
722 *h = (s >> 32);
726 INLINE_SIM_FPU (void)
727 sim_fpu_to64 (unsigned64 *u,
728 const sim_fpu *f)
730 *u = pack_fpu (f, 1);
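/* Editorial usage sketch, not part of the original file: raw register
   bits round-trip through the unpacked representation.  */
#if 0
static void
example_round_trip (void)
{
  unsigned32 in = 0x40490fdb;	/* roughly pi as an IEEE single */
  unsigned32 out;
  sim_fpu v;
  sim_fpu_32to (&v, in);	/* raw bits -> unpacked sim_fpu */
  sim_fpu_to32 (&out, &v);	/* unpacked sim_fpu -> raw bits */
  ASSERT (in == out);		/* lossless for this (normal) value */
}
#endif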
734 INLINE_SIM_FPU (void)
735 sim_fpu_fractionto (sim_fpu *f,
736 int sign,
737 int normal_exp,
738 unsigned64 fraction,
739 int precision)
741 int shift = (NR_FRAC_GUARD - precision);
742 f->class = sim_fpu_class_number;
743 f->sign = sign;
744 f->normal_exp = normal_exp;
745 /* Shift the fraction to where sim-fpu expects it. */
746 if (shift >= 0)
747 f->fraction = (fraction << shift);
748 else
749 f->fraction = (fraction >> -shift);
750 f->fraction |= IMPLICIT_1;
754 INLINE_SIM_FPU (unsigned64)
755 sim_fpu_tofraction (const sim_fpu *d,
756 int precision)
758 /* We have NR_FRAC_GUARD bits, we want only PRECISION bits. */
759 int shift = (NR_FRAC_GUARD - precision);
760 unsigned64 fraction = (d->fraction & ~IMPLICIT_1);
761 if (shift >= 0)
762 return fraction >> shift;
763 else
764 return fraction << -shift;
768 /* Rounding */
770 STATIC_INLINE_SIM_FPU (int)
771 do_normal_overflow (sim_fpu *f,
772 int is_double,
773 sim_fpu_round round)
775 switch (round)
777 case sim_fpu_round_default:
778 return 0;
779 case sim_fpu_round_near:
780 f->class = sim_fpu_class_infinity;
781 break;
782 case sim_fpu_round_up:
783 if (!f->sign)
784 f->class = sim_fpu_class_infinity;
785 break;
786 case sim_fpu_round_down:
787 if (f->sign)
788 f->class = sim_fpu_class_infinity;
789 break;
790 case sim_fpu_round_zero:
791 break;
793 f->normal_exp = NORMAL_EXPMAX;
794 f->fraction = LSMASK64 (NR_FRAC_GUARD, NR_GUARDS);
795 return (sim_fpu_status_overflow | sim_fpu_status_inexact);
798 STATIC_INLINE_SIM_FPU (int)
799 do_normal_underflow (sim_fpu *f,
800 int is_double,
801 sim_fpu_round round)
803 switch (round)
805 case sim_fpu_round_default:
806 return 0;
807 case sim_fpu_round_near:
808 f->class = sim_fpu_class_zero;
809 break;
810 case sim_fpu_round_up:
811 if (f->sign)
812 f->class = sim_fpu_class_zero;
813 break;
814 case sim_fpu_round_down:
815 if (!f->sign)
816 f->class = sim_fpu_class_zero;
817 break;
818 case sim_fpu_round_zero:
819 f->class = sim_fpu_class_zero;
820 break;
822 f->normal_exp = NORMAL_EXPMIN - NR_FRACBITS;
823 f->fraction = IMPLICIT_1;
824 return (sim_fpu_status_inexact | sim_fpu_status_underflow);
829 /* Round a number using NR_GUARDS.
830 On return F holds the rounded number, or F->FRACTION == 0 on underflow. */
832 STATIC_INLINE_SIM_FPU (int)
833 do_normal_round (sim_fpu *f,
834 int nr_guards,
835 sim_fpu_round round)
837 unsigned64 guardmask = LSMASK64 (nr_guards - 1, 0);
838 unsigned64 guardmsb = LSBIT64 (nr_guards - 1);
839 unsigned64 fraclsb = guardmsb << 1;
840 if ((f->fraction & guardmask))
842 int status = sim_fpu_status_inexact;
843 switch (round)
845 case sim_fpu_round_default:
846 return 0;
847 case sim_fpu_round_near:
848 if ((f->fraction & guardmsb))
850 if ((f->fraction & fraclsb))
852 status |= sim_fpu_status_rounded;
854 else if ((f->fraction & (guardmask >> 1)))
856 status |= sim_fpu_status_rounded;
859 break;
860 case sim_fpu_round_up:
861 if (!f->sign)
862 status |= sim_fpu_status_rounded;
863 break;
864 case sim_fpu_round_down:
865 if (f->sign)
866 status |= sim_fpu_status_rounded;
867 break;
868 case sim_fpu_round_zero:
869 break;
871 f->fraction &= ~guardmask;
872 /* Round if needed, handle resulting overflow. */
873 if ((status & sim_fpu_status_rounded))
875 f->fraction += fraclsb;
876 if ((f->fraction & IMPLICIT_2))
878 f->fraction >>= 1;
879 f->normal_exp += 1;
882 return status;
884 else
885 return 0;
889 STATIC_INLINE_SIM_FPU (int)
890 do_round (sim_fpu *f,
891 int is_double,
892 sim_fpu_round round,
893 sim_fpu_denorm denorm)
895 switch (f->class)
897 case sim_fpu_class_qnan:
898 case sim_fpu_class_zero:
899 case sim_fpu_class_infinity:
900 return 0;
901 break;
902 case sim_fpu_class_snan:
903 /* Quieten a SignalingNaN. */
904 f->class = sim_fpu_class_qnan;
905 return sim_fpu_status_invalid_snan;
906 break;
907 case sim_fpu_class_number:
908 case sim_fpu_class_denorm:
910 int status;
911 ASSERT (f->fraction < IMPLICIT_2);
912 ASSERT (f->fraction >= IMPLICIT_1);
913 if (f->normal_exp < NORMAL_EXPMIN)
915 /* This number's exponent is too low to fit into the bits
916 available in the number. Round off any bits that will be
917 discarded as a result of denormalization. Edge case is
918 the implicit bit shifted to GUARD0 and then rounded
919 up. */
920 int shift = NORMAL_EXPMIN - f->normal_exp;
921 if (shift + NR_GUARDS <= NR_FRAC_GUARD + 1
922 && !(denorm & sim_fpu_denorm_zero))
924 status = do_normal_round (f, shift + NR_GUARDS, round);
925 if (f->fraction == 0) /* Rounding underflowed. */
927 status |= do_normal_underflow (f, is_double, round);
929 else if (f->normal_exp < NORMAL_EXPMIN) /* still underflow? */
931 status |= sim_fpu_status_denorm;
932 /* Any loss of precision when denormalizing is
933 underflow. Some processors check for underflow
934 before rounding, some after! */
935 if (status & sim_fpu_status_inexact)
936 status |= sim_fpu_status_underflow;
937 /* Flag that resultant value has been denormalized. */
938 f->class = sim_fpu_class_denorm;
940 else if ((denorm & sim_fpu_denorm_underflow_inexact))
942 if ((status & sim_fpu_status_inexact))
943 status |= sim_fpu_status_underflow;
946 else
948 status = do_normal_underflow (f, is_double, round);
951 else if (f->normal_exp > NORMAL_EXPMAX)
953 /* Infinity */
954 status = do_normal_overflow (f, is_double, round);
956 else
958 status = do_normal_round (f, NR_GUARDS, round);
959 if (f->fraction == 0)
960 /* f->class = sim_fpu_class_zero; */
961 status |= do_normal_underflow (f, is_double, round);
962 else if (f->normal_exp > NORMAL_EXPMAX)
963 /* Oops! rounding caused overflow. */
964 status |= do_normal_overflow (f, is_double, round);
966 ASSERT ((f->class == sim_fpu_class_number
967 || f->class == sim_fpu_class_denorm)
968 <= (f->fraction < IMPLICIT_2 && f->fraction >= IMPLICIT_1));
969 return status;
972 return 0;
975 INLINE_SIM_FPU (int)
976 sim_fpu_round_32 (sim_fpu *f,
977 sim_fpu_round round,
978 sim_fpu_denorm denorm)
980 return do_round (f, 0, round, denorm);
983 INLINE_SIM_FPU (int)
984 sim_fpu_round_64 (sim_fpu *f,
985 sim_fpu_round round,
986 sim_fpu_denorm denorm)
988 return do_round (f, 1, round, denorm);
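/* Editorial sketch, not part of the original file: arithmetic leaves the
   full NR_FRAC_GUARD bit fraction in place; callers narrow a result to
   the target format by rounding explicitly, as below (0 selects the
   default denorm handling, as sim_fpu_rem does).  */
#if 0
static void
example_round (void)
{
  sim_fpu v;
  sim_fpu_d2 (&v, 1.0 / 3.0);	/* more fraction bits than a single holds */
  sim_fpu_round_32 (&v, sim_fpu_round_near, 0);
  /* v now holds 1/3 rounded to single precision; the return value
     would have included sim_fpu_status_inexact.  */
}
#endif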
993 /* Arithmetic ops */
995 INLINE_SIM_FPU (int)
996 sim_fpu_add (sim_fpu *f,
997 const sim_fpu *l,
998 const sim_fpu *r)
1000 if (sim_fpu_is_snan (l))
1002 *f = *l;
1003 f->class = sim_fpu_class_qnan;
1004 return sim_fpu_status_invalid_snan;
1006 if (sim_fpu_is_snan (r))
1008 *f = *r;
1009 f->class = sim_fpu_class_qnan;
1010 return sim_fpu_status_invalid_snan;
1012 if (sim_fpu_is_qnan (l))
1014 *f = *l;
1015 return 0;
1017 if (sim_fpu_is_qnan (r))
1019 *f = *r;
1020 return 0;
1022 if (sim_fpu_is_infinity (l))
1024 if (sim_fpu_is_infinity (r)
1025 && l->sign != r->sign)
1027 *f = sim_fpu_qnan;
1028 return sim_fpu_status_invalid_isi;
1030 *f = *l;
1031 return 0;
1033 if (sim_fpu_is_infinity (r))
1035 *f = *r;
1036 return 0;
1038 if (sim_fpu_is_zero (l))
1040 if (sim_fpu_is_zero (r))
1042 *f = sim_fpu_zero;
1043 f->sign = l->sign & r->sign;
1045 else
1046 *f = *r;
1047 return 0;
1049 if (sim_fpu_is_zero (r))
1051 *f = *l;
1052 return 0;
1055 int status = 0;
1056 int shift = l->normal_exp - r->normal_exp;
1057 unsigned64 lfraction;
1058 unsigned64 rfraction;
1059 /* use exp of larger */
1060 if (shift >= NR_FRAC_GUARD)
1062 /* left has much bigger magnitude */
1063 *f = *l;
1064 return sim_fpu_status_inexact;
1066 if (shift <= - NR_FRAC_GUARD)
1068 /* right has much bigger magnitude */
1069 *f = *r;
1070 return sim_fpu_status_inexact;
1072 lfraction = l->fraction;
1073 rfraction = r->fraction;
1074 if (shift > 0)
1076 f->normal_exp = l->normal_exp;
1077 if (rfraction & LSMASK64 (shift - 1, 0))
1079 status |= sim_fpu_status_inexact;
1080 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1082 rfraction >>= shift;
1084 else if (shift < 0)
1086 f->normal_exp = r->normal_exp;
1087 if (lfraction & LSMASK64 (- shift - 1, 0))
1089 status |= sim_fpu_status_inexact;
1090 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1092 lfraction >>= -shift;
1094 else
1096 f->normal_exp = r->normal_exp;
1099 /* Perform the addition. */
1100 if (l->sign)
1101 lfraction = - lfraction;
1102 if (r->sign)
1103 rfraction = - rfraction;
1104 f->fraction = lfraction + rfraction;
1106 /* zero? */
1107 if (f->fraction == 0)
1109 *f = sim_fpu_zero;
1110 return 0;
1113 /* sign? */
1114 f->class = sim_fpu_class_number;
1115 if (((signed64) f->fraction) >= 0)
1116 f->sign = 0;
1117 else
1119 f->sign = 1;
1120 f->fraction = - f->fraction;
1123 /* Normalize it. */
1124 if ((f->fraction & IMPLICIT_2))
1126 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1127 f->normal_exp ++;
1129 else if (f->fraction < IMPLICIT_1)
1133 f->fraction <<= 1;
1134 f->normal_exp --;
1136 while (f->fraction < IMPLICIT_1);
1138 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1139 return status;
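/* Editorial usage sketch, not part of the original file.  */
#if 0
static void
example_add (void)
{
  sim_fpu a, b, sum;
  unsigned32 bits;
  sim_fpu_32to (&a, 0x3f800000);	/* 1.0f */
  sim_fpu_32to (&b, 0x40000000);	/* 2.0f */
  sim_fpu_add (&sum, &a, &b);
  sim_fpu_round_32 (&sum, sim_fpu_round_near, 0);
  sim_fpu_to32 (&bits, &sum);		/* bits == 0x40400000, i.e. 3.0f */
}
#endif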
1144 INLINE_SIM_FPU (int)
1145 sim_fpu_sub (sim_fpu *f,
1146 const sim_fpu *l,
1147 const sim_fpu *r)
1149 if (sim_fpu_is_snan (l))
1151 *f = *l;
1152 f->class = sim_fpu_class_qnan;
1153 return sim_fpu_status_invalid_snan;
1155 if (sim_fpu_is_snan (r))
1157 *f = *r;
1158 f->class = sim_fpu_class_qnan;
1159 return sim_fpu_status_invalid_snan;
1161 if (sim_fpu_is_qnan (l))
1163 *f = *l;
1164 return 0;
1166 if (sim_fpu_is_qnan (r))
1168 *f = *r;
1169 return 0;
1171 if (sim_fpu_is_infinity (l))
1173 if (sim_fpu_is_infinity (r)
1174 && l->sign == r->sign)
1176 *f = sim_fpu_qnan;
1177 return sim_fpu_status_invalid_isi;
1179 *f = *l;
1180 return 0;
1182 if (sim_fpu_is_infinity (r))
1184 *f = *r;
1185 f->sign = !r->sign;
1186 return 0;
1188 if (sim_fpu_is_zero (l))
1190 if (sim_fpu_is_zero (r))
1192 *f = sim_fpu_zero;
1193 f->sign = l->sign & !r->sign;
1195 else
1197 *f = *r;
1198 f->sign = !r->sign;
1200 return 0;
1202 if (sim_fpu_is_zero (r))
1204 *f = *l;
1205 return 0;
1208 int status = 0;
1209 int shift = l->normal_exp - r->normal_exp;
1210 unsigned64 lfraction;
1211 unsigned64 rfraction;
1212 /* use exp of larger */
1213 if (shift >= NR_FRAC_GUARD)
1215 /* left has much bigger magnitude */
1216 *f = *l;
1217 return sim_fpu_status_inexact;
1219 if (shift <= - NR_FRAC_GUARD)
1221 /* right has much bigger magnitude */
1222 *f = *r;
1223 f->sign = !r->sign;
1224 return sim_fpu_status_inexact;
1226 lfraction = l->fraction;
1227 rfraction = r->fraction;
1228 if (shift > 0)
1230 f->normal_exp = l->normal_exp;
1231 if (rfraction & LSMASK64 (shift - 1, 0))
1233 status |= sim_fpu_status_inexact;
1234 rfraction |= LSBIT64 (shift); /* Stick LSBit. */
1236 rfraction >>= shift;
1238 else if (shift < 0)
1240 f->normal_exp = r->normal_exp;
1241 if (lfraction & LSMASK64 (- shift - 1, 0))
1243 status |= sim_fpu_status_inexact;
1244 lfraction |= LSBIT64 (- shift); /* Stick LSBit. */
1246 lfraction >>= -shift;
1248 else
1250 f->normal_exp = r->normal_exp;
1253 /* Perform the subtraction. */
1254 if (l->sign)
1255 lfraction = - lfraction;
1256 if (!r->sign)
1257 rfraction = - rfraction;
1258 f->fraction = lfraction + rfraction;
1260 /* zero? */
1261 if (f->fraction == 0)
1263 *f = sim_fpu_zero;
1264 return 0;
1267 /* sign? */
1268 f->class = sim_fpu_class_number;
1269 if (((signed64) f->fraction) >= 0)
1270 f->sign = 0;
1271 else
1273 f->sign = 1;
1274 f->fraction = - f->fraction;
1277 /* Normalize it. */
1278 if ((f->fraction & IMPLICIT_2))
1280 f->fraction = (f->fraction >> 1) | (f->fraction & 1);
1281 f->normal_exp ++;
1283 else if (f->fraction < IMPLICIT_1)
1287 f->fraction <<= 1;
1288 f->normal_exp --;
1290 while (f->fraction < IMPLICIT_1);
1292 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1293 return status;
1298 INLINE_SIM_FPU (int)
1299 sim_fpu_mul (sim_fpu *f,
1300 const sim_fpu *l,
1301 const sim_fpu *r)
1303 if (sim_fpu_is_snan (l))
1305 *f = *l;
1306 f->class = sim_fpu_class_qnan;
1307 return sim_fpu_status_invalid_snan;
1309 if (sim_fpu_is_snan (r))
1311 *f = *r;
1312 f->class = sim_fpu_class_qnan;
1313 return sim_fpu_status_invalid_snan;
1315 if (sim_fpu_is_qnan (l))
1317 *f = *l;
1318 return 0;
1320 if (sim_fpu_is_qnan (r))
1322 *f = *r;
1323 return 0;
1325 if (sim_fpu_is_infinity (l))
1327 if (sim_fpu_is_zero (r))
1329 *f = sim_fpu_qnan;
1330 return sim_fpu_status_invalid_imz;
1332 *f = *l;
1333 f->sign = l->sign ^ r->sign;
1334 return 0;
1336 if (sim_fpu_is_infinity (r))
1338 if (sim_fpu_is_zero (l))
1340 *f = sim_fpu_qnan;
1341 return sim_fpu_status_invalid_imz;
1343 *f = *r;
1344 f->sign = l->sign ^ r->sign;
1345 return 0;
1347 if (sim_fpu_is_zero (l) || sim_fpu_is_zero (r))
1349 *f = sim_fpu_zero;
1350 f->sign = l->sign ^ r->sign;
1351 return 0;
1353 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1354 128 bit number. */
1356 unsigned64 low;
1357 unsigned64 high;
1358 unsigned64 nl = l->fraction & 0xffffffff;
1359 unsigned64 nh = l->fraction >> 32;
1360 unsigned64 ml = r->fraction & 0xffffffff;
1361 unsigned64 mh = r->fraction >>32;
1362 unsigned64 pp_ll = ml * nl;
1363 unsigned64 pp_hl = mh * nl;
1364 unsigned64 pp_lh = ml * nh;
1365 unsigned64 pp_hh = mh * nh;
1366 unsigned64 res2 = 0;
1367 unsigned64 res0 = 0;
1368 unsigned64 ps_hh__ = pp_hl + pp_lh;
1369 if (ps_hh__ < pp_hl)
1370 res2 += UNSIGNED64 (0x100000000);
1371 pp_hl = (ps_hh__ << 32) & UNSIGNED64 (0xffffffff00000000);
1372 res0 = pp_ll + pp_hl;
1373 if (res0 < pp_ll)
1374 res2++;
1375 res2 += ((ps_hh__ >> 32) & 0xffffffff) + pp_hh;
1376 high = res2;
1377 low = res0;
1379 f->normal_exp = l->normal_exp + r->normal_exp;
1380 f->sign = l->sign ^ r->sign;
1381 f->class = sim_fpu_class_number;
1383 /* Input is bounded by [1,2) ; [2^60,2^61)
1384 Output is bounded by [1,4) ; [2^120,2^122) */
1386 /* Adjust the exponent according to where the decimal point ended
1387 up in the high 64 bit word. In the source the decimal point
1388 was at NR_FRAC_GUARD. */
1389 f->normal_exp += NR_FRAC_GUARD + 64 - (NR_FRAC_GUARD * 2);
1391 /* The high word is bounded according to the above. Consequently
1392 it has never overflowed into IMPLICIT_2. */
1393 ASSERT (high < LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64));
1394 ASSERT (high >= LSBIT64 ((NR_FRAC_GUARD * 2) - 64));
1395 ASSERT (LSBIT64 (((NR_FRAC_GUARD + 1) * 2) - 64) < IMPLICIT_1);
1397 /* Normalize. */
1400 f->normal_exp--;
1401 high <<= 1;
1402 if (low & LSBIT64 (63))
1403 high |= 1;
1404 low <<= 1;
1406 while (high < IMPLICIT_1);
1408 ASSERT (high >= IMPLICIT_1 && high < IMPLICIT_2);
1409 if (low != 0)
1411 f->fraction = (high | 1); /* sticky */
1412 return sim_fpu_status_inexact;
1414 else
1416 f->fraction = high;
1417 return 0;
1419 return 0;
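/* Editorial note, not part of the original file: the partial products
   above are the textbook 64x64 -> 128 bit decomposition

     n * m = (nh*2^32 + nl) * (mh*2^32 + ml)
           = nh*mh*2^64 + (nh*ml + nl*mh)*2^32 + nl*ml

   with the carries out of the middle column folded into the high word
   (res2) by hand.  */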
1423 INLINE_SIM_FPU (int)
1424 sim_fpu_div (sim_fpu *f,
1425 const sim_fpu *l,
1426 const sim_fpu *r)
1428 if (sim_fpu_is_snan (l))
1430 *f = *l;
1431 f->class = sim_fpu_class_qnan;
1432 return sim_fpu_status_invalid_snan;
1434 if (sim_fpu_is_snan (r))
1436 *f = *r;
1437 f->class = sim_fpu_class_qnan;
1438 return sim_fpu_status_invalid_snan;
1440 if (sim_fpu_is_qnan (l))
1442 *f = *l;
1443 f->class = sim_fpu_class_qnan;
1444 return 0;
1446 if (sim_fpu_is_qnan (r))
1448 *f = *r;
1449 f->class = sim_fpu_class_qnan;
1450 return 0;
1452 if (sim_fpu_is_infinity (l))
1454 if (sim_fpu_is_infinity (r))
1456 *f = sim_fpu_qnan;
1457 return sim_fpu_status_invalid_idi;
1459 else
1461 *f = *l;
1462 f->sign = l->sign ^ r->sign;
1463 return 0;
1466 if (sim_fpu_is_zero (l))
1468 if (sim_fpu_is_zero (r))
1470 *f = sim_fpu_qnan;
1471 return sim_fpu_status_invalid_zdz;
1473 else
1475 *f = *l;
1476 f->sign = l->sign ^ r->sign;
1477 return 0;
1480 if (sim_fpu_is_infinity (r))
1482 *f = sim_fpu_zero;
1483 f->sign = l->sign ^ r->sign;
1484 return 0;
1486 if (sim_fpu_is_zero (r))
1488 f->class = sim_fpu_class_infinity;
1489 f->sign = l->sign ^ r->sign;
1490 return sim_fpu_status_invalid_div0;
1493 /* Calculate the mantissa by multiplying both 64bit numbers to get a
1494 128 bit number. */
1496 /* quotient = (numerator / denominator)
1497 x 2^(numerator exponent - denominator exponent) */
1499 unsigned64 numerator;
1500 unsigned64 denominator;
1501 unsigned64 quotient;
1502 unsigned64 bit;
1504 f->class = sim_fpu_class_number;
1505 f->sign = l->sign ^ r->sign;
1506 f->normal_exp = l->normal_exp - r->normal_exp;
1508 numerator = l->fraction;
1509 denominator = r->fraction;
1511 /* Fraction will be less than 1.0 */
1512 if (numerator < denominator)
1514 numerator <<= 1;
1515 f->normal_exp--;
1517 ASSERT (numerator >= denominator);
1519 /* Gain extra precision, already used one spare bit. */
1520 numerator <<= NR_SPARE;
1521 denominator <<= NR_SPARE;
1523 /* Does divide one bit at a time. Optimize??? */
1524 quotient = 0;
1525 bit = (IMPLICIT_1 << NR_SPARE);
1526 while (bit)
1528 if (numerator >= denominator)
1530 quotient |= bit;
1531 numerator -= denominator;
1533 bit >>= 1;
1534 numerator <<= 1;
1537 /* Discard (but save) the extra bits. */
1538 if ((quotient & LSMASK64 (NR_SPARE -1, 0)))
1539 quotient = (quotient >> NR_SPARE) | 1;
1540 else
1541 quotient = (quotient >> NR_SPARE);
1543 f->fraction = quotient;
1544 ASSERT (f->fraction >= IMPLICIT_1 && f->fraction < IMPLICIT_2);
1545 if (numerator != 0)
1547 f->fraction |= 1; /* Stick remaining bits. */
1548 return sim_fpu_status_inexact;
1550 else
1551 return 0;
1556 INLINE_SIM_FPU (int)
1557 sim_fpu_rem (sim_fpu *f,
1558 const sim_fpu *l,
1559 const sim_fpu *r)
1561 if (sim_fpu_is_snan (l))
1563 *f = *l;
1564 f->class = sim_fpu_class_qnan;
1565 return sim_fpu_status_invalid_snan;
1567 if (sim_fpu_is_snan (r))
1569 *f = *r;
1570 f->class = sim_fpu_class_qnan;
1571 return sim_fpu_status_invalid_snan;
1573 if (sim_fpu_is_qnan (l))
1575 *f = *l;
1576 f->class = sim_fpu_class_qnan;
1577 return 0;
1579 if (sim_fpu_is_qnan (r))
1581 *f = *r;
1582 f->class = sim_fpu_class_qnan;
1583 return 0;
1585 if (sim_fpu_is_infinity (l))
1587 *f = sim_fpu_qnan;
1588 return sim_fpu_status_invalid_irx;
1590 if (sim_fpu_is_zero (r))
1592 *f = sim_fpu_qnan;
1593 return sim_fpu_status_invalid_div0;
1595 if (sim_fpu_is_zero (l))
1597 *f = *l;
1598 return 0;
1600 if (sim_fpu_is_infinity (r))
1602 *f = *l;
1603 return 0;
1606 sim_fpu n, tmp;
1608 /* Remainder is calculated as l-n*r, where n is l/r rounded to the
1609 nearest integer. The variable n is rounded half even. */
1611 sim_fpu_div (&n, l, r);
1612 sim_fpu_round_64 (&n, 0, 0);
1614 if (n.normal_exp < -1) /* If n looks like zero just return l. */
1616 *f = *l;
1617 return 0;
1619 else if (n.class == sim_fpu_class_number
1620 && n.normal_exp <= (NR_FRAC_GUARD)) /* If not too large, round. */
1621 do_normal_round (&n, (NR_FRAC_GUARD) - n.normal_exp, sim_fpu_round_near);
1623 /* Mark 0's as zero so multiply can detect zero. */
1624 if (n.fraction == 0)
1625 n.class = sim_fpu_class_zero;
1627 /* Calculate n*r. */
1628 sim_fpu_mul (&tmp, &n, r);
1629 sim_fpu_round_64 (&tmp, 0, 0);
1631 /* Finally calculate l-n*r. */
1632 sim_fpu_sub (f, l, &tmp);
1634 return 0;
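/* Editorial worked example, not part of the original file:
   rem (5.0, 2.0): n = 5.0/2.0 = 2.5 rounds half-even to 2.0, so the
   result is 5.0 - 2.0*2.0 = 1.0.  Because n is rounded to nearest
   rather than truncated, rem (7.0, 2.0) gives -1.0 (n = 3.5 rounds to
   4.0), unlike an fmod-style remainder which would give 1.0.  */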
1639 INLINE_SIM_FPU (int)
1640 sim_fpu_max (sim_fpu *f,
1641 const sim_fpu *l,
1642 const sim_fpu *r)
1644 if (sim_fpu_is_snan (l))
1646 *f = *l;
1647 f->class = sim_fpu_class_qnan;
1648 return sim_fpu_status_invalid_snan;
1650 if (sim_fpu_is_snan (r))
1652 *f = *r;
1653 f->class = sim_fpu_class_qnan;
1654 return sim_fpu_status_invalid_snan;
1656 if (sim_fpu_is_qnan (l))
1658 *f = *l;
1659 return 0;
1661 if (sim_fpu_is_qnan (r))
1663 *f = *r;
1664 return 0;
1666 if (sim_fpu_is_infinity (l))
1668 if (sim_fpu_is_infinity (r)
1669 && l->sign == r->sign)
1671 *f = sim_fpu_qnan;
1672 return sim_fpu_status_invalid_isi;
1674 if (l->sign)
1675 *f = *r; /* -inf < anything */
1676 else
1677 *f = *l; /* +inf > anything */
1678 return 0;
1680 if (sim_fpu_is_infinity (r))
1682 if (r->sign)
1683 *f = *l; /* anything > -inf */
1684 else
1685 *f = *r; /* anything < +inf */
1686 return 0;
1688 if (l->sign > r->sign)
1690 *f = *r; /* -ve < +ve */
1691 return 0;
1693 if (l->sign < r->sign)
1695 *f = *l; /* +ve > -ve */
1696 return 0;
1698 ASSERT (l->sign == r->sign);
1699 if (l->normal_exp > r->normal_exp
1700 || (l->normal_exp == r->normal_exp
1701 && l->fraction > r->fraction))
1703 /* |l| > |r| */
1704 if (l->sign)
1705 *f = *r; /* -ve < -ve */
1706 else
1707 *f = *l; /* +ve > +ve */
1708 return 0;
1710 else
1712 /* |l| <= |r| */
1713 if (l->sign)
1714 *f = *l; /* -ve > -ve */
1715 else
1716 *f = *r; /* +ve < +ve */
1717 return 0;
1722 INLINE_SIM_FPU (int)
1723 sim_fpu_min (sim_fpu *f,
1724 const sim_fpu *l,
1725 const sim_fpu *r)
1727 if (sim_fpu_is_snan (l))
1729 *f = *l;
1730 f->class = sim_fpu_class_qnan;
1731 return sim_fpu_status_invalid_snan;
1733 if (sim_fpu_is_snan (r))
1735 *f = *r;
1736 f->class = sim_fpu_class_qnan;
1737 return sim_fpu_status_invalid_snan;
1739 if (sim_fpu_is_qnan (l))
1741 *f = *l;
1742 return 0;
1744 if (sim_fpu_is_qnan (r))
1746 *f = *r;
1747 return 0;
1749 if (sim_fpu_is_infinity (l))
1751 if (sim_fpu_is_infinity (r)
1752 && l->sign == r->sign)
1754 *f = sim_fpu_qnan;
1755 return sim_fpu_status_invalid_isi;
1757 if (l->sign)
1758 *f = *l; /* -inf < anything */
1759 else
1760 *f = *r; /* +inf > anything */
1761 return 0;
1763 if (sim_fpu_is_infinity (r))
1765 if (r->sign)
1766 *f = *r; /* anything > -inf */
1767 else
1768 *f = *l; /* anything < +inf */
1769 return 0;
1771 if (l->sign > r->sign)
1773 *f = *l; /* -ve < +ve */
1774 return 0;
1776 if (l->sign < r->sign)
1778 *f = *r; /* +ve > -ve */
1779 return 0;
1781 ASSERT (l->sign == r->sign);
1782 if (l->normal_exp > r->normal_exp
1783 || (l->normal_exp == r->normal_exp
1784 && l->fraction > r->fraction))
1786 /* |l| > |r| */
1787 if (l->sign)
1788 *f = *l; /* -ve < -ve */
1789 else
1790 *f = *r; /* +ve > +ve */
1791 return 0;
1793 else
1795 /* |l| <= |r| */
1796 if (l->sign)
1797 *f = *r; /* -ve > -ve */
1798 else
1799 *f = *l; /* +ve < +ve */
1800 return 0;
1805 INLINE_SIM_FPU (int)
1806 sim_fpu_neg (sim_fpu *f,
1807 const sim_fpu *r)
1809 if (sim_fpu_is_snan (r))
1811 *f = *r;
1812 f->class = sim_fpu_class_qnan;
1813 return sim_fpu_status_invalid_snan;
1815 if (sim_fpu_is_qnan (r))
1817 *f = *r;
1818 return 0;
1820 *f = *r;
1821 f->sign = !r->sign;
1822 return 0;
1826 INLINE_SIM_FPU (int)
1827 sim_fpu_abs (sim_fpu *f,
1828 const sim_fpu *r)
1830 *f = *r;
1831 f->sign = 0;
1832 if (sim_fpu_is_snan (r))
1834 f->class = sim_fpu_class_qnan;
1835 return sim_fpu_status_invalid_snan;
1837 return 0;
1841 INLINE_SIM_FPU (int)
1842 sim_fpu_inv (sim_fpu *f,
1843 const sim_fpu *r)
1845 return sim_fpu_div (f, &sim_fpu_one, r);
1849 INLINE_SIM_FPU (int)
1850 sim_fpu_sqrt (sim_fpu *f,
1851 const sim_fpu *r)
1853 if (sim_fpu_is_snan (r))
1855 *f = sim_fpu_qnan;
1856 return sim_fpu_status_invalid_snan;
1858 if (sim_fpu_is_qnan (r))
1860 *f = sim_fpu_qnan;
1861 return 0;
1863 if (sim_fpu_is_zero (r))
1865 f->class = sim_fpu_class_zero;
1866 f->sign = r->sign;
1867 f->normal_exp = 0;
1868 return 0;
1870 if (sim_fpu_is_infinity (r))
1872 if (r->sign)
1874 *f = sim_fpu_qnan;
1875 return sim_fpu_status_invalid_sqrt;
1877 else
1879 f->class = sim_fpu_class_infinity;
1880 f->sign = 0;
1882 return 0;
1885 if (r->sign)
1887 *f = sim_fpu_qnan;
1888 return sim_fpu_status_invalid_sqrt;
1891 /* @(#)e_sqrt.c 5.1 93/09/24 */
1893 * ====================================================
1894 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
1896 * Developed at SunPro, a Sun Microsystems, Inc. business.
1897 * Permission to use, copy, modify, and distribute this
1898 * software is freely granted, provided that this notice
1899 * is preserved.
1900 * ====================================================
1903 /* __ieee754_sqrt(x)
1904 * Return correctly rounded sqrt.
1905 * ------------------------------------------
1906 * | Use the hardware sqrt if you have one |
1907 * ------------------------------------------
1908 * Method:
1909 * Bit by bit method using integer arithmetic. (Slow, but portable)
1910 * 1. Normalization
1911 * Scale x to y in [1,4) with even powers of 2:
1912 * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
1913 * sqrt(x) = 2^k * sqrt(y)
1915 - Since:
1916 - sqrt ( x*2^(2m) ) = sqrt(x).2^m ; m even
1917 - sqrt ( x*2^(2m + 1) ) = sqrt(2.x).2^m ; m odd
1918 - Define:
1919 - y = ((m even) ? x : 2.x)
1920 - Then:
1921 - y in [1, 4) ; [IMPLICIT_1,IMPLICIT_4)
1922 - And:
1923 - sqrt (y) in [1, 2) ; [IMPLICIT_1,IMPLICIT_2)
1925 * 2. Bit by bit computation
1926 * Let q(i) = sqrt(y) truncated to i bits after the binary point
1927 * (q(0) = 1), s(i) = 2*q(i), and y(i) = 2^(i+1) * ( y - q(i)^2 ). (1)
1928 * To compute q(i+1) from q(i), one checks whether
1929 * (q(i) + 2^-(i+1))^2 <= y. (2)
1930 * If (2) is false, then q(i+1) = q(i); otherwise q(i+1) = q(i) + 2^-(i+1).
1931 * With some algebraic manipulation, it is not difficult to see
1932 * that (2) is equivalent to
1933 * s(i) + 2^-(i+1) <= y(i) (3)
1934 * The advantage of (3) is that s(i) and y(i) can be computed by
1935 * the following recurrence formula:
1936 * if (3) is false
1937 * s(i+1) = s(i), y(i+1) = y(i); (4)
1938 - NOTE: y(i+1) = 2*y(i)
1939 * otherwise,
1940 * s(i+1) = s(i) + 2^-i, y(i+1) = y(i) - s(i) - 2^-(i+1) (5)
1941 - NOTE: y(i+1) = 2*(y(i) - s(i) - 2^-(i+1))
1970 * One may easily use induction to prove (4) and (5).
1971 * Note. Since the left hand side of (3) contains only i+2 bits,
1972 * it is not necessary to do a full (53-bit) comparison
1973 * in (3).
1974 * 3. Final rounding
1975 * After generating the 53 bits result, we compute one more bit.
1976 * Together with the remainder, we can decide whether the
1977 * result is exact, bigger than 1/2ulp, or less than 1/2ulp
1978 * (it will never be equal to 1/2ulp).
1979 * The rounding mode can be detected by checking whether
1980 * huge + tiny is equal to huge, and whether huge - tiny is
1981 * equal to huge for some floating point number "huge" and "tiny".
1983 * Special cases:
1984 * sqrt(+-0) = +-0 ... exact
1985 * sqrt(inf) = inf
1986 * sqrt(-ve) = NaN ... with invalid signal
1987 * sqrt(NaN) = NaN ... with invalid signal for signalling NaN
1989 * Other methods : see the appended file at the end of the program below.
1990 *---------------
1994 /* Generate sqrt(x) bit by bit. */
1995 unsigned64 y;
1996 unsigned64 q;
1997 unsigned64 s;
1998 unsigned64 b;
2000 f->class = sim_fpu_class_number;
2001 f->sign = 0;
2002 y = r->fraction;
2003 f->normal_exp = (r->normal_exp >> 1); /* exp = [exp/2] */
2005 /* Odd exp, double x to make it even. */
2006 ASSERT (y >= IMPLICIT_1 && y < IMPLICIT_4);
2007 if ((r->normal_exp & 1))
2009 y += y;
2011 ASSERT (y >= IMPLICIT_1 && y < (IMPLICIT_2 << 1));
2013 /* Let loop determine first value of s (either 1 or 2) */
2014 b = IMPLICIT_1;
2015 q = 0;
2016 s = 0;
2018 while (b)
2020 unsigned64 t = s + b;
2021 if (t <= y)
2023 s |= (b << 1);
2024 y -= t;
2025 q |= b;
2027 y <<= 1;
2028 b >>= 1;
2031 ASSERT (q >= IMPLICIT_1 && q < IMPLICIT_2);
2032 f->fraction = q;
2033 if (y != 0)
2035 f->fraction |= 1; /* Stick remaining bits. */
2036 return sim_fpu_status_inexact;
2038 else
2039 return 0;
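/* Editorial worked example, not part of the original file: for r = 2.0
   (fraction 1.0, normal_exp 1) the exponent is odd, so f->normal_exp
   becomes 0, y is doubled, and the loop produces q ~= 1.41421..., with
   the sticky bit set because sqrt(2) is inexact.  For r = 4.0 (fraction
   1.0, normal_exp 2) the exponent is even, the loop leaves y == 0 and
   q == 1.0, and the result 1.0 x 2^1 = 2.0 is exact.  */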
2044 /* int/long <-> sim_fpu */
2046 INLINE_SIM_FPU (int)
2047 sim_fpu_i32to (sim_fpu *f,
2048 signed32 i,
2049 sim_fpu_round round)
2051 i2fpu (f, i, 0);
2052 return 0;
2055 INLINE_SIM_FPU (int)
2056 sim_fpu_u32to (sim_fpu *f,
2057 unsigned32 u,
2058 sim_fpu_round round)
2060 u2fpu (f, u, 0);
2061 return 0;
2064 INLINE_SIM_FPU (int)
2065 sim_fpu_i64to (sim_fpu *f,
2066 signed64 i,
2067 sim_fpu_round round)
2069 i2fpu (f, i, 1);
2070 return 0;
2073 INLINE_SIM_FPU (int)
2074 sim_fpu_u64to (sim_fpu *f,
2075 unsigned64 u,
2076 sim_fpu_round round)
2078 u2fpu (f, u, 1);
2079 return 0;
2083 INLINE_SIM_FPU (int)
2084 sim_fpu_to32i (signed32 *i,
2085 const sim_fpu *f,
2086 sim_fpu_round round)
2088 signed64 i64;
2089 int status = fpu2i (&i64, f, 0, round);
2090 *i = i64;
2091 return status;
2094 INLINE_SIM_FPU (int)
2095 sim_fpu_to32u (unsigned32 *u,
2096 const sim_fpu *f,
2097 sim_fpu_round round)
2099 unsigned64 u64;
2100 int status = fpu2u (&u64, f, 0);
2101 *u = u64;
2102 return status;
2105 INLINE_SIM_FPU (int)
2106 sim_fpu_to64i (signed64 *i,
2107 const sim_fpu *f,
2108 sim_fpu_round round)
2110 return fpu2i (i, f, 1, round);
2114 INLINE_SIM_FPU (int)
2115 sim_fpu_to64u (unsigned64 *u,
2116 const sim_fpu *f,
2117 sim_fpu_round round)
2119 return fpu2u (u, f, 1);
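/* Editorial usage sketch, not part of the original file.  */
#if 0
static void
example_int_conversion (void)
{
  sim_fpu v;
  signed32 back;
  sim_fpu_i32to (&v, -7, sim_fpu_round_near);	/* exact; no rounding occurs */
  sim_fpu_to32i (&back, &v, sim_fpu_round_zero);
  ASSERT (back == -7);
}
#endif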
2124 /* sim_fpu -> host format */
2126 #if 0
2127 INLINE_SIM_FPU (float)
2128 sim_fpu_2f (const sim_fpu *f)
2130 return fval.d;
2132 #endif
2135 INLINE_SIM_FPU (double)
2136 sim_fpu_2d (const sim_fpu *s)
2138 sim_fpu_map val;
2139 if (sim_fpu_is_snan (s))
2141 /* gag SNaN's */
2142 sim_fpu n = *s;
2143 n.class = sim_fpu_class_qnan;
2144 val.i = pack_fpu (&n, 1);
2146 else
2148 val.i = pack_fpu (s, 1);
2150 return val.d;
2154 #if 0
2155 INLINE_SIM_FPU (void)
2156 sim_fpu_f2 (sim_fpu *f,
2157 float s)
2159 sim_fpu_map val;
2160 val.d = s;
2161 unpack_fpu (f, val.i, 1);
2163 #endif
2166 INLINE_SIM_FPU (void)
2167 sim_fpu_d2 (sim_fpu *f,
2168 double d)
2170 sim_fpu_map val;
2171 val.d = d;
2172 unpack_fpu (f, val.i, 1);
2176 /* General */
2178 INLINE_SIM_FPU (int)
2179 sim_fpu_is_nan (const sim_fpu *d)
2181 switch (d->class)
2183 case sim_fpu_class_qnan:
2184 case sim_fpu_class_snan:
2185 return 1;
2186 default:
2187 return 0;
2191 INLINE_SIM_FPU (int)
2192 sim_fpu_is_qnan (const sim_fpu *d)
2194 switch (d->class)
2196 case sim_fpu_class_qnan:
2197 return 1;
2198 default:
2199 return 0;
2203 INLINE_SIM_FPU (int)
2204 sim_fpu_is_snan (const sim_fpu *d)
2206 switch (d->class)
2208 case sim_fpu_class_snan:
2209 return 1;
2210 default:
2211 return 0;
2215 INLINE_SIM_FPU (int)
2216 sim_fpu_is_zero (const sim_fpu *d)
2218 switch (d->class)
2220 case sim_fpu_class_zero:
2221 return 1;
2222 default:
2223 return 0;
2227 INLINE_SIM_FPU (int)
2228 sim_fpu_is_infinity (const sim_fpu *d)
2230 switch (d->class)
2232 case sim_fpu_class_infinity:
2233 return 1;
2234 default:
2235 return 0;
2239 INLINE_SIM_FPU (int)
2240 sim_fpu_is_number (const sim_fpu *d)
2242 switch (d->class)
2244 case sim_fpu_class_denorm:
2245 case sim_fpu_class_number:
2246 return 1;
2247 default:
2248 return 0;
2252 INLINE_SIM_FPU (int)
2253 sim_fpu_is_denorm (const sim_fpu *d)
2255 switch (d->class)
2257 case sim_fpu_class_denorm:
2258 return 1;
2259 default:
2260 return 0;
2265 INLINE_SIM_FPU (int)
2266 sim_fpu_sign (const sim_fpu *d)
2268 return d->sign;
2272 INLINE_SIM_FPU (int)
2273 sim_fpu_exp (const sim_fpu *d)
2275 return d->normal_exp;
2279 INLINE_SIM_FPU (unsigned64)
2280 sim_fpu_fraction (const sim_fpu *d)
2282 return d->fraction;
2286 INLINE_SIM_FPU (unsigned64)
2287 sim_fpu_guard (const sim_fpu *d, int is_double)
2289 unsigned64 rv;
2290 unsigned64 guardmask = LSMASK64 (NR_GUARDS - 1, 0);
2291 rv = (d->fraction & guardmask) >> NR_PAD;
2292 return rv;
2296 INLINE_SIM_FPU (int)
2297 sim_fpu_is (const sim_fpu *d)
2299 switch (d->class)
2301 case sim_fpu_class_qnan:
2302 return SIM_FPU_IS_QNAN;
2303 case sim_fpu_class_snan:
2304 return SIM_FPU_IS_SNAN;
2305 case sim_fpu_class_infinity:
2306 if (d->sign)
2307 return SIM_FPU_IS_NINF;
2308 else
2309 return SIM_FPU_IS_PINF;
2310 case sim_fpu_class_number:
2311 if (d->sign)
2312 return SIM_FPU_IS_NNUMBER;
2313 else
2314 return SIM_FPU_IS_PNUMBER;
2315 case sim_fpu_class_denorm:
2316 if (d->sign)
2317 return SIM_FPU_IS_NDENORM;
2318 else
2319 return SIM_FPU_IS_PDENORM;
2320 case sim_fpu_class_zero:
2321 if (d->sign)
2322 return SIM_FPU_IS_NZERO;
2323 else
2324 return SIM_FPU_IS_PZERO;
2325 default:
2326 return -1;
2327 abort ();
2331 INLINE_SIM_FPU (int)
2332 sim_fpu_cmp (const sim_fpu *l, const sim_fpu *r)
2334 sim_fpu res;
2335 sim_fpu_sub (&res, l, r);
2336 return sim_fpu_is (&res);
2339 INLINE_SIM_FPU (int)
2340 sim_fpu_is_lt (const sim_fpu *l, const sim_fpu *r)
2342 int status;
2343 sim_fpu_lt (&status, l, r);
2344 return status;
2347 INLINE_SIM_FPU (int)
2348 sim_fpu_is_le (const sim_fpu *l, const sim_fpu *r)
2350 int is;
2351 sim_fpu_le (&is, l, r);
2352 return is;
2355 INLINE_SIM_FPU (int)
2356 sim_fpu_is_eq (const sim_fpu *l, const sim_fpu *r)
2358 int is;
2359 sim_fpu_eq (&is, l, r);
2360 return is;
2363 INLINE_SIM_FPU (int)
2364 sim_fpu_is_ne (const sim_fpu *l, const sim_fpu *r)
2366 int is;
2367 sim_fpu_ne (&is, l, r);
2368 return is;
2371 INLINE_SIM_FPU (int)
2372 sim_fpu_is_ge (const sim_fpu *l, const sim_fpu *r)
2374 int is;
2375 sim_fpu_ge (&is, l, r);
2376 return is;
2379 INLINE_SIM_FPU (int)
2380 sim_fpu_is_gt (const sim_fpu *l, const sim_fpu *r)
2382 int is;
2383 sim_fpu_gt (&is, l, r);
2384 return is;
2388 /* Compare operators */
2390 INLINE_SIM_FPU (int)
2391 sim_fpu_lt (int *is,
2392 const sim_fpu *l,
2393 const sim_fpu *r)
2395 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2397 sim_fpu_map lval;
2398 sim_fpu_map rval;
2399 lval.i = pack_fpu (l, 1);
2400 rval.i = pack_fpu (r, 1);
2401 (*is) = (lval.d < rval.d);
2402 return 0;
2404 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2406 *is = 0;
2407 return sim_fpu_status_invalid_snan;
2409 else
2411 *is = 0;
2412 return sim_fpu_status_invalid_qnan;
2416 INLINE_SIM_FPU (int)
2417 sim_fpu_le (int *is,
2418 const sim_fpu *l,
2419 const sim_fpu *r)
2421 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2423 sim_fpu_map lval;
2424 sim_fpu_map rval;
2425 lval.i = pack_fpu (l, 1);
2426 rval.i = pack_fpu (r, 1);
2427 *is = (lval.d <= rval.d);
2428 return 0;
2430 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2432 *is = 0;
2433 return sim_fpu_status_invalid_snan;
2435 else
2437 *is = 0;
2438 return sim_fpu_status_invalid_qnan;
2442 INLINE_SIM_FPU (int)
2443 sim_fpu_eq (int *is,
2444 const sim_fpu *l,
2445 const sim_fpu *r)
2447 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2449 sim_fpu_map lval;
2450 sim_fpu_map rval;
2451 lval.i = pack_fpu (l, 1);
2452 rval.i = pack_fpu (r, 1);
2453 (*is) = (lval.d == rval.d);
2454 return 0;
2456 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2458 *is = 0;
2459 return sim_fpu_status_invalid_snan;
2461 else
2463 *is = 0;
2464 return sim_fpu_status_invalid_qnan;
2468 INLINE_SIM_FPU (int)
2469 sim_fpu_ne (int *is,
2470 const sim_fpu *l,
2471 const sim_fpu *r)
2473 if (!sim_fpu_is_nan (l) && !sim_fpu_is_nan (r))
2475 sim_fpu_map lval;
2476 sim_fpu_map rval;
2477 lval.i = pack_fpu (l, 1);
2478 rval.i = pack_fpu (r, 1);
2479 (*is) = (lval.d != rval.d);
2480 return 0;
2482 else if (sim_fpu_is_snan (l) || sim_fpu_is_snan (r))
2484 *is = 0;
2485 return sim_fpu_status_invalid_snan;
2487 else
2489 *is = 0;
2490 return sim_fpu_status_invalid_qnan;
2494 INLINE_SIM_FPU (int)
2495 sim_fpu_ge (int *is,
2496 const sim_fpu *l,
2497 const sim_fpu *r)
2499 return sim_fpu_le (is, r, l);
2502 INLINE_SIM_FPU (int)
2503 sim_fpu_gt (int *is,
2504 const sim_fpu *l,
2505 const sim_fpu *r)
2507 return sim_fpu_lt (is, r, l);
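/* Editorial usage sketch, not part of the original file: the predicate
   forms return the truth value directly, while the operator forms also
   report invalid-operation status for NaN operands.  */
#if 0
static void
example_compare (void)
{
  int is, status;
  status = sim_fpu_lt (&is, &sim_fpu_one, &sim_fpu_two);  /* is == 1, status == 0 */
  is = sim_fpu_is_lt (&sim_fpu_two, &sim_fpu_one);        /* is == 0 */
  status = sim_fpu_lt (&is, &sim_fpu_qnan, &sim_fpu_one); /* is == 0, QNaN status */
}
#endif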
2511 /* A number of useful constants */
2513 #if EXTERN_SIM_FPU_P
2514 const sim_fpu sim_fpu_zero = {
2515 sim_fpu_class_zero, 0, 0, 0
2517 const sim_fpu sim_fpu_qnan = {
2518 sim_fpu_class_qnan, 0, 0, 0
2520 const sim_fpu sim_fpu_one = {
2521 sim_fpu_class_number, 0, IMPLICIT_1, 0
2523 const sim_fpu sim_fpu_two = {
2524 sim_fpu_class_number, 0, IMPLICIT_1, 1
2526 const sim_fpu sim_fpu_max32 = {
2527 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS32), NORMAL_EXPMAX32
2529 const sim_fpu sim_fpu_max64 = {
2530 sim_fpu_class_number, 0, LSMASK64 (NR_FRAC_GUARD, NR_GUARDS64), NORMAL_EXPMAX64
2532 #endif
2535 /* For debugging */
2537 INLINE_SIM_FPU (void)
2538 sim_fpu_print_fpu (const sim_fpu *f,
2539 sim_fpu_print_func *print,
2540 void *arg)
2542 sim_fpu_printn_fpu (f, print, -1, arg);
2545 INLINE_SIM_FPU (void)
2546 sim_fpu_printn_fpu (const sim_fpu *f,
2547 sim_fpu_print_func *print,
2548 int digits,
2549 void *arg)
2551 print (arg, "%s", f->sign ? "-" : "+");
2552 switch (f->class)
2554 case sim_fpu_class_qnan:
2555 print (arg, "0.");
2556 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2557 print (arg, "*QuietNaN");
2558 break;
2559 case sim_fpu_class_snan:
2560 print (arg, "0.");
2561 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2562 print (arg, "*SignalNaN");
2563 break;
2564 case sim_fpu_class_zero:
2565 print (arg, "0.0");
2566 break;
2567 case sim_fpu_class_infinity:
2568 print (arg, "INF");
2569 break;
2570 case sim_fpu_class_number:
2571 case sim_fpu_class_denorm:
2572 print (arg, "1.");
2573 print_bits (f->fraction, NR_FRAC_GUARD - 1, digits, print, arg);
2574 print (arg, "*2^%+d", f->normal_exp);
2575 ASSERT (f->fraction >= IMPLICIT_1);
2576 ASSERT (f->fraction < IMPLICIT_2);
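/* Editorial usage sketch, not part of the original file; assumes
   <stdio.h> and <stdarg.h> are available.  */
#if 0
static void
example_print_helper (void *arg, const char *fmt, ...)
{
  va_list ap;
  va_start (ap, fmt);
  vfprintf ((FILE *) arg, fmt, ap);
  va_end (ap);
}

static void
example_print (void)
{
  sim_fpu v;
  sim_fpu_d2 (&v, 1.5);
  /* Prints something like "+1.1000,...*2^+0" to stderr.  */
  sim_fpu_print_fpu (&v, example_print_helper, stderr);
}
#endif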
2581 INLINE_SIM_FPU (void)
2582 sim_fpu_print_status (int status,
2583 sim_fpu_print_func *print,
2584 void *arg)
2586 int i = 1;
2587 const char *prefix = "";
2588 while (status >= i)
2590 switch ((sim_fpu_status) (status & i))
2592 case sim_fpu_status_denorm:
2593 print (arg, "%sD", prefix);
2594 break;
2595 case sim_fpu_status_invalid_snan:
2596 print (arg, "%sSNaN", prefix);
2597 break;
2598 case sim_fpu_status_invalid_qnan:
2599 print (arg, "%sQNaN", prefix);
2600 break;
2601 case sim_fpu_status_invalid_isi:
2602 print (arg, "%sISI", prefix);
2603 break;
2604 case sim_fpu_status_invalid_idi:
2605 print (arg, "%sIDI", prefix);
2606 break;
2607 case sim_fpu_status_invalid_zdz:
2608 print (arg, "%sZDZ", prefix);
2609 break;
2610 case sim_fpu_status_invalid_imz:
2611 print (arg, "%sIMZ", prefix);
2612 break;
2613 case sim_fpu_status_invalid_cvi:
2614 print (arg, "%sCVI", prefix);
2615 break;
2616 case sim_fpu_status_invalid_cmp:
2617 print (arg, "%sCMP", prefix);
2618 break;
2619 case sim_fpu_status_invalid_sqrt:
2620 print (arg, "%sSQRT", prefix);
2621 break;
2622 case sim_fpu_status_invalid_irx:
2623 print (arg, "%sIRX", prefix);
2624 break;
2625 case sim_fpu_status_inexact:
2626 print (arg, "%sX", prefix);
2627 break;
2628 case sim_fpu_status_overflow:
2629 print (arg, "%sO", prefix);
2630 break;
2631 case sim_fpu_status_underflow:
2632 print (arg, "%sU", prefix);
2633 break;
2634 case sim_fpu_status_invalid_div0:
2635 print (arg, "%s/", prefix);
2636 break;
2637 case sim_fpu_status_rounded:
2638 print (arg, "%sR", prefix);
2639 break;
2641 i <<= 1;
2642 prefix = ",";
2646 #endif