#include "FEATURE/uwin"

#if !_UWIN || (_lib__copysign||_lib_copysign) && _lib_logb && (_lib__finite||_lib_finite) && (_lib_drem||_lib_remainder) && _lib_sqrt && _lib_ilogb && (_lib__scalb||_lib_scalb)
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
static char sccsid[] = "@(#)support.c	8.1 (Berkeley) 6/4/93";
/*
 * Some IEEE standard 754 recommended functions and remainder and sqrt for
 * supporting the C elementary functions.
 ******************************************************************************
 *	This code was developed (in double precision) to support the C
 * elementary functions temporarily. It is not universal, and some of it is
 * very slow (in particular, drem and sqrt are extremely inefficient). Each
 * computer system should have its own implementation of these functions,
 * using its own assembler.
 ******************************************************************************
 *
 * IEEE 754 required operations:
 *	drem(x,p)
 *		returns x REM p = x - [x/p]*p, where [x/p] is the integer
 *		nearest x/p; in the halfway case, choose the even one.
 *	sqrt(x)
 *		returns the square root of x correctly rounded according to
 *		the rounding mode.
 *
 * IEEE 754 recommended functions:
 *	copysign(x,y)
 *		returns x with the sign of y.
 *	scalb(x,N)
 *		returns x * (2**N), for integer values N.
 *	logb(x)
 *		returns the unbiased exponent of x, a signed integer in
 *		double precision, except that logb(0) is -INF, logb(INF)
 *		is +INF, and logb(NAN) is that NAN.
 *	finite(x)
 *		returns the value TRUE if -INF < x < +INF and
 *		FALSE otherwise.
 *
 * CODED IN C BY K.C. NG, 11/25/84;
 * REVISED BY K.C. NG on 1/22/85, 2/13/85, 3/24/85.
 */
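
/*
 * Usage sketch (illustration only, not compiled): expected results for the
 * operations documented above on an IEEE double system, assuming the usual
 * <math.h>/<stdio.h> declarations for these routines.
 */
#if 0
#include <math.h>
#include <stdio.h>

static void support_examples(void)
{
	printf("%g\n", copysign(3.0, -2.5));	/* -3.0: magnitude of x, sign of y */
	printf("%g\n", scalb(0.75, 4));		/* 12.0: 0.75 * 2**4 */
	printf("%g\n", logb(12.0));		/* 3.0: 12 = 1.5 * 2**3 */
	printf("%d\n", finite(HUGE_VAL));	/* 0: +INF is not finite on IEEE */
	printf("%g\n", drem(5.0, 2.0));		/* 1.0: nearest integer quotient is 2, 5 - 2*2 = 1 */
}
#endif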
#if defined(vax)||defined(tahoe)	/* VAX D format */
static const unsigned short msign=0x7fff, mexp=0x7f80;
static const short prep1=57, gap=7, bias=129;
static const double novf=1.7E38, nunf=3.0E-39, zero=0.0;
#else	/* defined(vax)||defined(tahoe) */
static const unsigned short msign=0x7fff, mexp=0x7ff0;
static const short prep1=54, gap=4, bias=1023;
static const double novf=1.7E308, nunf=3.0E-308, zero=0.0;
#endif	/* defined(vax)||defined(tahoe) */
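
/*
 * Illustration only (not compiled): how the constants above take an IEEE
 * double apart.  In the 16-bit word holding the sign and exponent, the
 * exponent field is mexp=0x7ff0 (bits 4..14, gap=4) with bias 1023.  For
 * 12.0 = 1.5 * 2**3 that word is 0x4028, and ((0x4028 & 0x7ff0) >> 4) - 1023
 * is 3, which is what logb() below computes.
 */
#if 0
#include <assert.h>

static void exponent_field_example(void)
{
	double d = 12.0;
	/* word order as in the non-national (big-endian) case above */
	unsigned short top = *(unsigned short *)&d;

	assert((int)((top & 0x7ff0) >> 4) - 1023 == 3);
}
#endif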
#if !_lib__scalb || !_lib_scalb

extern double _scalb(x,N)
double x; int N;
{
	int k;
#ifdef national
	unsigned short *px=(unsigned short *) &x + 3;
#else	/* national */
	unsigned short *px=(unsigned short *) &x;
#endif	/* national */
	if( x == zero ) return(x);

#if defined(vax)||defined(tahoe)
	if( (k= *px & mexp ) != ~msign ) {
	    if( N < -260 )
		return(nunf*nunf);
	    else if( N > 260 )
		return(copysign(infnan(ERANGE),x));
#else	/* defined(vax)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp ) {
	    if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);
	    if( k == 0 ) {
		x *= scalb(1.0,prep1); N -= prep1; return(scalb(x,N));}
#endif	/* defined(vax)||defined(tahoe) */

	    if((k = (k>>gap)+ N ) > 0 )
		if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);
		else x=novf+novf;			/* overflow */
	    else
		if( k > -prep1 )
		    /* gradual underflow */
		    {*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}
		else
		    return(nunf*nunf);
	}
	return(x);
}
extern double scalb(x,N)
double x; int N;
{
	return _scalb(x,N);
}
extern double _copysign(x,y)
double x,y;
{
#ifdef national
	unsigned short	*px=(unsigned short *) &x+3,
			*py=(unsigned short *) &y+3;
#else	/* national */
	unsigned short	*px=(unsigned short *) &x,
			*py=(unsigned short *) &y;
#endif	/* national */
#if defined(vax)||defined(tahoe)
	if ( (*px & mexp) == 0 ) return(x);
#endif	/* defined(vax)||defined(tahoe) */

	*px = ( *px & msign ) | ( *py & ~msign );
	return(x);
}
extern double copysign(x,y)
double x,y;
{
	return _copysign(x,y);
}
extern double logb(x)
double x;
{
#ifdef national
	short *px=(short *) &x+3, k;
#else	/* national */
	short *px=(short *) &x, k;
#endif	/* national */

#if defined(vax)||defined(tahoe)
	return (int)(((*px&mexp)>>gap)-bias);
#else	/* defined(vax)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp )
	    if ( k != 0 )
		return ( (k>>gap) - bias );
	    else if( x != zero )
		return ( -1022.0 );
	    else
		return(-(1.0/zero));
	else if( x != x )
	    return(x);
	else
	    {*px &= msign; return(x);}
#endif	/* defined(vax)||defined(tahoe) */
}
extern int _finite(x)
double x;
{
#if defined(vax)||defined(tahoe)
	return(1);	/* VAX D format has no INF or NaN */
#else	/* defined(vax)||defined(tahoe) */
#ifdef national
	return( (*((short *) &x+3 ) & mexp ) != mexp );
#else	/* national */
	return( (*((short *) &x ) & mexp ) != mexp );
#endif	/* national */
#endif	/* defined(vax)||defined(tahoe) */
}
extern double drem(x,p)
double x,p;
{
#if _lib_remainder
	return remainder(x,p);
#else	/* _lib_remainder */
	short sign;
	double hp,dp,tmp;
	unsigned short k;
#ifdef national
	unsigned short
		*px =(unsigned short *) &x  +3,
		*pp =(unsigned short *) &p  +3,
		*pd =(unsigned short *) &dp +3,
		*pt =(unsigned short *) &tmp+3;
#else	/* national */
	unsigned short
		*px =(unsigned short *) &x  ,
		*pp =(unsigned short *) &p  ,
		*pd =(unsigned short *) &dp ,
		*pt =(unsigned short *) &tmp;
#endif	/* national */
	*pp &= msign;	/* p = |p| */

#if defined(vax)||defined(tahoe)
	if( ( *px & mexp ) == ~msign )	/* is x a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
	if( ( *px & mexp ) == mexp )
#endif	/* defined(vax)||defined(tahoe) */
		return (x-p)-(x-p);	/* create nan if x is inf */

	if (p == zero) {
#if defined(vax)||defined(tahoe)
		return(infnan(EDOM));
#else	/* defined(vax)||defined(tahoe) */
		return zero/zero;
#endif	/* defined(vax)||defined(tahoe) */
	}
#if defined(vax)||defined(tahoe)
	if( ( *pp & mexp ) == ~msign )	/* is p a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
	if( ( *pp & mexp ) == mexp )
#endif	/* defined(vax)||defined(tahoe) */
		{ if (p != p) return p; else return x;}
	else if ( ((*pp & mexp)>>gap) <= 1 )
		/* subnormal p, or almost subnormal p */
		{ double b; b=scalb(1.0,(int)prep1);
		  p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}
	else if ( p >= novf/2)
		{ p /= 2 ; x /= 2; return(drem(x,p)*2);}
	else {
		dp=p+p; hp=p/2;
		sign= *px & ~msign ;
		*px &= msign ;
		while ( x > dp )
		{
			k=(*px & mexp) - (*pd & mexp) ;
			tmp = dp ;
			*pt += k ;
#if defined(vax)||defined(tahoe)
			if( x < tmp ) *pt -= 128 ;
#else	/* defined(vax)||defined(tahoe) */
			if( x < tmp ) *pt -= 16 ;
#endif	/* defined(vax)||defined(tahoe) */
			x -= tmp ;
		}
		if ( x > hp )
			{ x -= p ;  if ( x >= hp ) x -= p ; }
#if defined(vax)||defined(tahoe)
		if (x)	/* a VAX zero must stay all-zero (avoid a reserved operand) */
#endif	/* defined(vax)||defined(tahoe) */
			*px ^= sign;
		return(x);
	}
#endif	/* _lib_remainder */
}
extern double remainder(x,p)
double x,p;
{
	return drem(x,p);
}
extern double sqrt(x)
double x;
{
	double q,s,b,r;
	double t;
	double const zero=0.0;
	int m,n,i;
#if defined(vax)||defined(tahoe)
	int k=54;	/* VAX D format: 56 significand bits */
#else	/* defined(vax)||defined(tahoe) */
	int k=51;	/* IEEE double: 53 significand bits */
#endif	/* defined(vax)||defined(tahoe) */
    /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x!=x||x==zero) return(x);

    /* sqrt(negative) is invalid */
	if(x<zero) {
#if defined(vax)||defined(tahoe)
		return (infnan(EDOM));	/* NaN */
#else	/* defined(vax)||defined(tahoe) */
		return(zero/zero);
#endif	/* defined(vax)||defined(tahoe) */
	}

    /* sqrt(INF) is INF */
	if(!finite(x)) return(x);
    /* scale x to [1,4) */
	n=logb(x);
	x=scalb(x,-n);
	if((m=logb(x))!=0) x=scalb(x,-m);	/* subnormal number */
	m += n;
	n = m/2;
	if((n+n)!=m) {x *= 2; m -=1; n=m/2;}
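
    /*
     * Worked example (illustration only): for x = 50, logb gives n = 5 and
     * scaling yields 1.5625; m = 5 is odd, so x is doubled to 3.125 with
     * m = 4 and n = 2.  Thus 50 = 3.125 * 4**2, and the value returned at
     * the end is sqrt(3.125) * 2**2 ~= 1.76777 * 4 ~= 7.07107 = sqrt(50).
     */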
    /* generate sqrt(x) bit by bit (accumulating in q) */
	q=1.0; s=4.0; x -= 1.0; r=1;
	for(i=1;i<=k;i++) {
	    t=s+1; x *= 4; r /= 2;
	    if(t<=x) {
		s=t+t+2, x -= t; q += r;}
	    else
		s *= 2;
	}
    /* generate the last bit and determine the final rounding */
	r/=2; x *= 4;
	if(x==zero) goto end; 100+r;	/* trigger inexact flag */
	if(s<x) {
	    q+=r; x -=s; s += 2; s *= 2; x *= 4;
	    t = (x-s)-5;
	    b=1.0+3*r/4; if(b==1.0) goto end;	/* b==1 : Round-to-zero */
	    b=1.0+r/4;   if(b>1.0) t=1;		/* b>1  : Round-to-(+INF) */
	    if(t>=0) q+=r; }			/* else : Round-to-nearest */
	else {
	    s *= 2; x *= 4;
	    t = (x-s)-1;
	    b=1.0+3*r/4; if(b==1.0) goto end;
	    b=1.0+r/4;   if(b>1.0) t=1;
	    if(t>=0) q+=r; }

end:	return(scalb(q,n));
}
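
/*
 * Illustration only (not part of the library, not compiled): the same
 * bit-by-bit idea on unsigned integers.  Each pass appends one result bit,
 * just as the loop above accumulates one binary digit of sqrt(x) in q per
 * iteration, with r the weight of the current bit.
 */
#if 0
static unsigned isqrt32(unsigned v)
{
	unsigned q = 0;			/* result, accumulated bit by bit */
	unsigned bit = 1u << 30;	/* highest power of 4 that fits */

	while (bit > v)
		bit >>= 2;
	while (bit != 0) {
		if (v >= q + bit) {	/* this bit belongs in the root */
			v -= q + bit;
			q = (q >> 1) + bit;
		} else
			q >>= 1;	/* this bit does not */
		bit >>= 2;
	}
	return q;			/* floor(sqrt(v)) */
}
#endif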
/*
 * RETURN X REM Y = X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)
 * INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/23/85, 4/8/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions (not on a VAX):
 *	swapINX(i): save inexact flag and reset it to "i"
 *	swapENI(e): save inexact enable and reset it to "e"
 */
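
/*
 * Illustration only (not compiled): one possible shape for the two hooks
 * above on a C99 system with <fenv.h>.  This is a sketch under that
 * assumption, not the assembly-level support the comment asks for; the
 * trap-enable half uses feenableexcept/fedisableexcept, which are glibc
 * extensions rather than standard C.
 */
#if 0
#define _GNU_SOURCE
#include <fenv.h>

static int swapINX(int i)		/* save inexact flag, reset it to i */
{
	int old = fetestexcept(FE_INEXACT) != 0;

	feclearexcept(FE_INEXACT);
	if (i)
		feraiseexcept(FE_INEXACT);
	return old;
}

static int swapENI(int e)		/* save inexact trap enable, reset it to e */
{
	int old = (fegetexcept() & FE_INEXACT) != 0;

	if (e)
		feenableexcept(FE_INEXACT);
	else
		fedisableexcept(FE_INEXACT);
	return old;
}
#endif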
extern double drem(x,y)
double x,y;
{
#ifdef national		/* order of words in floating point number */
	static const int n0=3,n1=2,n2=1,n3=0;
#else	/* VAX, SUN, ZILOG, TAHOE */
	static const int n0=0,n1=1,n2=2,n3=3;
#endif	/* national */

	static const unsigned short mexp=0x7ff0, m25=0x0190, m57=0x0390;
	static const double zero=0.0;
	double hy,y1,t,t1;
	short k;
	long n;
	int i,e;
	unsigned short xexp,yexp, *px =(unsigned short *) &x  ,
		       nx,nf,	  *py =(unsigned short *) &y  ,
		       sign,	  *pt =(unsigned short *) &t  ,
				  *pt1=(unsigned short *) &t1 ;
	xexp = px[n0] & mexp ;	/* exponent of x */
	yexp = py[n0] & mexp ;	/* exponent of y */
	sign = px[n0] &0x8000;	/* sign of x     */

/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */
	if(x!=x) return(x); if(y!=y) return(y);	/* x or y is NaN */
	if( xexp == mexp )   return(zero/zero);	/* x is INF */
	if(y==zero) return(y/y);
/* save the inexact flag and inexact enable in i and e respectively
 * and reset them to zero
 */
	i=swapINX(0);	e=swapENI(0);

/* subnormal number */
	nx=0;
	if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}

/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */
	if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}
	nf=nx;
	py[n0] &= 0x7fff;
	px[n0] &= 0x7fff;

/* mask off the least significant 27 bits of y, so that y1 has few enough
 * significand bits for n*y1 in the reduction below to be exact */
	t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;
/* LOOP: argument reduction on x whenever x > y */
loop:
	while ( x > y )
	{
	    t=y;
	    t1=y1;
	    xexp=px[n0]&mexp;	/* exponent of x */
	    k=xexp-yexp-m25;
	    if(k>0)	/* if x/y >= 2**26, scale up y so that x/y < 2**26 */
		{pt[n0]+=k;pt1[n0]+=k;}
	    n=x/t; x=(x-n*t1)-n*(t-t1);
	}
    /* end while (x > y) */

	if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}
/* final adjustment */
	hy=y/2.0;
	if(x>hy||((x==hy)&&n%2==1)) x-=y;
	px[n0] ^= sign;
	if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}

/* restore inexact flag and inexact enable */
	swapINX(i); swapENI(e);
	return(x);
}
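
/*
 * Worked example (illustration only): drem(7.0, 2.0) is -1.0, not 1.0.
 * 7/2 = 3.5 is exactly halfway between 3 and 4, the even quotient 4 is
 * chosen, and 7 - 4*2 = -1; the (x==hy)&&(n%2==1) test in the final
 * adjustment above is what breaks such ties toward the even quotient.
 */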
/*
 * RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT
 * FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/22/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions:
 *	swapINX(i) ...return the status of INEXACT flag and reset it to "i"
 *	swapRM(r)  ...return the current Rounding Mode and reset it to "r"
 *	swapENI(e) ...return the status of inexact enable and reset it to "e"
 *	addc(t)    ...perform t=t+1 regarding t as a 64 bit unsigned integer
 *	subc(t)    ...perform t=t-1 regarding t as a 64 bit unsigned integer
 */
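
/*
 * Illustration only (not compiled): addc/subc as described above treat the
 * 64-bit pattern of a double as an unsigned integer and add or subtract 1,
 * i.e. for the positive values used here they move t by one ulp.  A C99
 * sketch of that idea, assuming 64-bit IEEE doubles:
 */
#if 0
#include <stdint.h>
#include <string.h>

static double addc(double t)		/* t + 1 ulp */
{
	uint64_t u;

	memcpy(&u, &t, sizeof u);
	u += 1;
	memcpy(&t, &u, sizeof t);
	return t;
}

static double subc(double t)		/* t - 1 ulp */
{
	uint64_t u;

	memcpy(&u, &t, sizeof u);
	u -= 1;
	memcpy(&t, &u, sizeof t);
	return t;
}
#endif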
static const unsigned long table[] = {
	0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,
	58733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,
	21581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };
extern double newsqrt(x)
double x;
{
	double y,z,t,addc(),subc();
	double const b54=134217728.*134217728.;	/* b54=2**54 */
	long mx,scalx;
	long const mexp=0x7ff00000;
	int i,j,r,e,swapINX(),swapRM(),swapENI();
	unsigned long *py=(unsigned long *) &y ,
		      *pt=(unsigned long *) &t ,
		      *px=(unsigned long *) &x ;
#ifdef national		/* ordering of word in a floating point number */
	const int n0=1, n1=0;
#else	/* national */
	const int n0=0, n1=1;
#endif	/* national */
/* Rounding Mode:  RN ...round-to-nearest
 *		   RZ ...round-towards 0
 *		   RP ...round-towards +INF
 *		   RM ...round-towards -INF
 */
	const int RN=0,RZ=1,RP=2,RM=3;
			/* machine dependent: works on a Zilog Z8070
			 * and a National 32081 & 16081
			 */

	if(x!=x||x==0.0) return(x);	/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x<0) return((x-x)/(x-x));	/* sqrt(negative) is invalid */
	if((mx=px[n0]&mexp)==mexp) return(x);	/* sqrt(+INF) is +INF */
/* save, reset, initialize */
	e=swapENI(0);	/* ...save and reset the inexact enable */
	i=swapINX(0);	/* ...save INEXACT flag */
	r=swapRM(RN);	/* ...save and reset the Rounding Mode to RN */
	scalx=0;

/* subnormal number, scale up x to x*2**54 */
	if(mx==0) {x *= b54 ; scalx-=0x01b00000;}

/* scale x to avoid intermediate over/underflow:
 * if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */
	if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}
	if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}
/* magic initial approximation to almost 8 sig. bits */
	py[n0]=(px[n0]>>1)+0x1ff80000;
	py[n0]=py[n0]-table[(py[n0]>>15)&31];
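
/*
 * Worked example (illustration only): for x = 4.0 the high word px[n0] is
 * 0x40100000; shifting right by one gives 0x20080000 and adding 0x1ff80000
 * gives 0x40000000, i.e. y = 2.0.  Halving the biased exponent field
 * approximates the square root, and the 32-entry table then corrects the
 * leading mantissa bits to almost 8 significant bits.
 */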
/* Heron's rule once with correction to improve y to almost 18 sig. bits */
	t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;

/* triple to almost 56 sig. bits; now y approx. sqrt(x) to within 1 ulp */
	t=y*y; z=t;  pt[n0]+=0x00100000; t+=z; z=(x-z)*y;
	t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t;
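
/*
 * Illustration only: Heron's rule y' = (y + x/y)/2 roughly doubles the
 * number of correct digits per step, which is why one corrected step takes
 * the ~8-bit table seed to almost 18 bits and the refinement above reaches
 * almost 56 bits.  For x = 2 and seed y = 1.5 the iterates are
 * 1.5 -> 1.41667 -> 1.4142157 -> 1.4142135624.
 */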
/* twiddle last bit to force y correctly rounded */
	swapRM(RZ);	/* ...set Rounding Mode to round-toward-zero */
	swapINX(0);	/* ...clear INEXACT flag */
	swapENI(e);	/* ...restore inexact enable status */
	t=x/y;		/* ...chopped quotient, possibly inexact */
	j=swapINX(i);	/* ...read and restore inexact flag */
	if(j==0) { if(t==y) goto end; else t=subc(t); }	/* ...t=t-ulp */
	b54+0.1;	/* ...trigger inexact flag, sqrt(x) is inexact */
	if(r==RN) t=addc(t);			/* ...t=t+ulp */
	else if(r==RP) { t=addc(t);y=addc(y);}	/* ...t=t+ulp;y=y+ulp; */
	y=y+t;					/* ...chopped sum */
	py[n0]=py[n0]-0x00100000;		/* ...correctly rounded sqrt(x) */
end:	py[n0]=py[n0]+scalx;			/* ...scale back y */
	swapRM(r);				/* ...restore Rounding Mode */
	return(y);
}
extern int ilogb(double x)
{
	return((int)logb(x));
}