#include "FEATURE/uwin"

#if !_UWIN || (_lib__copysign||_lib_copysign) && _lib_logb && (_lib__finite||_lib_finite) && (_lib_drem||_lib_remainder) && _lib_sqrt && _lib_ilogb && (_lib__scalb||_lib_scalb)

void _STUB_support(){}

#else
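
/*
 * Feature-test scaffolding: when the host already supplies all of these
 * routines (the _lib_* macros tested above), only the empty _STUB_support()
 * is compiled; otherwise the Berkeley fallback implementations below are used.
 */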
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

static char sccsid[] = "@(#)support.c 8.1 (Berkeley) 6/4/93";
/*
 * Some IEEE standard 754 recommended functions and remainder and sqrt for
 * supporting the C elementary functions.
 ******************************************************************************
 * WARNING:
 *	These codes are developed (in double) to support the C elementary
 *	functions temporarily. They are not universal, and some of them are
 *	very slow (in particular, drem and sqrt are extremely inefficient).
 *	Each computer system should have its own implementation of these
 *	functions using its own assembler.
 ******************************************************************************
 *
 * IEEE 754 required operations:
 *     drem(x,p)
 *		returns  x REM p  =  x - [x/p]*p , where [x/p] is the integer
 *		nearest x/p; in the halfway case, choose the even one.
 *     sqrt(x)
 *		returns the square root of x correctly rounded according to
 *		the rounding mode.
 *
 * IEEE 754 recommended functions:
 * (a) copysign(x,y)
 *		returns x with the sign of y.
 * (b) scalb(x,N)
 *		returns  x * (2**N), for integer values N.
 * (c) logb(x)
 *		returns the unbiased exponent of x, a signed integer in
 *		double precision, except that logb(0) is -INF, logb(INF)
 *		is +INF, and logb(NAN) is that NAN.
 * (d) finite(x)
 *		returns the value TRUE if -INF < x < +INF and returns
 *		FALSE otherwise.
 *
 * CODED IN C BY K.C. NG, 11/25/84;
 * REVISED BY K.C. NG on 1/22/85, 2/13/85, 3/24/85.
 */
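
/*
 * A few worked values for the operations documented above: an illustrative
 * sketch only, assuming IEEE double arithmetic and the fallback definitions
 * below; kept under #if 0 so it never becomes part of the library.
 */
#if 0
#include <stdio.h>

extern double copysign(), scalb(), logb(), drem();
extern int finite();

static void support_examples()
{
	double huge = 1e308;

	printf("%g\n", copysign(3.0, -0.0));	/* -3: magnitude of x, sign of y  */
	printf("%g\n", scalb(1.5, 4.0));	/* 24: 1.5 * 2**4                 */
	printf("%g\n", logb(0.03125));		/* -5: 0.03125 == 2**-5           */
	printf("%d\n", finite(huge * 10.0));	/*  0: the product overflows      */
	printf("%g\n", drem(7.0, 2.0));		/* -1: 7/2 = 3.5 rounds to even 4 */
}
#endif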
#include "mathimpl.h"

#if defined(vax)||defined(tahoe)	/* VAX D format */
#include <errno.h>
static const unsigned short msign=0x7fff , mexp =0x7f80 ;
static const short prep1=57, gap=7, bias=129            ;
static const double novf=1.7E38, nunf=3.0E-39, zero=0.0 ;
#else	/* defined(vax)||defined(tahoe) */
static const unsigned short msign=0x7fff, mexp =0x7ff0  ;
static const short prep1=54, gap=4, bias=1023           ;
static const double novf=1.7E308, nunf=3.0E-308,zero=0.0;
#endif	/* defined(vax)||defined(tahoe) */
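
/*
 * Rough meaning of the constants above (IEEE double case): mexp masks the
 * 11 exponent bits of the high-order 16-bit word of a double and msign
 * everything but its sign bit; gap is the shift down to the exponent field,
 * bias the exponent bias, prep1 the power of two used to prenormalize
 * subnormal operands, and novf/nunf sit just inside the overflow and
 * underflow thresholds.
 */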
#if !_lib__scalb || !_lib_scalb

extern double _scalb(x,N)
double x; double N;
{
	int k;

#ifdef national
	unsigned short *px=(unsigned short *) &x + 3;
#else	/* national */
	unsigned short *px=(unsigned short *) &x;
#endif	/* national */

	if( x == zero )  return(x);

#if defined(vax)||defined(tahoe)
	if( (k= *px & mexp ) != ~msign ) {
	    if (N < -260)
		return(nunf*nunf);
	    else if (N > 260) {
		return(copysign(infnan(ERANGE),x));
	    }
#else	/* defined(vax)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp ) {
	    if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);
	    if( k == 0 ) {
		x *= scalb(1.0,prep1);  N -= prep1; return(scalb(x,N));}
#endif	/* defined(vax)||defined(tahoe) */

	    if((k = (k>>gap)+ N) > 0 )
		if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);
		else x=novf+novf;		/* overflow */
	    else
		if( k > -prep1 )
			/* gradual underflow */
			{*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}
		else
			return(nunf*nunf);
	    }
	return(x);
}

#endif
#if !_lib_scalb

extern double scalb(x,N)
double x; double N;
{
	return _scalb(x, N);
}

#endif
#if !_lib__copysign

extern double _copysign(x,y)
double x,y;
{
#ifdef national
	unsigned short *px=(unsigned short *) &x+3,
		       *py=(unsigned short *) &y+3;
#else	/* national */
	unsigned short *px=(unsigned short *) &x,
		       *py=(unsigned short *) &y;
#endif	/* national */

#if defined(vax)||defined(tahoe)
	if ( (*px & mexp) == 0 ) return(x);
#endif	/* defined(vax)||defined(tahoe) */

	*px = ( *px & msign ) | ( *py & ~msign );
	return(x);
}

#endif
#if !_lib_copysign

extern double copysign(x,y)
double x,y;
{
	return _copysign(x,y);
}

#endif
#if !_lib_logb

extern double logb(x)
double x;
{
#ifdef national
	short *px=(short *) &x+3, k;
#else	/* national */
	short *px=(short *) &x, k;
#endif	/* national */

#if defined(vax)||defined(tahoe)
	return (int)(((*px&mexp)>>gap)-bias);
#else	/* defined(vax)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp )
	    if ( k != 0 )
		return ( (k>>gap) - bias );
	    else if( x != zero)
		return ( -1022.0 );
	    else
		return(-(1.0/zero));
	else if(x != x)
	    return(x);
	else
	    {*px &= msign; return(x);}
#endif	/* defined(vax)||defined(tahoe) */
}

#endif
#if !_lib__finite

extern int _finite(x)
double x;
{
#if defined(vax)||defined(tahoe)
	return(1);
#else	/* defined(vax)||defined(tahoe) */
#ifdef national
	return( (*((short *) &x+3 ) & mexp ) != mexp );
#else	/* national */
	return( (*((short *) &x ) & mexp ) != mexp );
#endif	/* national */
#endif	/* defined(vax)||defined(tahoe) */
}

#endif
#if !_lib_finite

extern int finite(x)
double x;
{
	return _finite(x);
}

#endif
#if !_lib_drem

extern double drem(x,p)
double x,p;
{
#if _lib_remainder
	return remainder(x,p);
#else
	short sign;
	double hp,dp,tmp;
	unsigned short  k;
#ifdef national
	unsigned short
	      *px=(unsigned short *) &x  +3,
	      *pp=(unsigned short *) &p  +3,
	      *pd=(unsigned short *) &dp +3,
	      *pt=(unsigned short *) &tmp+3;
#else	/* national */
	unsigned short
	      *px=(unsigned short *) &x  ,
	      *pp=(unsigned short *) &p  ,
	      *pd=(unsigned short *) &dp ,
	      *pt=(unsigned short *) &tmp;
#endif	/* national */

	*pp &= msign ;

#if defined(vax)||defined(tahoe)
	if( ( *px & mexp ) == ~msign )	/* is x a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
	if( ( *px & mexp ) == mexp )
#endif	/* defined(vax)||defined(tahoe) */
		return  (x-p)-(x-p);	/* create nan if x is inf */
	if (p == zero) {
#if defined(vax)||defined(tahoe)
		return(infnan(EDOM));
#else	/* defined(vax)||defined(tahoe) */
		return zero/zero;
#endif	/* defined(vax)||defined(tahoe) */
	}

#if defined(vax)||defined(tahoe)
	if( ( *pp & mexp ) == ~msign )	/* is p a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
	if( ( *pp & mexp ) == mexp )
#endif	/* defined(vax)||defined(tahoe) */
		{ if (p != p) return p; else return x;}
	else if ( ((*pp & mexp)>>gap) <= 1 )
		/* subnormal p, or almost subnormal p */
		{ double b; b=scalb(1.0,(int)prep1);
		  p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}
	else if ( p >= novf/2)
		{ p /= 2 ; x /= 2; return(drem(x,p)*2);}
	else
		{
		dp=p+p; hp=p/2;
		sign= *px & ~msign ;
		*px &= msign       ;
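		/*
		 * Reduction sketch (informal): each pass below scales
		 * dp = 2*p by a power of two so that its exponent matches
		 * that of x (stepping back one when it overshoots), then
		 * subtracts it.  The scaled copy stays within a factor of
		 * two of x, so the subtraction should be exact, and only
		 * even multiples of p are removed; the adjustment after the
		 * loop folds the result into the final range around zero.
		 */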
		while ( x > dp )
		    {
			k=(*px & mexp) - (*pd & mexp) ;
			tmp = dp ;
			*pt += k ;

#if defined(vax)||defined(tahoe)
			if( x < tmp ) *pt -= 128 ;
#else	/* defined(vax)||defined(tahoe) */
			if( x < tmp ) *pt -= 16 ;
#endif	/* defined(vax)||defined(tahoe) */

			x -= tmp ;
		    }
		if ( x > hp )
		    { x -= p ;  if ( x >= hp ) x -= p ; }

#if defined(vax)||defined(tahoe)
		if (x)
#endif	/* defined(vax)||defined(tahoe) */
		*px ^= sign;
		return( x);
	}
#endif
}

#endif
#if !_lib_remainder

extern double remainder(x,p)
double x,p;
{
	return drem(x,p);
}

#endif
#if !_lib_sqrt

extern double sqrt(x)
double x;
{
	double q,s,b,r;
	double t;
	double const zero=0.0;
	int m,n,i;
#if defined(vax)||defined(tahoe)
	int k=54;
#else	/* defined(vax)||defined(tahoe) */
	int k=51;
#endif	/* defined(vax)||defined(tahoe) */

	/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x!=x||x==zero) return(x);

	/* sqrt(negative) is invalid */
	if(x<zero) {
#if defined(vax)||defined(tahoe)
		return (infnan(EDOM));	/* NaN */
#else	/* defined(vax)||defined(tahoe) */
		return(zero/zero);
#endif	/* defined(vax)||defined(tahoe) */
	}

	/* sqrt(INF) is INF */
	if(!finite(x)) return(x);

	/* scale x to [1,4) */
	n=logb(x);
	x=scalb(x,-n);
	if((m=logb(x))!=0) x=scalb(x,-m);	/* subnormal number */
	m += n;
	n = m/2;
	if((n+n)!=m) {x *= 2; m -=1; n=m/2;}

	/* generate sqrt(x) bit by bit (accumulating in q) */
	q=1.0; s=4.0; x -= 1.0; r=1;
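	/*
	 * Informal invariant for the loop below: q holds the root bits found
	 * so far and r = 2**-i is the weight of the next bit; after the
	 * x *= 4 step, x = 4**i * (x0 - q*q) and s+1 = 4**i * (2*q*r + r*r),
	 * so the test t <= x asks exactly whether (q + r)**2 <= x0, i.e.
	 * whether the next bit of the square root can be set.
	 */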
	for(i=1;i<=k;i++) {
		t=s+1; x *= 4; r /= 2;
		if(t<=x) {
		    s=t+t+2, x -= t; q += r;}
		else
		    s *= 2;
	}

	/* generate the last bit and determine the final rounding */
	r/=2; x *= 4;
	if(x==zero) goto end; 100+r;	/* trigger inexact flag */
	if(s<x) {
	    q+=r; x -=s; s += 2; s *= 2; x *= 4;
	    t = (x-s)-5;
	    b=1.0+3*r/4;	if(b==1.0) goto end;	/* b==1 : Round-to-zero */
	    b=1.0+r/4;		if(b>1.0) t=1;		/* b>1 : Round-to-(+INF) */
	    if(t>=0) q+=r; }	/* else: Round-to-nearest */
	else {
	    s *= 2; x *= 4;
	    t = (x-s)-1;
	    b=1.0+3*r/4;	if(b==1.0) goto end;
	    b=1.0+r/4;		if(b>1.0) t=1;
	    if(t>=0) q+=r; }

end:	return(scalb(q,n));
}

#endif
#if 0
/* DREM(X,Y)
 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)
 * INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/23/85, 4/8/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions (not on a VAX):
 *	swapINX(i): save inexact flag and reset it to "i"
 *	swapENI(e): save inexact enable and reset it to "e"
 */
extern double drem(x,y)
double x,y;
{
#ifdef national		/* order of words in floating point number */
	static const n0=3,n1=2,n2=1,n3=0;
#else	/* VAX, SUN, ZILOG, TAHOE */
	static const n0=0,n1=1,n2=2,n3=3;
#endif

	static const unsigned short mexp =0x7ff0, m25 =0x0190, m57 =0x0390;
	static const double zero=0.0;
	double hy,y1,t,t1;
	short k;
	long n;
	int i,e;
	unsigned short xexp,yexp, *px  =(unsigned short *) &x  ,
			nx,nf,	  *py  =(unsigned short *) &y  ,
			sign,	  *pt  =(unsigned short *) &t  ,
				  *pt1 =(unsigned short *) &t1 ;

	xexp = px[n0] & mexp ;	/* exponent of x */
	yexp = py[n0] & mexp ;	/* exponent of y */
	sign = px[n0] &0x8000;	/* sign of x */

/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */
	if(x!=x) return(x); if(y!=y) return(y);	/* x or y is NaN */
	if( xexp == mexp )   return(zero/zero);	/* x is INF */
	if(y==zero) return(y/y);

/* save the inexact flag and inexact enable in i and e respectively
 * and reset them to zero
 */
	i=swapINX(0);	e=swapENI(0);

/* subnormal number */
	nx=0;
	if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}

/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */
	if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}

	nf=nx;
	py[n0] &= 0x7fff;
	px[n0] &= 0x7fff;

/* mask off the least significant 27 bits of y */
	t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;
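/*
 * Why the mask: y1 keeps only the high-order significant bits of y, so y is
 * split as y1 + (y - y1).  For the quotients n < 2**26 produced below, both
 * n*y1 and n*(y - y1) should then be exactly representable, and the two-step
 * subtraction x = (x - n*y1) - n*(y - y1) reduces x without rounding error
 * (an informal reading of the intent of this code).
 */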
/* LOOP: argument reduction on x whenever x > y */
loop:
	while ( x > y )
	{
		t=y;
		t1=y1;
		xexp=px[n0]&mexp;	/* exponent of x */
		k=xexp-yexp-m25;
		if(k>0)		/* if x/y >= 2**26, scale up y so that x/y < 2**26 */
		    {pt[n0]+=k;pt1[n0]+=k;}
		n=x/t; x=(x-n*t1)-n*(t-t1);
	}
	/* end while (x > y) */

	if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}

/* final adjustment */

	hy=y/2.0;
	if(x>hy||((x==hy)&&n%2==1)) x-=y;
	px[n0] ^= sign;
	if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}

/* restore inexact flag and inexact enable */
	swapINX(i); swapENI(e);
	return(x);
}
#endif
#if 0
/* SQRT
 * RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT
 * FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/22/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions:
 *	swapINX(i)  ...return the status of INEXACT flag and reset it to "i"
 *	swapRM(r)   ...return the current Rounding Mode and reset it to "r"
 *	swapENI(e)  ...return the status of inexact enable and reset it to "e"
 *	addc(t)     ...perform t=t+1 regarding t as a 64 bit unsigned integer
 *	subc(t)     ...perform t=t-1 regarding t as a 64 bit unsigned integer
 */

static const unsigned long table[] = {
	0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,
	58733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,
	21581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };
extern double newsqrt(x)
double x;
{
	double y,z,t,addc(),subc();
	double const b54=134217728.*134217728.;	/* b54=2**54 */
	long mx,scalx;
	long const mexp=0x7ff00000;
	int i,j,r,e,swapINX(),swapRM(),swapENI();
	unsigned long *py=(unsigned long *) &y   ,
		      *pt=(unsigned long *) &t   ,
		      *px=(unsigned long *) &x   ;
#ifdef national		/* ordering of word in a floating point number */
	const int n0=1, n1=0;
#else
	const int n0=0, n1=1;
#endif
/* Rounding Mode:  RN ...round-to-nearest
 *		   RZ ...round-towards 0
 *		   RP ...round-towards +INF
 *		   RM ...round-towards -INF
 */
	const int RN=0,RZ=1,RP=2,RM=3;
				/* machine dependent: work on a Zilog Z8070
				 * and a National 32081 & 16081
				 */

/* exceptions */
	if(x!=x||x==0.0) return(x);		/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x<0) return((x-x)/(x-x));		/* sqrt(negative) is invalid */
	if((mx=px[n0]&mexp)==mexp) return(x);	/* sqrt(+INF) is +INF */

/* save, reset, initialize */
	e=swapENI(0);	/* ...save and reset the inexact enable */
	i=swapINX(0);	/* ...save INEXACT flag */
	r=swapRM(RN);	/* ...save and reset the Rounding Mode to RN */
	scalx=0;

/* subnormal number, scale up x to x*2**54 */
	if(mx==0) {x *= b54 ; scalx-=0x01b00000;}

/* scale x to avoid intermediate over/underflow:
 * if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */
	if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}
	if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}

/* magic initial approximation to almost 8 sig. bits */
	py[n0]=(px[n0]>>1)+0x1ff80000;
	py[n0]=py[n0]-table[(py[n0]>>15)&31];
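/* The shift-and-add above roughly halves the biased exponent of x by working
 * directly on its bit pattern, giving a crude first guess at the square root;
 * the table, indexed by bits of that guess, then corrects it to roughly 8
 * significant bits (an informal reading of the trick).
 */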
/* Heron's rule once with correction to improve y to almost 18 sig. bits */
	t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;

/* triple to almost 56 sig. bits; now y approx. sqrt(x) to within 1 ulp */
	t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y;
	t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t;

/* twiddle last bit to force y correctly rounded */
	swapRM(RZ);	/* ...set Rounding Mode to round-toward-zero */
	swapINX(0);	/* ...clear INEXACT flag */
	swapENI(e);	/* ...restore inexact enable status */
	t=x/y;		/* ...chopped quotient, possibly inexact */
	j=swapINX(i);	/* ...read and restore inexact flag */
	if(j==0) { if(t==y) goto end; else t=subc(t); }	/* ...t=t-ulp */
	b54+0.1;	/* ..trigger inexact flag, sqrt(x) is inexact */
	if(r==RN) t=addc(t);			/* ...t=t+ulp */
	else if(r==RP) { t=addc(t);y=addc(y);}	/* ...t=t+ulp;y=y+ulp; */
	y=y+t;					/* ...chopped sum */
	py[n0]=py[n0]-0x00100000;		/* ...correctly rounded sqrt(x) */
end:	py[n0]=py[n0]+scalx;			/* ...scale back y */
	swapRM(r);				/* ...restore Rounding Mode */
	return(y);
}
#endif
#if !_lib_ilogb

extern int ilogb(double x)
{
	return((int)logb(x));
}

#endif

#endif