ldivmod, uldivmod: fix qdivrem calls
[minix.git] / lib / libm / noieee_src / n_support.c
blob b37c74f3d2d048dba6a02e5587f37b83c262c187
/*	$NetBSD: n_support.c,v 1.5 2003/08/07 16:44:52 agc Exp $	*/
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef lint
static char sccsid[] = "@(#)support.c	8.1 (Berkeley) 6/4/93";
#endif /* not lint */
/*
 * Some IEEE standard 754 recommended functions, and remainder and sqrt, for
 * supporting the C elementary functions.
 ******************************************************************************
 * WARNING:
 *      These codes are developed (in double) to support the C elementary
 * functions temporarily. They are not universal, and some of them are very
 * slow (in particular, drem and sqrt are extremely inefficient). Each
 * computer system should have its own implementation of these functions
 * using its own assembler.
 ******************************************************************************
 *
 * IEEE 754 required operations:
 *     drem(x,p)
 *              returns x REM p = x - [x/p]*p , where [x/p] is the integer
 *              nearest x/p; in the halfway case, choose the even one.
 *     sqrt(x)
 *              returns the square root of x correctly rounded according to
 *              the rounding mode.
 *
 * IEEE 754 recommended functions:
 * (a) copysign(x,y)
 *              returns x with the sign of y.
 * (b) scalb(x,N)
 *              returns x * (2**N), for integer values N.
 * (c) logb(x)
 *              returns the unbiased exponent of x, a signed integer in
 *              double precision, except that logb(0) is -INF, logb(INF)
 *              is +INF, and logb(NAN) is that NAN.
 * (d) finite(x)
 *              returns the value TRUE if -INF < x < +INF and returns
 *              FALSE otherwise.
 *
 * CODED IN C BY K.C. NG, 11/25/84;
 * REVISED BY K.C. NG on 1/22/85, 2/13/85, 3/24/85.
 */
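/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source; the function name example_usage is hypothetical). The expected
 * values in the comments follow from the semantics documented above,
 * assuming IEEE double.
 */
#if 0
static void
example_usage(void)
{
	double r;

	r = copysign(3.0, -0.5);	/* -3.0: magnitude of x, sign of y */
	r = scalb(0.75, 4);		/* 12.0: 0.75 * 2**4 */
	r = logb(12.0);			/*  3.0: 12 = 1.5 * 2**3 */
	r = drem(7.0, 2.0);		/* -1.0: 7/2 = 3.5 rounds to even 4 */
	(void)finite(r);		/* nonzero: r is neither INF nor NaN */
}
#endif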
#include "mathimpl.h"
#include "trig.h"
#if defined(__vax__)||defined(tahoe)	/* VAX D format */
#include <errno.h>
static const unsigned short msign=0x7fff, mexp =0x7f80 ;
static const short prep1=57, gap=7, bias=129 ;
static const double novf=1.7E38, nunf=3.0E-39 ;
#else	/* defined(__vax__)||defined(tahoe) */
static const unsigned short msign=0x7fff, mexp =0x7ff0 ;
static const short prep1=54, gap=4, bias=1023 ;
static const double novf=1.7E308, nunf=3.0E-308;
#endif	/* defined(__vax__)||defined(tahoe) */
double
scalb(double x, int N)
{
	int k;

#ifdef national
	unsigned short *px=(unsigned short *) &x + 3;
#else	/* national */
	unsigned short *px=(unsigned short *) &x;
#endif	/* national */

	if( x == __zero ) return(x);

#if defined(__vax__)||defined(tahoe)
	if( (k= *px & mexp ) != ~msign ) {
		if (N < -260)
			return(nunf*nunf);
		else if (N > 260) {
			return(copysign(infnan(ERANGE),x));
		}
#else	/* defined(__vax__)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp ) {
		if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);
		if( k == 0 ) {
			x *= scalb(1.0,(int)prep1);  N -= prep1;  return(scalb(x,N));}
#endif	/* defined(__vax__)||defined(tahoe) */

		if((k = (k>>gap)+ N) > 0 )
			if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);
			else x=novf+novf;		/* overflow */
		else
			if( k > -prep1 )
				/* gradual underflow */
				{*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}
			else
				return(nunf*nunf);
	}
	return(x);
}
double
copysign(double x, double y)
{
#ifdef national
	unsigned short	*px=(unsigned short *) &x+3,
			*py=(unsigned short *) &y+3;
#else	/* national */
	unsigned short	*px=(unsigned short *) &x,
			*py=(unsigned short *) &y;
#endif	/* national */

#if defined(__vax__)||defined(tahoe)
	if ( (*px & mexp) == 0 ) return(x);
#endif	/* defined(__vax__)||defined(tahoe) */

	*px = ( *px & msign ) | ( *py & ~msign );
	return(x);
}
double
logb(double x)
{
#ifdef national
	short *px=(short *) &x+3, k;
#else	/* national */
	short *px=(short *) &x, k;
#endif	/* national */

#if defined(__vax__)||defined(tahoe)
	return (int)(((*px&mexp)>>gap)-bias);
#else	/* defined(__vax__)||defined(tahoe) */
	if( (k= *px & mexp ) != mexp )
		if ( k != 0 )
			return ( (k>>gap) - bias );
		else if( x != __zero)
			return ( -1022.0 );
		else
			return(-(1.0/__zero));
	else if(x != x)
		return(x);
	else
		{*px &= msign; return(x);}
#endif	/* defined(__vax__)||defined(tahoe) */
}
int
finite(double x)
{
#if defined(__vax__)||defined(tahoe)
	return(1);
#else	/* defined(__vax__)||defined(tahoe) */
#ifdef national
	return( (*((short *) &x+3 ) & mexp ) != mexp );
#else	/* national */
	return( (*((short *) &x ) & mexp ) != mexp );
#endif	/* national */
#endif	/* defined(__vax__)||defined(tahoe) */
}
double
drem(double x, double p)
{
	short sign;
	double hp,dp,tmp;
	unsigned short k;
#ifdef national
	unsigned short
	      *px=(unsigned short *) &x  +3,
	      *pp=(unsigned short *) &p  +3,
	      *pd=(unsigned short *) &dp +3,
	      *pt=(unsigned short *) &tmp+3;
#else	/* national */
	unsigned short
	      *px=(unsigned short *) &x  ,
	      *pp=(unsigned short *) &p  ,
	      *pd=(unsigned short *) &dp ,
	      *pt=(unsigned short *) &tmp;
#endif	/* national */

	*pp &= msign ;

#if defined(__vax__)||defined(tahoe)
	if( ( *px & mexp ) == ~msign )	/* is x a reserved operand? */
#else	/* defined(__vax__)||defined(tahoe) */
	if( ( *px & mexp ) == mexp )
#endif	/* defined(__vax__)||defined(tahoe) */
		return  (x-p)-(x-p);	/* create nan if x is inf */
	if (p == __zero) {
#if defined(__vax__)||defined(tahoe)
		return(infnan(EDOM));
#else	/* defined(__vax__)||defined(tahoe) */
		return __zero/__zero;
#endif	/* defined(__vax__)||defined(tahoe) */
	}

#if defined(__vax__)||defined(tahoe)
	if( ( *pp & mexp ) == ~msign )	/* is p a reserved operand? */
#else	/* defined(__vax__)||defined(tahoe) */
	if( ( *pp & mexp ) == mexp )
#endif	/* defined(__vax__)||defined(tahoe) */
		{ if (p != p) return p; else return x;}

	else if ( ((*pp & mexp)>>gap) <= 1 )
		/* subnormal p, or almost subnormal p */
		{ double b; b=scalb(1.0,(int)prep1);
		  p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}
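	/*
	 * Editorial note: for (nearly) subnormal p the divisor is scaled up
	 * by b = 2**prep1, x is first reduced modulo b*p (which only removes
	 * multiples of p), and the scaling identity drem(x*b, p*b) =
	 * b*drem(x,p) then yields the result with a normalized divisor
	 * throughout.
	 */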
	else if ( p >= novf/2)
		{ p /= 2 ; x /= 2; return(drem(x,p)*2);}

	else
	{
		dp=p+p; hp=p/2;
		sign= *px & ~msign ;
		*px &= msign ;
		while ( x > dp )
		{
			k=(*px & mexp) - (*pd & mexp) ;
			tmp = dp ;
			*pt += k ;

#if defined(__vax__)||defined(tahoe)
			if( x < tmp ) *pt -= 128 ;
#else	/* defined(__vax__)||defined(tahoe) */
			if( x < tmp ) *pt -= 16 ;
#endif	/* defined(__vax__)||defined(tahoe) */

			x -= tmp ;
		}
		if ( x > hp )
			{ x -= p ;  if ( x >= hp ) x -= p ; }

#if defined(__vax__)||defined(tahoe)
		if (x)
#endif	/* defined(__vax__)||defined(tahoe) */
			*px ^= sign;
		return( x);
	}
}
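/*
 * Editorial illustration of the semantics: drem(7.0, 3.0) is 1.0 (the
 * nearest integer to 7/3 is 2, and 7 - 2*3 = 1), while drem(7.0, 2.0) is
 * -1.0 (7/2 = 3.5 is a halfway case, so the even quotient 4 is chosen and
 * 7 - 4*2 = -1).
 */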
double
sqrt(double x)
{
	double q,s,b,r;
	double t;
	int m,n,i;
#if defined(__vax__)||defined(tahoe)
	int k=54;
#else	/* defined(__vax__)||defined(tahoe) */
	int k=51;
#endif	/* defined(__vax__)||defined(tahoe) */

	/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x!=x||x==__zero) return(x);

	/* sqrt(negative) is invalid */
	if(x<__zero) {
#if defined(__vax__)||defined(tahoe)
		return (infnan(EDOM));	/* NaN */
#else	/* defined(__vax__)||defined(tahoe) */
		return(__zero/__zero);
#endif	/* defined(__vax__)||defined(tahoe) */
	}

	/* sqrt(INF) is INF */
	if(!finite(x)) return(x);

	/* scale x to [1,4) */
	n=logb(x);
	x=scalb(x,-n);
	if((m=logb(x))!=0) x=scalb(x,-m);	/* subnormal number */
	m += n;
	n = m/2;
	if((n+n)!=m) {x *= 2; m -=1; n=m/2;}
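	/*
	 * Editorial note: the adjustment above makes the exponent m even,
	 * so the scalb(q,n) at the end multiplies the root of the scaled
	 * argument by the exact power of two 2**(m/2).
	 */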
	/* generate sqrt(x) bit by bit (accumulating in q) */
	q=1.0; s=4.0; x -= 1.0; r=1;
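	/*
	 * Editorial sketch of the loop invariant (X is the scaled argument
	 * in [1,4)): at the top of each pass q is the partial root, r the
	 * weight of the last bit decided, and the code appears to maintain
	 * x = (X - q*q)/(r*r) and s = 4*q/r; after x *= 4 and r /= 2 the
	 * test t <= x asks whether the bit of weight r belongs in the root.
	 */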
	for(i=1;i<=k;i++) {
		t=s+1; x *= 4; r /= 2;
		if(t<=x) {
			s=t+t+2, x -= t; q += r;}
		else
			s *= 2;
	}

	/* generate the last bit and determine the final rounding */
	r/=2; x *= 4;
	if(x==__zero) goto end; 100+r; /* trigger inexact flag */
	if(s<x) {
		q+=r; x -=s; s += 2; s *= 2; x *= 4;
		t = (x-s)-5;
		b=1.0+3*r/4; if(b==1.0) goto end; /* b==1 : Round-to-zero */
		b=1.0+r/4;   if(b>1.0) t=1;	  /* b>1 : Round-to-(+INF) */
		if(t>=0) q+=r; }		  /* else: Round-to-nearest */
	else {
		s *= 2; x *= 4;
		t = (x-s)-1;
		b=1.0+3*r/4; if(b==1.0) goto end;
		b=1.0+r/4;   if(b>1.0) t=1;
		if(t>=0) q+=r; }

end:	return(scalb(q,n));
}
#if 0
/* DREM(X,Y)
 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)
 * INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/23/85, 4/8/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions (not on a VAX):
 *	swapINX(i): save inexact flag and reset it to "i"
 *	swapENI(e): save inexact enable and reset it to "e"
 */
double
drem(double x, double y)
{

#ifdef national		/* order of words in floating point number */
	static const int n0=3,n1=2,n2=1,n3=0;
#else	/* VAX, SUN, ZILOG, TAHOE */
	static const int n0=0,n1=1,n2=2,n3=3;
#endif

	static const unsigned short mexp =0x7ff0, m25 =0x0190, m57 =0x0390;
	double hy,y1,t,t1;
	short k;
	long n;
	int i,e;
	unsigned short xexp,yexp, *px =(unsigned short *) &x  ,
			nx,nf,	  *py =(unsigned short *) &y  ,
			sign,	  *pt =(unsigned short *) &t  ,
				  *pt1 =(unsigned short *) &t1 ;

	xexp = px[n0] & mexp ;	/* exponent of x */
	yexp = py[n0] & mexp ;	/* exponent of y */
	sign = px[n0] &0x8000;	/* sign of x */

	/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */
	if(x!=x) return(x); if(y!=y) return(y);	     /* x or y is NaN */
	if( xexp == mexp )   return(__zero/__zero);  /* x is INF */
	if(y==__zero) return(y/y);

	/* save the inexact flag and inexact enable in i and e respectively
	 * and reset them to zero
	 */
	i=swapINX(0);	e=swapENI(0);

	/* subnormal number */
	nx=0;
	if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}

	/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */
	if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}

	nf=nx;
	py[n0] &= 0x7fff;
	px[n0] &= 0x7fff;

	/* mask off the least significant 27 bits of y */
	t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;
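	/*
	 * Editorial note: y1 keeps only the leading 26 significant bits of
	 * y, so with quotient digits n < 2**26 (see the loop below) both
	 * n*y1 and n*(y-y1) are exact in double, letting x - n*y be
	 * subtracted off without rounding error.
	 */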
	/* LOOP: argument reduction on x whenever x > y */
loop:
	while ( x > y )
	{
		t=y;
		t1=y1;
		xexp=px[n0]&mexp;	/* exponent of x */
		k=xexp-yexp-m25;
		if(k>0)		/* if x/y >= 2**26, scale up y so that x/y < 2**26 */
			{pt[n0]+=k;pt1[n0]+=k;}
		n=x/t; x=(x-n*t1)-n*(t-t1);
	}
	/* end while (x > y) */

	if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}

	/* final adjustment */

	hy=y/2.0;
	if(x>hy||((x==hy)&&n%2==1)) x-=y;
	px[n0] ^= sign;
	if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}

	/* restore inexact flag and inexact enable */
	swapINX(i); swapENI(e);

	return(x);
}
#endif
#if 0
/* SQRT
 * RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT
 * FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/22/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions:
 *	swapINX(i)  ...return the status of INEXACT flag and reset it to "i"
 *	swapRM(r)   ...return the current Rounding Mode and reset it to "r"
 *	swapENI(e)  ...return the status of inexact enable and reset it to "e"
 *	addc(t)     ...perform t=t+1 regarding t as a 64 bit unsigned integer
 *	subc(t)     ...perform t=t-1 regarding t as a 64 bit unsigned integer
 */
static const unsigned long table[] = {
	0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,
	58733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,
	21581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };
double
newsqrt(double x)
{
	double y,z,t,addc(),subc();
	double const b54=134217728.*134217728.; /* b54=2**54 */
	long mx,scalx;
	long const mexp=0x7ff00000;
	int i,j,r,e,swapINX(),swapRM(),swapENI();
	unsigned long *py=(unsigned long *) &y ,
		      *pt=(unsigned long *) &t ,
		      *px=(unsigned long *) &x ;
#ifdef national		/* ordering of word in a floating point number */
	const int n0=1, n1=0;
#else
	const int n0=0, n1=1;
#endif
	/* Rounding Mode:  RN ...round-to-nearest
	 *		   RZ ...round-towards 0
	 *		   RP ...round-towards +INF
	 *		   RM ...round-towards -INF
	 */
	const int RN=0,RZ=1,RP=2,RM=3;
	/* machine dependent: work on a Zilog Z8070
	 * and a National 32081 & 16081
	 */

	/* exceptions */
	if(x!=x||x==0.0) return(x);	/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
	if(x<0) return((x-x)/(x-x));	/* sqrt(negative) is invalid */
	if((mx=px[n0]&mexp)==mexp) return(x);	/* sqrt(+INF) is +INF */

	/* save, reset, initialize */
	e=swapENI(0);	/* ...save and reset the inexact enable */
	i=swapINX(0);	/* ...save INEXACT flag */
	r=swapRM(RN);	/* ...save and reset the Rounding Mode to RN */
	scalx=0;

	/* subnormal number, scale up x to x*2**54 */
	if(mx==0) {x *= b54 ; scalx-=0x01b00000;}

	/* scale x to avoid intermediate over/underflow:
	 * if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */
	if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}
	if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}

	/* magic initial approximation to almost 8 sig. bits */
	py[n0]=(px[n0]>>1)+0x1ff80000;
	py[n0]=py[n0]-table[(py[n0]>>15)&31];
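	/*
	 * Editorial note: shifting the top word of x right by one roughly
	 * halves the biased exponent (the exponent-halving part of sqrt),
	 * and the 32-entry table appears to supply a small bucketed
	 * correction to the leading fraction bits, giving the "almost 8
	 * sig. bits" claimed above.
	 */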
	/* Heron's rule once with correction to improve y to almost 18 sig. bits */
	t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;

	/* triple to almost 56 sig. bits; now y approx. sqrt(x) to within 1 ulp */
	t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y;
	t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t;

	/* twiddle last bit to force y correctly rounded */
	swapRM(RZ);	/* ...set Rounding Mode to round-toward-zero */
	swapINX(0);	/* ...clear INEXACT flag */
	swapENI(e);	/* ...restore inexact enable status */
	t=x/y;		/* ...chopped quotient, possibly inexact */
	j=swapINX(i);	/* ...read and restore inexact flag */
	if(j==0) { if(t==y) goto end; else t=subc(t); }	/* ...t=t-ulp */
	b54+0.1;	/* ..trigger inexact flag, sqrt(x) is inexact */
	if(r==RN) t=addc(t);	/* ...t=t+ulp */
	else if(r==RP) { t=addc(t);y=addc(y);}	/* ...t=t+ulp;y=y+ulp; */
	y=y+t;		/* ...chopped sum */
	py[n0]=py[n0]-0x00100000;	/* ...correctly rounded sqrt(x) */
end:	py[n0]=py[n0]+scalx;	/* ...scale back y */
	swapRM(r);	/* ...restore Rounding Mode */
	return(y);
}
#endif