/* Double-precision 2^x function.
   Copyright (c) 2018 Arm Ltd.  All rights reserved.

   SPDX-License-Identifier: BSD-3-Clause

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:
   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
   3. The name of the company may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
   WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
   TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.  */

#include <math.h>
#include <stdint.h>
#include "math_config.h"

#define N (1 << EXP_TABLE_BITS)
#define Shift __exp_data.exp2_shift
#define T __exp_data.tab
#define C1 __exp_data.exp2_poly[0]
#define C2 __exp_data.exp2_poly[1]
#define C3 __exp_data.exp2_poly[2]
#define C4 __exp_data.exp2_poly[3]
#define C5 __exp_data.exp2_poly[4]
#define C6 __exp_data.exp2_poly[5]
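
/* N, Shift, T and the C* coefficients all come from the shared __exp_data
   structure declared by math_config.h (the same data is also used by exp);
   T holds 2*N entries, a (tail, scale-bits) pair for each 2^(i/N).  */
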
/* Handle cases that may overflow or underflow when computing the result that
   is scale*(1+TMP) without intermediate rounding.  The bit representation of
   scale is in SBITS, however it has a computed exponent that may have
   overflown into the sign bit so that needs to be adjusted before using it as
   a double.  (int32_t)KI is the k used in the argument reduction and exponent
   adjustment of scale, positive k here means the result may overflow and
   negative k means the result may underflow.  */
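/* For example, k is negative for inputs like x = -1060.0 whose result is
   subnormal; in that case the low 32 bits of KI hold k in two's complement,
   so bit 31 is set, which is what the test below checks.  */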
static inline double
specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
{
  double_t scale, y;

  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by 1.  */
      sbits -= 1ull << 52;
      scale = asdouble (sbits);
      y = 2 * (scale + scale * tmp);
      return check_oflow (y);
    }
  /* k < 0, need special care in the subnormal range.  */
  sbits += 1022ull << 52;
  scale = asdouble (sbits);
  y = scale + scale * tmp;
  if (y < 1.0)
    {
      /* Round y to the right precision before scaling it into the subnormal
         range to avoid double rounding that can cause 0.5+E/2 ulp error where
         E is the worst-case ulp error outside the subnormal range.  So this
         is only useful if the goal is better than 1 ulp worst-case error.  */
      double_t hi, lo;
      lo = scale - y + scale * tmp;
      hi = 1.0 + y;
      lo = 1.0 - hi + y + lo;
      y = eval_as_double (hi + lo) - 1.0;
      /* Avoid -0.0 with downward rounding.  */
      if (WANT_ROUNDING && y == 0.0)
        y = 0.0;
      /* The underflow exception needs to be signaled explicitly.  */
      force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
    }
  y = 0x1p-1022 * y;
  return check_uflow (y);
}

/* Top 12 bits of a double (sign and exponent bits).  */
static inline uint32_t
top12 (double x)
{
  return asuint64 (x) >> 52;
}
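
/* For example, top12 (1.0) == 0x3ff and top12 (-2.0) == 0xc00; masking with
   0x7ff below discards the sign bit so only |x| matters for the range checks.  */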
double
exp2 (double x)
{
  uint32_t abstop;
  uint64_t ki, idx, top, sbits;
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2.  */
  double_t kd, r, r2, scale, tail, tmp;
  abstop = top12 (x) & 0x7ff;
  if (unlikely (abstop - top12 (0x1p-54) >= top12 (512.0) - top12 (0x1p-54)))
    {
      if (abstop - top12 (0x1p-54) >= 0x80000000)
        /* Avoid spurious underflow for tiny x.  */
        /* Note: 0 is common input.  */
        return WANT_ROUNDING ? 1.0 + x : 1.0;
      if (abstop >= top12 (1024.0))
        {
          if (asuint64 (x) == asuint64 (-INFINITY))
            return 0.0;
          if (abstop >= top12 (INFINITY))
            return 1.0 + x;
          if (!(asuint64 (x) >> 63))
            return __math_oflow (0);
          else if (asuint64 (x) >= asuint64 (-1075.0))
            return __math_uflow (0);
        }
      if (2 * asuint64 (x) > 2 * asuint64 (928.0))
        /* Large |x| is special cased below.  */
        abstop = 0;
    }
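
  /* Past this point x is finite; |x| <= 928 proceeds with a valid scale, while
     larger magnitudes (up to the overflow/underflow limits handled above)
     continue with abstop == 0 and are finished in specialcase.  */
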
  /* exp2(x) = 2^(k/N) * 2^r, with 2^r in [2^(-1/2N),2^(1/2N)].  */
  /* x = k/N + r, with int k and r in [-1/2N, 1/2N].  */
  kd = eval_as_double (x + Shift);
  ki = asuint64 (kd); /* k.  */
  kd -= Shift; /* k/N for int k.  */
  r = x - kd;
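  /* Rounding trick: Shift is a large round-to-int constant (0x1.8p52 / N in
     the accompanying exp data), so x + Shift rounds x to the nearest multiple
     of 1/N and leaves k = round(x*N) in the low mantissa bits of kd, and hence
     in the low bits of ki.  */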
  /* 2^(k/N) ~= scale * (1 + tail).  */
  idx = 2 * (ki % N);
  top = ki << (52 - EXP_TABLE_BITS);
  tail = asdouble (T[idx]);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
  sbits = T[idx + 1] + top;
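  /* Adding top folds the integer part of k/N into the exponent field of the
     table entry, so sbits is the bit pattern of scale with
     scale * (1 + tail) ~= 2^(k/N); for k outside the range noted above the
     sum can spill into the sign bit, which specialcase corrects before
     converting sbits to a double.  */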
  /* exp2(x) = 2^(k/N) * 2^r ~= scale + scale * (tail + 2^r - 1).  */
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  r2 = r * r;
  /* Without fma the worst case error is 0.5/N ulp larger.  */
  /* Worst case error is less than 0.5+0.86/N+(abs poly error * 2^53) ulp.  */
#if EXP2_POLY_ORDER == 4
  tmp = tail + r * C1 + r2 * C2 + r * r2 * (C3 + r * C4);
#elif EXP2_POLY_ORDER == 5
  tmp = tail + r * C1 + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);
#elif EXP2_POLY_ORDER == 6
  tmp = tail + r * C1 + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5 + r2 * C6);
#endif
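  /* The polynomial is split into independent sub-expressions rather than a
     single Horner chain, trading a few extra multiplies for a shorter
     dependency chain on superscalar cores (see the comment above).  */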
  if (unlikely (abstop == 0))
    return specialcase (tmp, sbits, ki);
  scale = asdouble (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-65 and scale > 2^-928, so there
     is no spurious underflow here even without fma.  */
  return scale + scale * tmp;
}
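
/* A minimal usage sketch, not part of the library: it assumes this file is
   built together with the shared exp data table and the error helpers from
   math_config.h.  The EXP2_SANITY_CHECK guard below is hypothetical and only
   illustrates calling the function through the standard <math.h> prototype.  */
#ifdef EXP2_SANITY_CHECK
#include <stdio.h>

int
main (void)
{
  printf ("exp2(10.0)    = %a\n", exp2 (10.0));    /* Expect 0x1p+10 (1024).  */
  printf ("exp2(0.5)     = %a\n", exp2 (0.5));     /* Close to sqrt(2).  */
  printf ("exp2(-1060.0) = %a\n", exp2 (-1060.0)); /* Subnormal, via specialcase.  */
  return 0;
}
#endif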