/*
 * Double-precision e^x function.
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include <float.h>
#include <math.h>
#include <stdint.h>
#include "math_config.h"

#define N (1 << EXP_TABLE_BITS)
#define InvLn2N __exp_data.invln2N
#define NegLn2hiN __exp_data.negln2hiN
#define NegLn2loN __exp_data.negln2loN
#define Shift __exp_data.shift
#define T __exp_data.tab
#define C2 __exp_data.poly[5 - EXP_POLY_ORDER]
#define C3 __exp_data.poly[6 - EXP_POLY_ORDER]
#define C4 __exp_data.poly[7 - EXP_POLY_ORDER]
#define C5 __exp_data.poly[8 - EXP_POLY_ORDER]
#define C6 __exp_data.poly[9 - EXP_POLY_ORDER]
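
/* C2..C6 index __exp_data.poly with an offset of 5 - EXP_POLY_ORDER so that
   the same macro names pick up the coefficients matching the configured
   polynomial order (4, 5 or 6); only the macros used by the variant selected
   below are ever expanded.  */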

/* Handle cases that may overflow or underflow when computing the result that
   is scale*(1+TMP) without intermediate rounding.  The bit representation of
   scale is in SBITS, however it has a computed exponent that may have
   overflown into the sign bit so that needs to be adjusted before using it as
   a double.  (int32_t)KI is the k used in the argument reduction and exponent
   adjustment of scale, positive k here means the result may overflow and
   negative k means the result may underflow.  */
static inline double
specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
{
  double_t scale, y;

  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by <= 460.  */
      sbits -= 1009ull << 52;
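      /* Subtracting 1009 from the biased exponent keeps scale finite here;
         the 0x1p1009 factor below restores the intended magnitude.  */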
      scale = asdouble (sbits);
      y = 0x1p1009 * (scale + scale * tmp);
      return check_oflow (eval_as_double (y));
    }
  /* k < 0, need special care in the subnormal range.  */
  sbits += 1022ull << 52;
  scale = asdouble (sbits);
  y = scale + scale * tmp;
  if (y < 1.0)
    {
      /* Round y to the right precision before scaling it into the subnormal
         range to avoid double rounding that can cause 0.5+E/2 ulp error where
         E is the worst-case ulp error outside the subnormal range.  So this
         is only useful if the goal is better than 1 ulp worst-case error.  */
      double_t hi, lo;
      lo = scale - y + scale * tmp;
      hi = 1.0 + y;
      lo = 1.0 - hi + y + lo;
      y = eval_as_double (hi + lo) - 1.0;
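      /* hi + lo represents 1 + scale + scale*tmp with the rounding errors of
         y and hi folded into lo, so eval_as_double (hi + lo) - 1.0 rounds y
         at the absolute precision it will have after the 0x1p-1022 scaling
         below, avoiding a second rounding there.  */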
      /* Avoid -0.0 with downward rounding.  */
      if (WANT_ROUNDING && y == 0.0)
        y = 0.0;
      /* The underflow exception needs to be signaled explicitly.  */
      force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
    }
  y = 0x1p-1022 * y;
  return check_uflow (eval_as_double (y));
}

/* Top 12 bits of a double (sign and exponent bits).  */
static inline uint32_t
top12 (double x)
{
  return asuint64 (x) >> 52;
}
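
/* For example top12 (1.0) == 0x3ff, top12 (0x1p-54) == 0x3c9,
   top12 (512.0) == 0x408, top12 (1024.0) == 0x409 and
   top12 (INFINITY) == 0x7ff; these are the thresholds that the range
   checks in exp_inline compare abstop against.  */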

/* Computes exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
   If hastail is 0 then xtail is assumed to be 0 too.  */
static inline double
exp_inline (double x, double xtail, int hastail)
{
  uint32_t abstop;
  uint64_t ki, idx, top, sbits;
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2.  */
  double_t kd, z, r, r2, scale, tail, tmp;

  abstop = top12 (x) & 0x7ff;
  if (unlikely (abstop - top12 (0x1p-54) >= top12 (512.0) - top12 (0x1p-54)))
    {
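      /* The checks rely on unsigned wrap-around: the condition above holds
         when |x| < 0x1p-54, |x| >= 512 or x is NaN, and the check below
         isolates the |x| < 0x1p-54 case, where 1.0 + x is already a
         correctly rounded result.  */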
      if (abstop - top12 (0x1p-54) >= 0x80000000)
        /* Avoid spurious underflow for tiny x.  */
        /* Note: 0 is common input.  */
        return WANT_ROUNDING ? 1.0 + x : 1.0;
      if (abstop >= top12 (1024.0))
        {
          if (asuint64 (x) == asuint64 (-INFINITY))
            return 0.0;
          if (abstop >= top12 (INFINITY))
            return 1.0 + x;
          if (asuint64 (x) >> 63)
            return __math_uflow (0);
          else
            return __math_oflow (0);
        }
      /* Large x is special cased below.  */
      abstop = 0;
    }

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N].  */
  z = InvLn2N * x;
#if TOINT_INTRINSICS
  kd = roundtoint (z);
  ki = converttoint (z);
#elif EXP_USE_TOINT_NARROW
  /* z - kd is in [-0.5-2^-16, 0.5] in all rounding modes.  */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd) >> 16;
  kd = (double_t) (int32_t) ki;
#else
  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd);
  kd -= Shift;
#endif
  r = x + kd * NegLn2hiN + kd * NegLn2loN;
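  /* kd now holds the integer k of the reduction as a double and
     r = x - k*ln2/N, formed with ln2/N split into hi and lo parts so that
     the dominant term kd * NegLn2hiN carries essentially no rounding error.
     E.g. with N == 128 (EXP_TABLE_BITS == 7), x == 1.0 gives k == 185 and
     r ~= -0.0018143, so exp(1) ~= 2^(185/128) * exp(r).  */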
  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
  if (hastail)
    r += xtail;
  /* 2^(k/N) ~= scale * (1 + tail).  */
  idx = 2 * (ki % N);
  top = ki << (52 - EXP_TABLE_BITS);
  tail = asdouble (T[idx]);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
  sbits = T[idx + 1] + top;
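  /* T stores N entry pairs: T[idx] is the tail and T[idx + 1] the bit
     pattern of the corresponding 2^(i/N) value, pre-biased so that adding
     top amounts to adding k/N to its exponent field, i.e. sbits is the bit
     pattern of scale = 2^(k/N) when k is in the valid range above.  */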
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (tail + exp(r) - 1).  */
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  r2 = r * r;
  /* Without fma the worst case error is 0.25/N ulp larger.  */
  /* Worst case error is less than 0.5+1.11/N+(abs poly error * 2^53) ulp.  */
#if EXP_POLY_ORDER == 4
  tmp = tail + r + r2 * C2 + r * r2 * (C3 + r * C4);
#elif EXP_POLY_ORDER == 5
  tmp = tail + r + r2 * (C2 + r * C3) + r2 * r2 * (C4 + r * C5);
#elif EXP_POLY_ORDER == 6
  tmp = tail + r + r2 * (0.5 + r * C3) + r2 * r2 * (C4 + r * C5 + r2 * C6);
#endif
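  /* Each variant computes tail + p(r) with p(r) ~= exp(r) - 1, so the
     coefficients are close to the Taylor values 1/2, 1/6, 1/24, ...; the
     order 6 variant hard-codes the exact 0.5 for the r^2 term.  */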
  if (unlikely (abstop == 0))
    return specialcase (tmp, sbits, ki);
  scale = asdouble (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma.  */
  return eval_as_double (scale + scale * tmp);
}

double
exp (double x)
{
  return exp_inline (x, 0, 0);
}

/* May be useful for implementing pow where more than double
   precision input is needed.  */
double
__exp_dd (double x, double xtail)
{
  return exp_inline (x, xtail, 1);
}
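
/* For example, a pow implementation that computes y*log(x) as a hi+lo pair
   can pass the low part as xtail here to preserve the extra precision
   through the final exponential.  */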

strong_alias (exp, __exp_finite)
hidden_alias (exp, __ieee754_exp)
hidden_alias (__exp_dd, __exp1)
# if LDBL_MANT_DIG == 53
long double expl (long double x) { return exp (x); }
# endif