/*
 * Configuration for math routines.
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#ifndef _MATH_CONFIG_H
#define _MATH_CONFIG_H

#include <math.h>
#include <stdint.h>

#ifndef WANT_ROUNDING
/* If defined to 1, return correct results for special cases in non-nearest
   rounding modes (logf (1.0f) returns 0.0f with FE_DOWNWARD rather than -0.0f).
   This may be set to 0 if there is no fenv support or if math functions only
   get called in round to nearest mode.  */
# define WANT_ROUNDING 1
#endif
#ifndef WANT_ERRNO
/* If defined to 1, set errno in math functions according to ISO C.  Many math
   libraries do not set errno, so this is 0 by default.  It may need to be
   set to 1 if math.h has (math_errhandling & MATH_ERRNO) != 0.  */
# define WANT_ERRNO 0
#endif
#ifndef WANT_ERRNO_UFLOW
/* Set errno to ERANGE if result underflows to 0 (in all rounding modes).  */
# define WANT_ERRNO_UFLOW (WANT_ROUNDING && WANT_ERRNO)
#endif
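
/* Example (illustrative, not part of the original header): since the WANT_*
   knobs above are guarded by #ifndef, a port can override them from the
   build system instead of editing this file, e.g.

     cc -DWANT_ROUNDING=0 -DWANT_ERRNO=1 -c expf.c

   The flag values shown are hypothetical; pick whatever matches the target's
   fenv and errno requirements.  */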

/* Compiler can inline round as a single instruction.  */
#ifndef HAVE_FAST_ROUND
# if __aarch64__
#  define HAVE_FAST_ROUND 1
# else
#  define HAVE_FAST_ROUND 0
# endif
#endif

/* Compiler can inline lround, but not (long)round(x).  */
#ifndef HAVE_FAST_LROUND
# if __aarch64__ && (100*__GNUC__ + __GNUC_MINOR__) >= 408 && __NO_MATH_ERRNO__
#  define HAVE_FAST_LROUND 1
# else
#  define HAVE_FAST_LROUND 0
# endif
#endif

/* Compiler can inline fma as a single instruction.  */
#ifndef HAVE_FAST_FMA
# if defined FP_FAST_FMA || __aarch64__
#  define HAVE_FAST_FMA 1
# else
#  define HAVE_FAST_FMA 0
# endif
#endif

/* Provide *_finite symbols and some of the glibc hidden symbols
   so libmathlib can be used with binaries compiled against glibc
   to interpose math functions with both static and dynamic linking.  */
#ifndef USE_GLIBC_ABI
# if __GNUC__
#  define USE_GLIBC_ABI 1
# else
#  define USE_GLIBC_ABI 0
# endif
#endif

/* Optionally used extensions.  */
#if __GNUC__ >= 5
# define HIDDEN __attribute__ ((__visibility__ ("hidden")))
# define NOINLINE __attribute__ ((noinline))
# define UNUSED __attribute__ ((unused))
# define likely(x) __builtin_expect (!!(x), 1)
# define unlikely(x) __builtin_expect (x, 0)
# if __GNUC__ >= 9
#  define attribute_copy(f) __attribute__ ((copy (f)))
# else
#  define attribute_copy(f)
# endif
# define strong_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f))) attribute_copy (f);
# define hidden_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f), visibility ("hidden"))) \
  attribute_copy (f);
#else
# define HIDDEN
# define NOINLINE
# define UNUSED
# define likely(x) (x)
# define unlikely(x) (x)
#endif
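
/* Example (illustrative, not part of the original header): with USE_GLIBC_ABI
   enabled, an implementation file can use these macros to export extra
   glibc-compatible names for a routine, for instance

     strong_alias (expf, __expf_finite)

   The exact alias names each routine provides are defined in the
   corresponding source files, not here.  */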

#if HAVE_FAST_ROUND
/* When set, the roundtoint and converttoint functions are provided with
   the semantics documented below.  */
# define TOINT_INTRINSICS 1

/* Round x to nearest int in all rounding modes, ties have to be rounded
   consistently with converttoint so the results match.  If the result
   would be outside of [-2^31, 2^31-1] then the semantics is unspecified.  */
static inline double_t
roundtoint (double_t x)
{
  return round (x);
}

/* Convert x to nearest int in all rounding modes, ties have to be rounded
   consistently with roundtoint.  If the result is not representable in an
   int32_t then the semantics is unspecified.  */
static inline int32_t
converttoint (double_t x)
{
# if HAVE_FAST_LROUND
  return lround (x);
# else
  return (long) round (x);
# endif
}
#endif
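
/* Illustrative sketch (not part of the original header): the exp-style
   reduction code typically pairs the two helpers so that the rounded double
   and the extracted integer agree in every rounding mode.  The variable
   names here are hypothetical:

     double_t z = invln2N * x;
     double_t kd = roundtoint (z);   (z rounded to an integral double_t)
     int32_t ki = converttoint (z);  (the same integer as an int32_t)

   Both calls must round ties the same way, otherwise kd and ki disagree.  */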

static inline uint32_t
asuint (float f)
{
  union { float f; uint32_t i; } u = {f};
  return u.i;
}

static inline float
asfloat (uint32_t i)
{
  union { uint32_t i; float f; } u = {i};
  return u.f;
}

static inline uint64_t
asuint64 (double f)
{
  union { double f; uint64_t i; } u = {f};
  return u.i;
}

static inline double
asdouble (uint64_t i)
{
  union { uint64_t i; double f; } u = {i};
  return u.f;
}
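
/* Example (illustrative, not part of the original header): the routines use
   these bit casts for sign and exponent tests, e.g.

     uint64_t ix = asuint64 (x);
     int top12 = ix >> 52;                            (sign and biased exponent)
     double ax = asdouble (ix & 0x7fffffffffffffff);  (fabs via bit clear)

   The variable names here are hypothetical.  */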

#ifndef IEEE_754_2008_SNAN
# define IEEE_754_2008_SNAN 1
#endif
static inline int
issignalingf_inline (float x)
{
  uint32_t ix = asuint (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7fc00000) == 0x7fc00000;
  return 2 * (ix ^ 0x00400000) > 2u * 0x7fc00000;
}

static inline int
issignaling_inline (double x)
{
  uint64_t ix = asuint64 (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7ff8000000000000) == 0x7ff8000000000000;
  return 2 * (ix ^ 0x0008000000000000) > 2 * 0x7ff8000000000000ULL;
}
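
/* Note (added commentary): under the IEEE 754-2008 convention a quiet NaN has
   the top fraction bit set, so the checks above flip that bit with the xor,
   double the value to discard the sign bit, and compare against twice the
   quiet-NaN threshold; only signaling NaNs end up above it.  */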

#if __aarch64__ && __GNUC__
/* Prevent the optimization of a floating-point expression.  */
static inline float
opt_barrier_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
static inline double
opt_barrier_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Force the evaluation of a floating-point expression for its side-effect.  */
static inline void
force_eval_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
static inline void
force_eval_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
#else
static inline float
opt_barrier_float (float x)
{
  volatile float y = x;
  return y;
}
static inline double
opt_barrier_double (double x)
{
  volatile double y = x;
  return y;
}
static inline void
force_eval_float (float x)
{
  volatile float y UNUSED = x;
}
static inline void
force_eval_double (double x)
{
  volatile double y UNUSED = x;
}
#endif
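
/* Example (illustrative, not part of the original header): the special-case
   paths use these to keep a floating-point side effect alive, e.g. raising
   FE_UNDERFLOW from a product whose value is otherwise unused:

     force_eval_float (opt_barrier_float (0x1p-95f) * 0x1p-95f);

   The barrier stops the compiler from folding the multiply away, and
   force_eval keeps the result live so the exception is actually raised.  */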

/* Evaluate an expression as the specified type, normally a type
   cast should be enough, but compilers implement non-standard
   excess-precision handling, so when FLT_EVAL_METHOD != 0 then
   these functions may need to be customized.  */
static inline float
eval_as_float (float x)
{
  return x;
}
static inline double
eval_as_double (double x)
{
  return x;
}
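
/* Example (illustrative, not part of the original header): wrapping an
   intermediate forces it to be rounded to its nominal type even when the
   compiler keeps float expressions in wider registers (FLT_EVAL_METHOD != 0):

     float r = eval_as_float (a * b + c);

   With excess precision this needs a real narrowing cast or store, which is
   why the helpers exist as customization points.  */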

/* Error handling tail calls for special cases, with a sign argument.
   The sign of the return value is set if the argument is non-zero.  */

/* The result overflows.  */
HIDDEN float __math_oflowf (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN float __math_uflowf (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN float __math_may_uflowf (uint32_t);
/* Division by zero.  */
HIDDEN float __math_divzerof (uint32_t);
/* The result overflows.  */
HIDDEN double __math_oflow (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN double __math_uflow (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN double __math_may_uflow (uint32_t);
/* Division by zero.  */
HIDDEN double __math_divzero (uint32_t);
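
/* Example (illustrative, not part of the original header): a routine that
   detects overflow or underflow early tail-calls one of the helpers above,
   passing a non-zero argument when the result should be negative, e.g.

     if (unlikely (k > MAX_EXP_INDEX))
       return __math_oflowf (sign_bias);

   MAX_EXP_INDEX and sign_bias are hypothetical names for this sketch.  */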

/* Error handling using input checking.  */

/* Invalid input unless it is a quiet NaN.  */
HIDDEN float __math_invalidf (float);
/* Invalid input unless it is a quiet NaN.  */
HIDDEN double __math_invalid (double);

/* Error handling using output checking, only for errno setting.  */

/* Check if the result overflowed to infinity.  */
HIDDEN double __math_check_oflow (double);
/* Check if the result underflowed to 0.  */
HIDDEN double __math_check_uflow (double);

/* Check if the result overflowed to infinity.  */
static inline double
check_oflow (double x)
{
  return WANT_ERRNO ? __math_check_oflow (x) : x;
}

/* Check if the result underflowed to 0.  */
static inline double
check_uflow (double x)
{
  return WANT_ERRNO ? __math_check_uflow (x) : x;
}
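
/* Example (illustrative, not part of the original header): output checking is
   used on return paths where over/underflow is possible but rare, so the
   errno work is skipped entirely when WANT_ERRNO is 0:

     return check_uflow (y * scale);

   y and scale are hypothetical names for this sketch.  */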

/* Shared between expf, exp2f and powf.  */
#define EXP2F_TABLE_BITS 5
#define EXP2F_POLY_ORDER 3
extern const struct exp2f_data
{
  uint64_t tab[1 << EXP2F_TABLE_BITS];
  double shift_scaled;
  double poly[EXP2F_POLY_ORDER];
  double shift;
  double invln2_scaled;
  double poly_scaled[EXP2F_POLY_ORDER];
} __exp2f_data HIDDEN;
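
/* Note (added commentary): tab[] holds 2^(i/N) for N = 1 << EXP2F_TABLE_BITS
   as raw double bit patterns, so a lookup can be combined with the integer
   exponent using plain 64-bit arithmetic before converting back with
   asdouble.  This is how the shared expf/exp2f/powf kernels use the table.  */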

#define LOGF_TABLE_BITS 4
#define LOGF_POLY_ORDER 4
extern const struct logf_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOGF_TABLE_BITS];
  double ln2;
  double poly[LOGF_POLY_ORDER - 1]; /* First order coefficient is 1.  */
} __logf_data HIDDEN;

#define LOG2F_TABLE_BITS 4
#define LOG2F_POLY_ORDER 4
extern const struct log2f_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOG2F_TABLE_BITS];
  double poly[LOG2F_POLY_ORDER];
} __log2f_data HIDDEN;

#define POWF_LOG2_TABLE_BITS 4
#define POWF_LOG2_POLY_ORDER 5
#if TOINT_INTRINSICS
# define POWF_SCALE_BITS EXP2F_TABLE_BITS
#else
# define POWF_SCALE_BITS 0
#endif
#define POWF_SCALE ((double) (1 << POWF_SCALE_BITS))
extern const struct powf_log2_data
{
  struct
  {
    double invc, logc;
  } tab[1 << POWF_LOG2_TABLE_BITS];
  double poly[POWF_LOG2_POLY_ORDER];
} __powf_log2_data HIDDEN;

#define EXP_TABLE_BITS 7
#define EXP_POLY_ORDER 5
/* Use polynomial that is optimized for a wider input range.  This may be
   needed for good precision in non-nearest rounding and !TOINT_INTRINSICS.  */
#define EXP_POLY_WIDE 0
/* Use close to nearest rounding toint when !TOINT_INTRINSICS.  This may be
   needed for good precision in non-nearest rounding and !EXP_POLY_WIDE.  */
#define EXP_USE_TOINT_NARROW 0
#define EXP2_POLY_ORDER 5
#define EXP2_POLY_WIDE 0
extern const struct exp_data
{
  double invln2N;
  double shift;
  double negln2hiN;
  double negln2loN;
  double poly[4]; /* Last four coefficients.  */
  double exp2_shift;
  double exp2_poly[EXP2_POLY_ORDER];
  uint64_t tab[2*(1 << EXP_TABLE_BITS)];
} __exp_data HIDDEN;

#define LOG_TABLE_BITS 7
#define LOG_POLY_ORDER 6
#define LOG_POLY1_ORDER 12
extern const struct log_data
{
  double ln2hi;
  double ln2lo;
  double poly[LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  double poly1[LOG_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG_TABLE_BITS];
#endif
} __log_data HIDDEN;

#define LOG2_TABLE_BITS 6
#define LOG2_POLY_ORDER 7
#define LOG2_POLY1_ORDER 11
extern const struct log2_data
{
  double invln2hi;
  double invln2lo;
  double poly[LOG2_POLY_ORDER - 1];
  double poly1[LOG2_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG2_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG2_TABLE_BITS];
#endif
} __log2_data HIDDEN;

#define POW_LOG_TABLE_BITS 7
#define POW_LOG_POLY_ORDER 8
extern const struct pow_log_data
{
  double ln2hi;
  double ln2lo;
  double poly[POW_LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  /* Note: the pad field is unused, but allows slightly faster indexing.  */
  struct {double invc, pad, logc, logctail;} tab[1 << POW_LOG_TABLE_BITS];
} __pow_log_data HIDDEN;

#endif