/*
 * Configuration for math routines.
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#ifndef _MATH_CONFIG_H
#define _MATH_CONFIG_H

#include <math.h>
#include <stdint.h>

#ifndef WANT_ROUNDING
/* If defined to 1, return correct results for special cases in non-nearest
   rounding modes (logf (1.0f) returns 0.0f with FE_DOWNWARD rather than
   -0.0f).  This may be set to 0 if there is no fenv support or if math
   functions only get called in round to nearest mode.  */
# define WANT_ROUNDING 1
#endif
#ifndef WANT_ERRNO
/* If defined to 1, set errno in math functions according to ISO C.  Many math
   libraries do not set errno, so this is 0 by default.  It may need to be
   set to 1 if math.h has (math_errhandling & MATH_ERRNO) != 0.  */
# define WANT_ERRNO 0
#endif
#ifndef WANT_ERRNO_UFLOW
/* Set errno to ERANGE if result underflows to 0 (in all rounding modes).  */
# define WANT_ERRNO_UFLOW (WANT_ROUNDING && WANT_ERRNO)
#endif
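
/* Illustrative note (added, not part of the original header): since each of
   the WANT_* macros is guarded by #ifndef, a port can override the defaults
   before including this file, e.g.

     #define WANT_ROUNDING 0
     #define WANT_ERRNO 0
     #include "math_config.h"

   for a target without fenv support.  The values shown are only a sketch of
   one possible configuration.  */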

/* Compiler can inline round as a single instruction.  */
#ifndef HAVE_FAST_ROUND
# if __aarch64__
# define HAVE_FAST_ROUND 1
# else
# define HAVE_FAST_ROUND 0
# endif
#endif

/* Compiler can inline lround, but not (long)round(x).  */
#ifndef HAVE_FAST_LROUND
# if __aarch64__ && (100*__GNUC__ + __GNUC_MINOR__) >= 408 && __NO_MATH_ERRNO__
# define HAVE_FAST_LROUND 1
# else
# define HAVE_FAST_LROUND 0
# endif
#endif

/* Compiler can inline fma as a single instruction.  */
#ifndef HAVE_FAST_FMA
# if defined FP_FAST_FMA || __aarch64__
# define HAVE_FAST_FMA 1
# else
# define HAVE_FAST_FMA 0
# endif
#endif

/* Provide *_finite symbols and some of the glibc hidden symbols
   so libmathlib can be used with binaries compiled against glibc
   to interpose math functions with both static and dynamic linking.  */
#ifndef USE_GLIBC_ABI
# if __GNUC__
# define USE_GLIBC_ABI 1
# else
# define USE_GLIBC_ABI 0
# endif
#endif

/* Optionally used extensions.  */
#ifdef __GNUC__
# define HIDDEN __attribute__ ((__visibility__ ("hidden")))
# define NOINLINE __attribute__ ((noinline))
# define UNUSED __attribute__ ((unused))
# define likely(x) __builtin_expect (!!(x), 1)
# define unlikely(x) __builtin_expect (x, 0)
# if __GNUC__ >= 9
# define attribute_copy(f) __attribute__ ((copy (f)))
# else
# define attribute_copy(f)
# endif
# define strong_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f))) attribute_copy (f);
# define hidden_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f), visibility ("hidden"))) \
  attribute_copy (f);
#else
# define HIDDEN
# define NOINLINE
# define UNUSED
# define likely(x) (x)
# define unlikely(x) (x)
#endif
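
/* Illustrative sketch (added, not part of the original header): with GCC,
   strong_alias (expf, __expf_finite) expands roughly to

     extern __typeof (expf) __expf_finite
       __attribute__ ((alias ("expf"))) attribute_copy (expf);

   i.e. it declares __expf_finite as another name for expf; hidden_alias
   additionally gives the alias hidden visibility.  The alias name used here
   is only an example.  */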

#if HAVE_FAST_ROUND
/* When set, the roundtoint and converttoint functions are provided with
   the semantics documented below.  */
# define TOINT_INTRINSICS 1

/* Round x to nearest int in all rounding modes, ties have to be rounded
   consistently with converttoint so the results match.  If the result
   would be outside of [-2^31, 2^31-1] then the semantics is unspecified.  */
static inline double_t
roundtoint (double_t x)
{
  return round (x);
}

/* Convert x to nearest int in all rounding modes, ties have to be rounded
   consistently with roundtoint.  If the result is not representable in an
   int32_t then the semantics is unspecified.  */
static inline int32_t
converttoint (double_t x)
{
# if HAVE_FAST_LROUND
  return lround (x);
# else
  return (long) round (x);
# endif
}
#endif
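
/* Illustrative usage sketch (assumed, not part of the original header):
   exp-style argument reduction relies on the two helpers agreeing, roughly

     double_t kd = roundtoint (z);   /. nearest integer as a double ./
     int32_t ki = converttoint (z);  /. the same integer as an int32_t ./
     double_t r = z - kd;            /. reduced argument ./

   If the two rounded a tie differently, the reduced argument r and the
   table index derived from ki would disagree.  */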

static inline uint32_t
asuint (float f)
{
  union
  {
    float f;
    uint32_t i;
  } u = {f};
  return u.i;
}

static inline float
asfloat (uint32_t i)
{
  union
  {
    uint32_t i;
    float f;
  } u = {i};
  return u.f;
}

static inline uint64_t
asuint64 (double f)
{
  union
  {
    double f;
    uint64_t i;
  } u = {f};
  return u.i;
}

static inline double
asdouble (uint64_t i)
{
  union
  {
    uint64_t i;
    double f;
  } u = {i};
  return u.f;
}
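
/* Illustrative examples (added, not part of the original header): these
   helpers reinterpret the IEEE-754 bit pattern without converting the
   value, e.g.

     asuint (1.0f)  == 0x3f800000
     asuint64 (1.0) == 0x3ff0000000000000
     asfloat (0x7f800000) is +Inf

   The union-based reinterpretation avoids strict-aliasing problems that a
   pointer cast would have.  */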

#ifndef IEEE_754_2008_SNAN
# define IEEE_754_2008_SNAN 1
#endif
static inline int
issignalingf_inline (float x)
{
  uint32_t ix = asuint (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7fc00000) == 0x7fc00000;
  return 2 * (ix ^ 0x00400000) > 2u * 0x7fc00000;
}

static inline int
issignaling_inline (double x)
{
  uint64_t ix = asuint64 (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7ff8000000000000) == 0x7ff8000000000000;
  return 2 * (ix ^ 0x0008000000000000) > 2 * 0x7ff8000000000000ULL;
}
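
/* Explanatory note (added, not in the original header): under IEEE 754-2008
   a signalling NaN has the top mantissa ("is quiet") bit clear.  The XOR
   flips that bit, which maps sNaN encodings above every other encoding, and
   the multiplication by 2 discards the sign bit, so a single unsigned
   comparison tests "NaN and not quiet" without branching on the sign.  */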

#if __aarch64__ && __GNUC__
/* Prevent the optimization of a floating-point expression.  */
static inline float
opt_barrier_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
static inline double
opt_barrier_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Force the evaluation of a floating-point expression for its side-effect.  */
static inline void
force_eval_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
static inline void
force_eval_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
#else
static inline float
opt_barrier_float (float x)
{
  volatile float y = x;
  return y;
}
static inline double
opt_barrier_double (double x)
{
  volatile double y = x;
  return y;
}
static inline void
force_eval_float (float x)
{
  volatile float y UNUSED = x;
}
static inline void
force_eval_double (double x)
{
  volatile double y UNUSED = x;
}
#endif
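
/* Illustrative usage sketch (assumed, not part of the original header):
   a common pattern is to raise the underflow exception for a tiny result
   without the compiler folding the expression away, e.g.

     force_eval_float (opt_barrier_float (0x1p-126f) * 0x1p-126f);

   The barrier keeps the multiply from being evaluated at compile time and
   force_eval keeps it from being discarded as dead code.  */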

/* Evaluate an expression as the specified type, normally a type
   cast should be enough, but compilers implement non-standard
   excess-precision handling, so when FLT_EVAL_METHOD != 0 then
   these functions may need to be customized.  */
static inline float
eval_as_float (float x)
{
  return x;
}
static inline double
eval_as_double (double x)
{
  return x;
}
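
/* Explanatory note (added, not in the original header): on a target where
   FLT_EVAL_METHOD is 2 (e.g. x87 without SSE) intermediates may be kept in
   long double precision, so a port might instead force the narrowing with
   something like

     static inline float
     eval_as_float (float x)
     {
       volatile float y = x;
       return y;
     }

   This variant is only a sketch, not what the header provides.  */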

/* Error handling tail calls for special cases, with a sign argument.
   The sign of the return value is set if the argument is non-zero.  */

/* The result overflows.  */
HIDDEN float __math_oflowf (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN float __math_uflowf (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN float __math_may_uflowf (uint32_t);
/* Division by zero.  */
HIDDEN float __math_divzerof (uint32_t);
/* The result overflows.  */
HIDDEN double __math_oflow (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN double __math_uflow (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN double __math_may_uflow (uint32_t);
/* Division by zero.  */
HIDDEN double __math_divzero (uint32_t);

/* Error handling using input checking.  */

/* Invalid input unless it is a quiet NaN.  */
HIDDEN float __math_invalidf (float);
/* Invalid input unless it is a quiet NaN.  */
HIDDEN double __math_invalid (double);

/* Error handling using output checking, only for errno setting.  */

/* Check if the result overflowed to infinity.  */
HIDDEN double __math_check_oflow (double);
/* Check if the result underflowed to 0.  */
HIDDEN double __math_check_uflow (double);

/* Check if the result overflowed to infinity.  */
static inline double
check_oflow (double x)
{
  return WANT_ERRNO ? __math_check_oflow (x) : x;
}

/* Check if the result underflowed to 0.  */
static inline double
check_uflow (double x)
{
  return WANT_ERRNO ? __math_check_uflow (x) : x;
}
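
/* Illustrative usage sketch (assumed, not part of the original header):
   a routine typically tail-calls these helpers in its special cases,
   roughly

     if (unlikely (abstop >= overflow_bound))
       return __math_oflowf (sign_bit);

   which returns a suitably signed infinity and may set errno, and wraps a
   possibly overflowing result as

     return check_oflow (eval_as_double (scale * y));

   The names abstop, overflow_bound and sign_bit above are placeholders,
   not identifiers from this header.  */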

/* Shared between expf, exp2f and powf.  */
#define EXP2F_TABLE_BITS 5
#define EXP2F_POLY_ORDER 3
extern const struct exp2f_data
{
  uint64_t tab[1 << EXP2F_TABLE_BITS];
  double shift_scaled;
  double poly[EXP2F_POLY_ORDER];
  double shift;
  double invln2_scaled;
  double poly_scaled[EXP2F_POLY_ORDER];
} __exp2f_data HIDDEN;

#define LOGF_TABLE_BITS 4
#define LOGF_POLY_ORDER 4
extern const struct logf_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOGF_TABLE_BITS];
  double ln2;
  double poly[LOGF_POLY_ORDER - 1]; /* First order coefficient is 1.  */
} __logf_data HIDDEN;

#define LOG2F_TABLE_BITS 4
#define LOG2F_POLY_ORDER 4
extern const struct log2f_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOG2F_TABLE_BITS];
  double poly[LOG2F_POLY_ORDER];
} __log2f_data HIDDEN;

#define POWF_LOG2_TABLE_BITS 4
#define POWF_LOG2_POLY_ORDER 5
#if TOINT_INTRINSICS
# define POWF_SCALE_BITS EXP2F_TABLE_BITS
#else
# define POWF_SCALE_BITS 0
#endif
#define POWF_SCALE ((double) (1 << POWF_SCALE_BITS))
extern const struct powf_log2_data
{
  struct
  {
    double invc, logc;
  } tab[1 << POWF_LOG2_TABLE_BITS];
  double poly[POWF_LOG2_POLY_ORDER];
} __powf_log2_data HIDDEN;

#define EXP_TABLE_BITS 7
#define EXP_POLY_ORDER 5
/* Use polynomial that is optimized for a wider input range.  This may be
   needed for good precision in non-nearest rounding and !TOINT_INTRINSICS.  */
#define EXP_POLY_WIDE 0
/* Use close to nearest rounding toint when !TOINT_INTRINSICS.  This may be
   needed for good precision in non-nearest rounding and !EXP_POLY_WIDE.  */
#define EXP_USE_TOINT_NARROW 0
#define EXP2_POLY_ORDER 5
#define EXP2_POLY_WIDE 0
extern const struct exp_data
{
  double invln2N;
  double shift;
  double negln2hiN;
  double negln2loN;
  double poly[4]; /* Last four coefficients.  */
  double exp2_shift;
  double exp2_poly[EXP2_POLY_ORDER];
  uint64_t tab[2*(1 << EXP_TABLE_BITS)];
} __exp_data HIDDEN;

#define LOG_TABLE_BITS 7
#define LOG_POLY_ORDER 6
#define LOG_POLY1_ORDER 12
extern const struct log_data
{
  double ln2hi;
  double ln2lo;
  double poly[LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  double poly1[LOG_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG_TABLE_BITS];
#endif
} __log_data HIDDEN;

#define LOG2_TABLE_BITS 6
#define LOG2_POLY_ORDER 7
#define LOG2_POLY1_ORDER 11
extern const struct log2_data
{
  double invln2hi;
  double invln2lo;
  double poly[LOG2_POLY_ORDER - 1];
  double poly1[LOG2_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG2_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG2_TABLE_BITS];
#endif
} __log2_data HIDDEN;

#define POW_LOG_TABLE_BITS 7
#define POW_LOG_POLY_ORDER 8
extern const struct pow_log_data
{
  double ln2hi;
  double ln2lo;
  double poly[POW_LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  /* Note: the pad field is unused, but allows slightly faster indexing.  */
  struct {double invc, pad, logc, logctail;} tab[1 << POW_LOG_TABLE_BITS];
} __pow_log_data HIDDEN;

#endif