Use a unique_ptr to manage PFFFT_Setup objects
[openal-soft.git] / common / alnumeric.h
blob19ebd72e4297a430dc6666523bfb2f60d837e2e4
1 #ifndef AL_NUMERIC_H
2 #define AL_NUMERIC_H
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#ifdef HAVE_INTRIN_H
#include <intrin.h>
#endif
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "albit.h"
#include "altraits.h"
#include "opthelpers.h"
/* Fixed-width and size-type integer literal suffixes, e.g. 5_i64, 5_uz. */
constexpr auto operator""_i64(unsigned long long val) noexcept { return static_cast<int64_t>(val); }
constexpr auto operator""_u64(unsigned long long val) noexcept { return static_cast<uint64_t>(val); }

/* _z yields the signed counterpart of size_t (a la C++23's literal). */
constexpr auto operator""_z(unsigned long long val) noexcept
{ return static_cast<std::make_signed_t<size_t>>(val); }
constexpr auto operator""_uz(unsigned long long val) noexcept { return static_cast<size_t>(val); }
constexpr auto operator""_zu(unsigned long long val) noexcept { return static_cast<size_t>(val); }
/** Gets the English ordinal suffix ("st"/"nd"/"rd"/"th") for a count. */
constexpr auto GetCounterSuffix(size_t count) noexcept -> const char*
{
    /* The "teens" (11th..13th, 111th..113th, ...) always take "th". */
    if(((count%100)/10) == 1)
        return "th";
    switch(count%10)
    {
    case 1: return "st";
    case 2: return "nd";
    case 3: return "rd";
    default: break;
    }
    return "th";
}
/** Linearly interpolates between val1 and val2 by factor mu (0=val1, 1=val2). */
constexpr inline float lerpf(float val1, float val2, float mu) noexcept
{
    const float delta{val2 - val1};
    return val1 + delta*mu;
}
/**
 * 4-point, 3rd-order (Catmull-Rom-style) interpolation of the val2..val3
 * segment by factor mu (0=val2, 1=val3), using val1/val4 as outer context.
 */
constexpr inline float cubic(float val1, float val2, float val3, float val4, float mu) noexcept
{
    const float muSq{mu*mu}, muCu{muSq*mu};
    /* Per-sample weights from the cubic basis polynomials. */
    const float w1{-0.5f*muCu + muSq + -0.5f*mu};
    const float w2{ 1.5f*muCu + -2.5f*muSq + 1.0f};
    const float w3{-1.5f*muCu + 2.0f*muSq + 0.5f*mu};
    const float w4{ 0.5f*muCu + -0.5f*muSq};
    return val1*w1 + val2*w2 + val3*w3 + val4*w4;
}
/** Double-precision counterpart of lerpf. */
constexpr inline double lerpd(double val1, double val2, double mu) noexcept
{
    const double delta{val2 - val1};
    return val1 + delta*mu;
}
/** Find the next power-of-2 for non-power-of-2 numbers. */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    if(value > 0)
    {
        /* Smear the highest set bit of value-1 into every lower bit, so
         * adding 1 afterward lands exactly on a power of two. Powers of two
         * map to themselves; 0 maps to 1.
         */
        --value;
        for(uint32_t shift{1};shift < 32;shift <<= 1)
            value |= value>>shift;
    }
    return value+1;
}
71 /**
72 * If the value is not already a multiple of r, round down to the next
73 * multiple.
75 template<typename T>
76 constexpr T RoundDown(T value, al::type_identity_t<T> r) noexcept
77 { return value - (value%r); }
79 /**
80 * If the value is not already a multiple of r, round up to the next multiple.
82 template<typename T>
83 constexpr T RoundUp(T value, al::type_identity_t<T> r) noexcept
84 { return RoundDown(value + r-1, r); }
/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    /* cvtss2si converts using the current MXCSR rounding mode. */
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0

    /* 32-bit MSVC without SSE codegen: x87 fistp stores using the FPU
     * control-word rounding mode.
     */
    int i;
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)

    /* GCC/Clang x86 without SSE math: same x87 fistp approach, via GNU
     * inline asm ("t" = top of the x87 stack).
     */
    int i;
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
    return i;

#else

    /* Fallback: a plain cast, which truncates toward zero rather than using
     * the current rounding mode.
     */
    return static_cast<int>(f);
#endif
}
117 inline unsigned int fastf2u(float f) noexcept
118 { return static_cast<unsigned int>(fastf2i(f)); }
/** Converts float-to-int using standard behavior (truncation). */
inline int float2int(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    /* cvttss2si always truncates, regardless of the rounding mode. */
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE_MATH__))
    /* x87-only targets: truncate by decomposing the IEEE-754 single's bits
     * instead of changing the FPU rounding mode (which is slow).
     */
    const int conv_i{al::bit_cast<int>(f)};

    /* conv_i>>31 is 0 or -1; OR with 1 yields +1 or -1. */
    const int sign{(conv_i>>31) | 1};
    /* Unbias the exponent and shift relative to the 23-bit mantissa. */
    const int shift{((conv_i>>23)&0xff) - (127+23)};

    /* Over/underflow */
    if(shift >= 31 || shift < -23) UNLIKELY
        return 0;

    /* Restore the implicit leading 1 bit of the mantissa. */
    const int mant{(conv_i&0x7fffff) | 0x800000};
    if(shift < 0) LIKELY
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

#else

    return static_cast<int>(f);
#endif
}
148 inline unsigned int float2uint(float f) noexcept
149 { return static_cast<unsigned int>(float2int(f)); }
/** Converts double-to-int using standard behavior (truncation). */
inline int double2int(double d) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    /* cvttsd2si always truncates, regardless of the rounding mode. */
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE2_MATH__))
    /* No SSE2 scalar-double support: truncate by decomposing the IEEE-754
     * double's bits, mirroring the float2int approach.
     */
    const int64_t conv_i64{al::bit_cast<int64_t>(d)};

    /* conv_i64>>63 is 0 or -1; OR with 1 yields +1 or -1. */
    const int sign{static_cast<int>(conv_i64 >> 63) | 1};
    /* Unbias the exponent and shift relative to the 52-bit mantissa. */
    const int shift{(static_cast<int>(conv_i64 >> 52) & 0x7ff) - (1023 + 52)};

    /* Over/underflow */
    if(shift >= 63 || shift < -52) UNLIKELY
        return 0;

    /* Restore the implicit leading 1 bit of the mantissa. */
    const int64_t mant{(conv_i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64};
    if(shift < 0) LIKELY
        return static_cast<int>(mant >> -shift) * sign;
    return static_cast<int>(mant << shift) * sign;

#else

    return static_cast<int>(d);
#endif
}
/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 */
inline float fast_roundf(float f) noexcept
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)

    /* x87 frndint rounds per the FPU control-word rounding mode. */
    float out;
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
    return out;

#elif (defined(__GNUC__) || defined(__clang__)) && defined(__aarch64__)

    /* AArch64 frintx rounds per the current FPCR rounding mode. */
    float out;
    __asm__ volatile("frintx %s0, %s1" : "=w"(out) : "w"(f));
    return out;

#else

    /* Integral limit, where sub-integral precision is not available for
     * floats. Indexed by sign bit: [0]=positive, [1]=negative.
     */
    static constexpr std::array ilim{
         8388608.0f /*  0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */
    };
    const unsigned int conv_i{al::bit_cast<unsigned int>(f)};

    const unsigned int sign{(conv_i>>31)&0x01};
    const unsigned int expo{(conv_i>>23)&0xff};

    if(expo >= 150/*+23*/) UNLIKELY
    {
        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
         * precision, so it's already an integral value. We don't need to worry
         * about infinity or NaN here.
         */
        return f;
    }

    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced to
     * round to an integral value. Removing the integral limit then restores
     * the initial value rounded to the integral. The compiler should not
     * optimize this out because of non-associative rules on floating-point
     * math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
     * may break without __builtin_assoc_barrier support).
     */
#if HAS_BUILTIN(__builtin_assoc_barrier)
    return __builtin_assoc_barrier(f + ilim[sign]) - ilim[sign];
#else
    f += ilim[sign];
    return f - ilim[sign];
#endif
#endif
}
// Converts level (mB) to gain.
inline float level_mb_to_gain(float x)
{
    /* -10,000 mB (-100 dB) or lower is treated as silence. */
    return (x <= -10'000.0f) ? 0.0f : std::pow(10.0f, x / 2'000.0f);
}
// Converts gain to level (mB).
inline float gain_to_level_mb(float x)
{
    /* Non-positive gains are silence: clamp to the -10,000 mB floor. */
    if(x <= 0.0f)
        return -10'000.0f;
    const float level{std::log10(x) * 2'000.0f};
    return std::max(level, -10'000.0f);
}
257 #endif /* AL_NUMERIC_H */