11 #ifdef HAVE_SSE_INTRINSICS
12 #include <xmmintrin.h>
15 #include "opthelpers.h"
18 inline constexpr int64_t operator "" _i64(unsigned long long int n
) noexcept
{ return static_cast<int64_t>(n
); }
19 inline constexpr uint64_t operator "" _u64(unsigned long long int n
) noexcept
{ return static_cast<uint64_t>(n
); }
/** Returns the lesser of two floats. */
constexpr inline float minf(float a, float b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two floats. */
constexpr inline float maxf(float a, float b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline float clampf(float val, float min, float max) noexcept
{
    const float lower{maxf(min, val)};
    return minf(max, lower);
}
/** Returns the lesser of two doubles. */
constexpr inline double mind(double a, double b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two doubles. */
constexpr inline double maxd(double a, double b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline double clampd(double val, double min, double max) noexcept
{
    const double lower{maxd(min, val)};
    return mind(max, lower);
}
/** Returns the lesser of two unsigned ints. */
constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two unsigned ints. */
constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
{
    const unsigned int lower{maxu(min, val)};
    return minu(max, lower);
}
/** Returns the lesser of two ints. */
constexpr inline int mini(int a, int b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two ints. */
constexpr inline int maxi(int a, int b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline int clampi(int val, int min, int max) noexcept
{
    const int lower{maxi(min, val)};
    return mini(max, lower);
}
/** Returns the lesser of two int64_ts. */
constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two int64_ts. */
constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
{
    const int64_t lower{maxi64(min, val)};
    return mini64(max, lower);
}
/** Returns the lesser of two uint64_ts. */
constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two uint64_ts. */
constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
{
    const uint64_t lower{maxu64(min, val)};
    return minu64(max, lower);
}
/** Returns the lesser of two size_ts. */
constexpr inline size_t minz(size_t a, size_t b) noexcept
{
    if(a > b)
        return b;
    return a;
}

/** Returns the greater of two size_ts. */
constexpr inline size_t maxz(size_t a, size_t b) noexcept
{
    if(a > b)
        return a;
    return b;
}

/** Limits val to the inclusive range [min, max]. */
constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
{
    const size_t lower{maxz(min, val)};
    return minz(max, lower);
}
/**
 * Linearly interpolates from val1 to val2 by the factor mu (mu=0 gives val1,
 * mu=1 gives val2). The original operation order is preserved exactly so
 * results are bit-identical.
 */
constexpr inline float lerpf(float val1, float val2, float mu) noexcept
{
    const float delta{val2 - val1};
    return val1 + delta*mu;
}
/**
 * 4-point, third-order interpolation of the segment between val2 and val3,
 * using val1 and val4 as the outer support points. The coefficients match the
 * standard Catmull-Rom weights: mu=0 yields val2 and mu=1 yields val3.
 *
 * Fix: the function's opening and closing braces were lost in this extract;
 * the arithmetic itself is unchanged (same expressions, same order).
 */
constexpr inline float cubic(float val1, float val2, float val3, float val4, float mu) noexcept
{
    const float mu2{mu*mu}, mu3{mu2*mu};
    /* Per-sample weights as a function of mu. */
    const float a0{-0.5f*mu3 +       mu2 + -0.5f*mu};
    const float a1{ 1.5f*mu3 + -2.5f*mu2 + 1.0f};
    const float a2{-1.5f*mu3 +  2.0f*mu2 +  0.5f*mu};
    const float a3{ 0.5f*mu3 + -0.5f*mu2};
    return val1*a0 + val2*a1 + val3*a2 + val4*a3;
}
/** Find the next power-of-2 for non-power-of-2 numbers. */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    /* NOTE(review): the body of this function was missing from this extract;
     * this is the standard bit-smearing round-up (decrement, OR in all lower
     * bits, increment). Powers of two map to themselves; 0 maps to 1.
     * Confirm against the original header. */
    if(value > 0)
    {
        value--;
        value |= value>>1;
        value |= value>>2;
        value |= value>>4;
        value |= value>>8;
        value |= value>>16;
    }
    return value+1;
}
/** Round up a value to the next multiple of r. */
inline size_t RoundUp(size_t value, size_t r) noexcept
{
    /* NOTE(review): the "value += r-1;" line and the braces were missing from
     * this extract; without the bias the visible return would round *down*,
     * contradicting the doc comment. Confirm against the original header.
     * Values already a multiple of r are returned unchanged. r must be > 0. */
    value += r-1;
    return value - (value%r);
}
/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 *
 * NOTE(review): this extract is missing several lines of the original
 * function (the braces, the MSVC branch body, the declaration of the local
 * 'i', and the trailing #else/#endif) -- restore them from the original
 * header before compiling.
 */
inline int fastf2i(float f) noexcept
#if defined(HAVE_SSE_INTRINSICS)
    /* SSE1 path: cvtss2si converts using the current MXCSR rounding mode. */
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
    /* x86 GCC/Clang inline asm: convert with the current rounding mode. */
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
    /* x87 fallback: fistpl stores ST(0) as a 32-bit int per the FPU control
     * word; "st" is clobbered by the pop. */
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
    /* Generic fallback: the compiler's own (truncating) conversion. */
    return static_cast<int>(f);
141 inline unsigned int fastf2u(float f
) noexcept
142 { return static_cast<unsigned int>(fastf2i(f
)); }
/** Converts float-to-int using standard behavior (truncation). */
/* NOTE(review): this extract is missing several lines of the original
 * function (the braces, the float/int bit-punning 'conv' object, the body of
 * the out-of-range branch, and the negative-shift test before the first
 * return) -- restore them from the original header before compiling. */
inline int float2int(float f) noexcept
#if defined(HAVE_SSE_INTRINSICS)
    /* SSE1 path: cvttss2si truncates regardless of the rounding mode. */
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__))
    /* No SSE math: truncate by decomposing the IEEE-754 bits manually. */
    int sign, shift, mant;

    /* sign is +1 or -1 from the sign bit; shift re-bases the biased 8-bit
     * exponent so it is the shift to apply to the 24-bit mantissa. */
    sign = (conv.i>>31) | 1;
    shift = ((conv.i>>23)&0xff) - (127+23);

    /* UNLIKELY comes from opthelpers.h. Shifts of 31+ (overflow) or below
     * -23 (magnitude < 1) are handled in a branch whose body is missing from
     * this extract. */
    if UNLIKELY(shift >= 31 || shift < -23)

    /* Mantissa with the implicit leading 1 restored. */
    mant = (conv.i&0x7fffff) | 0x800000;
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

    /* Default: the compiler's own truncating conversion. */
    return static_cast<int>(f);
177 inline unsigned int float2uint(float f
) noexcept
178 { return static_cast<unsigned int>(float2int(f
)); }
/** Converts double-to-int using standard behavior (truncation). */
/* NOTE(review): this extract is missing several lines of the original
 * function (the braces, the local declarations including the double/int64
 * bit-punning 'conv' object, the body of the out-of-range branch, and the
 * negative-shift test before the first return) -- restore them from the
 * original header before compiling. */
inline int double2int(double d) noexcept
#if defined(HAVE_SSE_INTRINSICS)
    /* SSE2 path: cvttsd2si truncates regardless of the rounding mode. */
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE2_MATH__))
    /* sign is +1 or -1 from the sign bit; shift re-bases the biased 11-bit
     * exponent so it is the shift to apply to the 53-bit mantissa. */
    sign = (conv.i64 >> 63) | 1;
    shift = ((conv.i64 >> 52) & 0x7ff) - (1023 + 52);

    /* UNLIKELY comes from opthelpers.h; the body of this out-of-range branch
     * is missing from this extract. */
    if UNLIKELY(shift >= 63 || shift < -52)

    /* Mantissa with the implicit leading 1 restored (the _i64 literal
     * operator is declared earlier in this header). */
    mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
        return (int)(mant >> -shift) * sign;
    return (int)(mant << shift) * sign;

    /* Default: the compiler's own truncating conversion. */
    return static_cast<int>(d);
/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 *
 * NOTE(review): this extract is missing several lines of the original
 * function (the braces, the 'out' and 'conv' local declarations, the closing
 * "};" of the ilim array, the early return for already-integral values, and
 * the "f += ilim[sign];" before the final return) -- restore them from the
 * original header before compiling.
 */
inline float fast_roundf(float f) noexcept
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)
    /* x87 path: frndint rounds ST(0) per the FPU control word. */
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));

#elif (defined(__GNUC__) || defined(__clang__)) && defined(__aarch64__)
    /* AArch64 path: frintx rounds per the current FPCR rounding mode. */
    __asm__ volatile("frintx %s0, %s1" : "=w"(out) : "w"(f));

    /* Integral limit, where sub-integral precision is not available for
     * single-precision floats, for positive and negative inputs. */
    static const float ilim[2]{
        8388608.0f /* 0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */

    unsigned int sign, expo;

    /* Sign bit and biased exponent of f's IEEE-754 representation ('conv' is
     * declared on a line missing from this extract). */
    sign = (conv.i>>31)&0x01;
    expo = (conv.i>>23)&0xff;

    if UNLIKELY(expo >= 150/*+23*/)
    /* An exponent (base-2) of 23 or higher is incapable of sub-integral
     * precision, so it's already an integral value. We don't need to worry
     * about infinity or NaN here. (This branch's "return f;" is missing from
     * this extract.) */

    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced to
     * round to an integral value. Removing the integral limit then restores
     * the initial value rounded to the integral. The compiler should not
     * optimize this out because of non-associative rules on floating-point
     * math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
     * may break). The "f += ilim[sign];" half of that pair is missing from
     * this extract. */
    return f - ilim[sign];
/**
 * Limits value to the inclusive range [min_value, max_value], returning a
 * reference to whichever argument wins (like std::clamp).
 *
 * Fix: the "template<typename T>" header and the function braces were lost
 * in this extract; the visible body (std::min of std::max) is unchanged.
 */
template<typename T>
constexpr const T& clamp(const T& value, const T& min_value, const T& max_value) noexcept
{
    return std::min(std::max(value, min_value), max_value);
}
// Converts level (mB) to gain.
// NOTE(review): the function braces and two body lines -- by the original
// line numbering, presumably a silence cutoff for very low levels -- are
// missing from this extract; restore them from the original header.
inline float level_mb_to_gain(float x)
    // 2,000 millibels per decade of amplitude: gain = 10^(mB / 2000).
    return std::pow(10.0f, x / 2'000.0f);
// Converts gain to level (mB).
// NOTE(review): the function braces and two body lines -- presumably a
// non-positive-gain guard ahead of the log10 -- are missing from this
// extract; restore them from the original header.
inline float gain_to_level_mb(float x)
    // 2000*log10(gain), floored at -10,000 mB; maxf is defined above.
    return maxf(std::log10(x) * 2'000.0f, -10'000.0f);
299 #endif /* AL_NUMERIC_H */