#ifndef AL_NUMERIC_H
#define AL_NUMERIC_H

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#ifdef HAVE_SSE_INTRINSICS
#include <emmintrin.h>
#include <xmmintrin.h>
#endif

#include "altraits.h"
#include "opthelpers.h"


inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
{ return static_cast<int64_t>(n); }
inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept
{ return static_cast<uint64_t>(n); }
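/* Usage sketch (illustrative, not part of the original API): the suffixes
 * keep wide constants at a fixed 64-bit type without a cast, e.g. the 52-bit
 * mantissa mask 0xfffffffffffff_i64 used by double2int() below.
 */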
constexpr inline float minf(float a, float b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline float maxf(float a, float b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline float clampf(float val, float min, float max) noexcept
{ return minf(max, maxf(min, val)); }

constexpr inline double mind(double a, double b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline double maxd(double a, double b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline double clampd(double val, double min, double max) noexcept
{ return mind(max, maxd(min, val)); }

constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
{ return minu(max, maxu(min, val)); }

constexpr inline int mini(int a, int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int maxi(int a, int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int clampi(int val, int min, int max) noexcept
{ return mini(max, maxi(min, val)); }

constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
{ return mini64(max, maxi64(min, val)); }

constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
{ return minu64(max, maxu64(min, val)); }

constexpr inline size_t minz(size_t a, size_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline size_t maxz(size_t a, size_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
{ return minz(max, maxz(min, val)); }
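/* Usage sketch (illustrative, not part of the original API): each clamp*
 * helper limits a value to an inclusive [min, max] range, e.g.
 *
 *   clampf(1.5f, 0.0f, 1.0f) == 1.0f
 *   clampi(-3, 0, 255) == 0
 */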
constexpr inline float lerpf(float val1, float val2, float mu) noexcept
{ return val1 + (val2-val1)*mu; }
constexpr inline float cubic(float val1, float val2, float val3, float val4, float mu) noexcept
{
    const float mu2{mu*mu}, mu3{mu2*mu};
    const float a0{-0.5f*mu3 +       mu2 + -0.5f*mu};
    const float a1{ 1.5f*mu3 + -2.5f*mu2           + 1.0f};
    const float a2{-1.5f*mu3 +  2.0f*mu2 +  0.5f*mu};
    const float a3{ 0.5f*mu3 + -0.5f*mu2};
    return val1*a0 + val2*a1 + val3*a2 + val4*a3;
}
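/* Usage sketch (illustrative; the sample array s and index i are
 * hypothetical): lerpf blends two samples with mu in [0,1), while cubic
 * interpolates using one extra neighbor on each side of the target pair,
 * e.g.
 *
 *   lerpf(0.0f, 10.0f, 0.25f) == 2.5f
 *   cubic(s[i-1], s[i], s[i+1], s[i+2], mu)  // interpolates from s[i] toward s[i+1] as mu goes 0 to 1
 */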
/** Find the next power-of-2 for non-power-of-2 numbers. */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    if(value > 0)
    {
        value--;
        value |= value>>1;
        value |= value>>2;
        value |= value>>4;
        value |= value>>8;
        value |= value>>16;
    }
    return value+1;
}
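/* Illustrative results (not from the original header): NextPowerOf2(1) == 1,
 * NextPowerOf2(17) == 32, and powers of two are returned unchanged, e.g.
 * NextPowerOf2(64) == 64.
 */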
/**
 * If the value is not already a multiple of r, round down to the next
 * multiple.
 */
template<typename T>
constexpr T RoundDown(T value, al::type_identity_t<T> r) noexcept
{ return value - (value%r); }

/**
 * If the value is not already a multiple of r, round up to the next multiple.
 */
template<typename T>
constexpr T RoundUp(T value, al::type_identity_t<T> r) noexcept
{ return RoundDown(value + r-1, r); }
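/* Usage sketch (illustrative): rounding a count to a block multiple, e.g.
 * RoundDown(1000, 64) == 960 and RoundUp(1000, 64) == 1024; values that are
 * already multiples of r are returned unchanged.
 */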
/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)

    int i;
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))

    int i;
#ifdef __SSE_MATH__
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
#else
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
#endif
    return i;

#else

    return static_cast<int>(f);
#endif
}
inline unsigned int fastf2u(float f) noexcept
{ return static_cast<unsigned int>(fastf2i(f)); }
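/* Illustrative behavior: with the default rounding mode, fastf2i(1.5f) and
 * fastf2i(2.5f) both yield 2 (ties-to-even), whereas the truncating
 * float2int() below yields 1 and 2 respectively.
 */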
/** Converts float-to-int using standard behavior (truncation). */
inline int float2int(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE_MATH__))
    int sign, shift, mant;
    union {
        float f;
        int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31) | 1;
    shift = ((conv.i>>23)&0xff) - (127+23);

    /* Over/underflow */
    if(shift >= 31 || shift < -23) [[unlikely]]
        return 0;

    mant = (conv.i&0x7fffff) | 0x800000;
    if(shift < 0) [[likely]]
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

#else

    return static_cast<int>(f);
#endif
}
inline unsigned int float2uint(float f) noexcept
{ return static_cast<unsigned int>(float2int(f)); }
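/* Illustrative behavior: float2int(3.99f) == 3 and float2int(-3.99f) == -3;
 * the fallback path reproduces this truncation by shifting the mantissa by
 * the unbiased exponent and reapplying the sign.
 */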
/** Converts double-to-int using standard behavior (truncation). */
inline int double2int(double d) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE2_MATH__))
    int sign, shift;
    int64_t mant;
    union {
        double d;
        int64_t i64;
    } conv;

    conv.d = d;
    sign = (conv.i64 >> 63) | 1;
    shift = ((conv.i64 >> 52) & 0x7ff) - (1023 + 52);

    /* Over/underflow */
    if(shift >= 63 || shift < -52) [[unlikely]]
        return 0;

    mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
    if(shift < 0) [[likely]]
        return (int)(mant >> -shift) * sign;
    return (int)(mant << shift) * sign;

#else

    return static_cast<int>(d);
#endif
}
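/* Illustrative behavior: double2int(2.75) == 2 and double2int(-2.75) == -2;
 * results for values outside the int range are unspecified and differ
 * between the SSE and fallback paths.
 */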
/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 */
inline float fast_roundf(float f) noexcept
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)

    float out;
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
    return out;

#elif (defined(__GNUC__) || defined(__clang__)) && defined(__aarch64__)

    float out;
    __asm__ volatile("frintx %s0, %s1" : "=w"(out) : "w"(f));
    return out;

#else

    /* Integral limit, where sub-integral precision is not available for
     * floats.
     */
    static const float ilim[2]{
         8388608.0f /*  0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */
    };
    unsigned int sign, expo;
    union {
        float f;
        unsigned int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31)&0x01;
    expo = (conv.i>>23)&0xff;

    if(expo >= 150/*+23*/) [[unlikely]]
    {
        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
         * precision, so it's already an integral value. We don't need to worry
         * about infinity or NaN here.
         */
        return f;
    }

    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced to
     * round to an integral value. Removing the integral limit then restores
     * the initial value rounded to the integral. The compiler should not
     * optimize this out because of non-associative rules on floating-point
     * math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
     * may break).
     */
    f += ilim[sign];
    return f - ilim[sign];
#endif
}
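/* Illustrative behavior: with the default round-to-nearest mode,
 * fast_roundf(2.5f) == 2.0f (ties-to-even) and fast_roundf(-1.2f) == -1.0f;
 * unlike rintf, the sign of a zero result is not guaranteed.
 */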
template<typename T>
constexpr const T& clamp(const T& value, const T& min_value, const T& max_value) noexcept
{
    return std::min(std::max(value, min_value), max_value);
}
// Converts level (mB) to gain.
inline float level_mb_to_gain(float x)
{
    if(x <= -10'000.0f)
        return 0.0f;
    return std::pow(10.0f, x / 2'000.0f);
}

// Converts gain to level (mB).
inline float gain_to_level_mb(float x)
{
    if(x <= 0.0f)
        return -10'000.0f;
    return maxf(std::log10(x) * 2'000.0f, -10'000.0f);
}
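/* Illustrative values: level_mb_to_gain(0.0f) == 1.0f,
 * level_mb_to_gain(-600.0f) is roughly 0.5 (-6 dB), and
 * gain_to_level_mb(1.0f) == 0.0f; the two conversions are approximately
 * inverse over the representable range.
 */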
#endif /* AL_NUMERIC_H */