#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "opthelpers.h"

inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept { return static_cast<uint64_t>(n); }
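
/* Example (illustrative): these keep 64-bit constants explicitly typed, e.g.
 * 0x0f0f0f0f0f0f0f0f_u64 and 0x10000000000000_i64 as used below, without
 * relying on ull/ll suffix promotion.
 */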

constexpr inline float minf(float a, float b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline float maxf(float a, float b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline float clampf(float val, float min, float max) noexcept
{ return minf(max, maxf(min, val)); }

constexpr inline double mind(double a, double b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline double maxd(double a, double b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline double clampd(double val, double min, double max) noexcept
{ return mind(max, maxd(min, val)); }

constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
{ return minu(max, maxu(min, val)); }

constexpr inline int mini(int a, int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int maxi(int a, int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int clampi(int val, int min, int max) noexcept
{ return mini(max, maxi(min, val)); }

constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
{ return mini64(max, maxi64(min, val)); }

constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
{ return minu64(max, maxu64(min, val)); }

constexpr inline size_t minz(size_t a, size_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline size_t maxz(size_t a, size_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
{ return minz(max, maxz(min, val)); }
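
/* Example (illustrative): the clamp helpers take the value first, then the
 * bounds, e.g. clampf(2.0f, 0.0f, 1.0f) == 1.0f and clampi(-3, 0, 15) == 0.
 */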

/** Find the next power-of-2 for non-power-of-2 numbers. */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    if(value > 0)
    {
        value--;
        value |= value>>1;
        value |= value>>2;
        value |= value>>4;
        value |= value>>8;
        value |= value>>16;
    }
    return value+1;
}
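
/* Example (illustrative): NextPowerOf2(17) == 32, NextPowerOf2(16) == 16,
 * and NextPowerOf2(0) == 1, since the decrement-and-smear only runs for
 * non-zero input.
 */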

/** Round up a value to the next multiple. */
inline size_t RoundUp(size_t value, size_t r) noexcept
{
    value += r-1;
    return value - (value%r);
}
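
/* Example (illustrative): RoundUp(17, 8) == 24 while RoundUp(16, 8) == 16;
 * adding r-1 before truncating to the lower multiple advances the value
 * only when it isn't already a multiple.
 */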

/* Define CountTrailingZeros (count trailing zero bits, starting from the lsb)
 * and PopCount (population count/count 1 bits) methods, for 32- and 64-bit
 * integers. The CountTrailingZeros results are *UNDEFINED* if the value is 0.
 */
template<typename T>
inline int PopCount(T val) = delete;
template<typename T>
inline int CountTrailingZeros(T val) = delete;
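
/* Example (illustrative): PopCount(0xf0u) == 4, and
 * CountTrailingZeros(0x28u) == 3 since 0x28 is 0b101000. Passing 0 to
 * CountTrailingZeros is undefined, matching the __builtin_ctz* contract.
 */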

#ifdef __GNUC__

/* Define variations for unsigned (long (long)) int, since we don't know what
 * uint32/64_t are typedef'd to.
 */
inline int PopCount(unsigned long long val) { return __builtin_popcountll(val); }
inline int PopCount(unsigned long val) { return __builtin_popcountl(val); }
inline int PopCount(unsigned int val) { return __builtin_popcount(val); }

inline int CountTrailingZeros(unsigned long long val) { return __builtin_ctzll(val); }
inline int CountTrailingZeros(unsigned long val) { return __builtin_ctzl(val); }
inline int CountTrailingZeros(unsigned int val) { return __builtin_ctz(val); }

#else

/* There be black magics here. The popcnt method is derived from
 * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
 * while the ctz-utilizing-popcnt algorithm is shown here
 * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
 * as the ntz2 variant. These likely aren't the most efficient methods, but
 * they're good enough if the GCC built-ins aren't available.
 */
template<>
inline int PopCount(uint32_t v)
{
    v = v - ((v >> 1) & 0x55555555u);
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
    v = (v + (v >> 4)) & 0x0f0f0f0fu;
    return static_cast<int>((v * 0x01010101u) >> 24);
}
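
/* Illustrative trace, for v = 0xffu: the steps produce 0xaau (two bits set
 * in each 2-bit field), 0x44u (four per nibble), then 0x08u (eight per
 * byte); the final multiply-shift sums the byte counts into the top byte,
 * giving 8.
 */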
template<>
inline int PopCount(uint64_t v)
{
    v = v - ((v >> 1) & 0x5555555555555555_u64);
    v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
    v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
    return static_cast<int>((v * 0x0101010101010101_u64) >> 56);
}

#ifdef _WIN64

template<>
inline int CountTrailingZeros(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return static_cast<int>(idx);
}
template<>
inline int CountTrailingZeros(uint64_t v)
{
    unsigned long idx = 64;
    _BitScanForward64(&idx, v);
    return static_cast<int>(idx);
}

#elif defined(_WIN32)

template<>
inline int CountTrailingZeros(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return static_cast<int>(idx);
}
template<>
inline int CountTrailingZeros(uint64_t v)
{
    unsigned long idx = 64;
    if(!_BitScanForward(&idx, static_cast<uint32_t>(v&0xffffffff)))
    {
        if(_BitScanForward(&idx, static_cast<uint32_t>(v>>32)))
            idx += 32;
    }
    return static_cast<int>(idx);
}

#else

template<>
inline int CountTrailingZeros(uint32_t value)
{ return PopCount(~value & (value - 1)); }
template<>
inline int CountTrailingZeros(uint64_t value)
{ return PopCount(~value & (value - 1)); }

#endif
#endif
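
/* Illustrative check of the ntz2 identity: for value = 0x28 (0b101000),
 * ~value & (value - 1) == 0b000111, and its population count, 3, is exactly
 * the number of trailing zero bits.
 */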

/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)

    int i;
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))

    int i;
#ifdef __SSE_MATH__
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
#else
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
#endif
    return i;

#else

    return static_cast<int>(f);
#endif
}

inline unsigned int fastf2u(float f) noexcept
{ return static_cast<unsigned int>(fastf2i(f)); }
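
/* Example (illustrative): under the default round-to-nearest-ties-to-even
 * mode, fastf2i(2.5f) typically returns 2 rather than 3, and fastf2i(-0.5f)
 * returns 0. Callers needing truncation should use float2int() below.
 */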

/** Converts float-to-int using standard behavior (truncation). */
inline int float2int(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE_MATH__))
    int sign, shift, mant;
    union {
        float f;
        int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31) | 1;
    shift = ((conv.i>>23)&0xff) - (127+23);

    /* Over/underflow */
    if UNLIKELY(shift >= 31 || shift < -23)
        return 0;

    mant = (conv.i&0x7fffff) | 0x800000;
    if LIKELY(shift < 0)
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

#else

    return static_cast<int>(f);
#endif
}

inline unsigned int float2uint(float f) noexcept
{ return static_cast<unsigned int>(float2int(f)); }
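
/* Example (illustrative): float2int(1.7f) == 1 and float2int(-1.7f) == -1,
 * regardless of rounding mode. In the manual path, 1.7f has a biased
 * exponent of 127, so shift == -23 and the 24-bit mantissa is shifted right
 * to leave only the integral bit.
 */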

/** Converts double-to-int using standard behavior (truncation). */
inline int double2int(double d) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE2_MATH__))
    int sign, shift;
    int64_t mant;
    union {
        double d;
        int64_t i64;
    } conv;

    conv.d = d;
    sign = (conv.i64 >> 63) | 1;
    shift = ((conv.i64 >> 52) & 0x7ff) - (1023 + 52);

    /* Over/underflow */
    if UNLIKELY(shift >= 63 || shift < -52)
        return 0;

    mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
    if LIKELY(shift < 0)
        return (int)(mant >> -shift) * sign;
    return (int)(mant << shift) * sign;

#else

    return static_cast<int>(d);
#endif
}

/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 */
inline float fast_roundf(float f) noexcept
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)

    float out;
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
    return out;

#else

    /* Integral limit, where sub-integral precision is not available for
     * floats.
     */
    static const float ilim[2]{
         8388608.0f /*  0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */
    };
    unsigned int sign, expo;
    union {
        float f;
        unsigned int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31)&0x01;
    expo = (conv.i>>23)&0xff;

    if UNLIKELY(expo >= 150/*+23*/)
    {
        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
         * precision, so it's already an integral value. We don't need to
         * worry about infinity or NaN here.
         */
        return f;
    }

    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced
     * to round to an integral value. Removing the integral limit then
     * restores the initial value rounded to the integral. The compiler
     * should not optimize this out because of non-associative rules on
     * floating-point math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case
     * this may break).
     */
    f += ilim[sign];
    return f - ilim[sign];
#endif
}
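
/* Illustrative trace, assuming round-to-nearest: for f = 2.3f, sign == 0, so
 * adding 8388608.0f yields exactly 8388610.0f (the sum can hold no
 * sub-integral bits), and subtracting 8388608.0f leaves 2.0f. The negative
 * limit is used for negative inputs so the magnitudes still cancel exactly.
 */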

#endif /* AL_NUMERIC_H */