#ifndef AL_NUMERIC_H
#define AL_NUMERIC_H

#include <cstddef>
#include <cstdint>
#ifdef HAVE_INTRIN_H
#include <intrin.h>
#endif
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "opthelpers.h"
inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
{ return static_cast<int64_t>(n); }
inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept
{ return static_cast<uint64_t>(n); }
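/* Illustrative checks (not part of the original header): the literal
 * suffixes always produce fixed-width 64-bit types, independent of the
 * platform's long/long long sizes. */
static_assert(sizeof(0_i64) == 8, "_i64 yields a 64-bit signed value");
static_assert(0xffffffffffffffff_u64 == UINT64_MAX, "_u64 yields a uint64_t");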
constexpr inline float minf(float a, float b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline float maxf(float a, float b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline float clampf(float val, float min, float max) noexcept
{ return minf(max, maxf(min, val)); }

constexpr inline double mind(double a, double b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline double maxd(double a, double b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline double clampd(double val, double min, double max) noexcept
{ return mind(max, maxd(min, val)); }

constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
{ return minu(max, maxu(min, val)); }

constexpr inline int mini(int a, int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int maxi(int a, int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int clampi(int val, int min, int max) noexcept
{ return mini(max, maxi(min, val)); }

constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
{ return mini64(max, maxi64(min, val)); }

constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
{ return minu64(max, maxu64(min, val)); }

constexpr inline size_t minz(size_t a, size_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline size_t maxz(size_t a, size_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
{ return minz(max, maxz(min, val)); }
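/* Illustrative checks (not part of the original header): clamp pins a value
 * to the inclusive [min, max] range. */
static_assert(clampi(5, 0, 3) == 3, "values above max clamp to max");
static_assert(clampi(-2, 0, 3) == 0, "values below min clamp to min");
static_assert(clampi(2, 0, 3) == 2, "in-range values pass through");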
/** Find the next power-of-2 for non-power-of-2 numbers. */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    if(value > 0)
    {
        /* Smear the highest set bit into every lower bit, then add one to
         * carry up to the next power of two. */
        value--;
        value |= value>>1;
        value |= value>>2;
        value |= value>>4;
        value |= value>>8;
        value |= value>>16;
    }
    return value+1;
}
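/* For example (illustrative): NextPowerOf2(17) first decrements to 16
 * (0b10000), smears to 31 (0b11111), and returns 32, while an exact power of
 * two is returned unchanged (NextPowerOf2(32) == 32). */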
/** Round up a value to the next multiple of r. */
inline size_t RoundUp(size_t value, size_t r) noexcept
{
    /* Bias by r-1 so the truncating modulo rounds up instead of down. */
    value += r-1;
    return value - (value%r);
}
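/* Illustrative check (not part of the original header): RoundUp(13, 8)
 * computes 13+7 = 20, then 20 - (20%8) = 16, the next multiple of 8. Values
 * already on a multiple are unchanged: RoundUp(16, 8) == 16. */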
/* Define CTZ macros (count trailing zeros), and POPCNT macros (population
 * count/count 1 bits), for 32- and 64-bit integers. The CTZ macros' results
 * are *UNDEFINED* if the value is 0.
 */
#ifdef __GNUC__

#define POPCNT32 __builtin_popcount
#define CTZ32 __builtin_ctz
#if SIZEOF_LONG == 8
#define POPCNT64 __builtin_popcountl
#define CTZ64 __builtin_ctzl
#else
#define POPCNT64 __builtin_popcountll
#define CTZ64 __builtin_ctzll
#endif

#else
/* There be black magics here. The popcnt method is derived from
 * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
 * while the ctz-utilizing-popcnt algorithm is shown here
 * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
 * as the ntz2 variant. These likely aren't the most efficient methods, but
 * they're good enough if the GCC built-ins aren't available.
 */
inline int fallback_popcnt32(uint32_t v)
{
    v = v - ((v >> 1) & 0x55555555u);
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
    v = (v + (v >> 4)) & 0x0f0f0f0fu;
    return (int)((v * 0x01010101u) >> 24);
}
#define POPCNT32 fallback_popcnt32
inline int fallback_popcnt64(uint64_t v)
{
    v = v - ((v >> 1) & 0x5555555555555555_u64);
    v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
    v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
    return (int)((v * 0x0101010101010101_u64) >> 56);
}
#define POPCNT64 fallback_popcnt64
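/* Illustrative walk-through (not in the original header): each step folds
 * bit counts in parallel. For v = 0xF0u:
 *   step 1: 0xF0 - 0x50 = 0xA0 (per-2-bit counts: 10 10 00 00)
 *   step 2: 0x20 + 0x20 = 0x40 (per-nibble counts: 0100 0000)
 *   step 3: (0x40 + 0x04) & 0x0f0f0f0f = 0x04 (per-byte count)
 * The 0x01010101 multiply then sums all byte counts into the top byte,
 * giving fallback_popcnt32(0xF0u) == 4. */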
#if defined(HAVE_BITSCANFORWARD64_INTRINSIC)

inline int msvc64_ctz32(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return (int)idx;
}
#define CTZ32 msvc64_ctz32
inline int msvc64_ctz64(uint64_t v)
{
    unsigned long idx = 64;
    _BitScanForward64(&idx, v);
    return (int)idx;
}
#define CTZ64 msvc64_ctz64
#elif defined(HAVE_BITSCANFORWARD_INTRINSIC)

inline int msvc_ctz32(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return (int)idx;
}
#define CTZ32 msvc_ctz32
inline int msvc_ctz64(uint64_t v)
{
    unsigned long idx = 64;
    /* Scan the low 32 bits first; if they're all zero, scan the high 32
     * bits and offset the result. */
    if(!_BitScanForward(&idx, (uint32_t)(v&0xffffffff)))
    {
        if(_BitScanForward(&idx, (uint32_t)(v>>32)))
            idx += 32;
    }
    return (int)idx;
}
#define CTZ64 msvc_ctz64
#else

inline int fallback_ctz32(uint32_t value)
{ return POPCNT32(~value & (value - 1)); }
#define CTZ32 fallback_ctz32
inline int fallback_ctz64(uint64_t value)
{ return POPCNT64(~value & (value - 1)); }
#define CTZ64 fallback_ctz64

#endif
#endif
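/* Illustrative check (not in the original header): ~value & (value-1) sets
 * exactly the bits below the lowest set bit. For value = 0b101000,
 * value-1 = 0b100111 and ~value = ...010111, so the AND gives 0b000111,
 * whose popcount (3) is the number of trailing zeros. */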
/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)

    int i;
    /* x87 fistp converts using the current rounding mode. */
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))

    int i;
#ifdef __SSE_MATH__
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
#else
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
#endif
    return i;

#else

    return static_cast<int>(f);
#endif
}

inline unsigned int fastf2u(float f) noexcept
{ return static_cast<unsigned int>(fastf2i(f)); }
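/* Note (illustrative): under the default round-to-nearest-even mode,
 * fastf2i(2.5f) yields 2 and fastf2i(3.5f) yields 4, unlike a truncating
 * cast, which would give 2 and 3. */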
/** Converts float-to-int using standard behavior (truncation). */
inline int float2int(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
       !defined(__SSE_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0)

    int sign, shift, mant;
    union {
        float f;
        int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31) | 1;
    shift = ((conv.i>>23)&0xff) - (127+23);

    /* Over/underflow */
    if UNLIKELY(shift >= 31 || shift < -23)
        return 0;

    mant = (conv.i&0x7fffff) | 0x800000;
    if(shift < 0)
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

#else

    return static_cast<int>(f);
#endif
}

inline unsigned int float2uint(float f) noexcept
{ return static_cast<unsigned int>(float2int(f)); }
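/* Illustrative trace (not in the original header): for f = -5.5f the bits
 * are sign=1, biased exponent=129, mantissa fraction=0x300000. That gives
 * sign = -1, shift = 129-150 = -21, mant = 0xB00000, and
 * (mant >> 21) * sign = 5 * -1 = -5, i.e. truncation toward zero. */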
/** Converts double-to-int using standard behavior (truncation). */
inline int double2int(double d) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
       !defined(__SSE2_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2)

    int sign, shift;
    int64_t mant;
    union {
        double d;
        int64_t i64;
    } conv;

    conv.d = d;
    sign = (int)(conv.i64 >> 63) | 1;
    shift = (int)((conv.i64 >> 52) & 0x7ff) - (1023 + 52);

    /* Over/underflow */
    if UNLIKELY(shift >= 63 || shift < -52)
        return 0;

    mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
    if(shift < 0)
        return (int)(mant >> -shift) * sign;
    return (int)(mant << shift) * sign;

#else

    return static_cast<int>(d);
#endif
}
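/* Note (illustrative): this mirrors float2int with double's 11-bit exponent
 * (bias 1023) and 52-bit mantissa, so e.g. double2int(-7.99) == -7. */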
/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 */
inline float fast_roundf(float f) noexcept
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
    !defined(__SSE_MATH__)

    float out;
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
    return out;

#else

    /* Integral limit, where sub-integral precision is not available for
     * floats.
     */
    static const float ilim[2]{
         8388608.0f /*  0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */
    };
    unsigned int sign, expo;
    union {
        float f;
        unsigned int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31)&0x01;
    expo = (conv.i>>23)&0xff;

    if UNLIKELY(expo >= 150/*+23*/)
    {
        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
         * precision, so it's already an integral value. We don't need to
         * worry about infinity or NaN here.
         */
        return f;
    }

    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced
     * to round to an integral value. Removing the integral limit then
     * restores the initial value rounded to the integral. The compiler should
     * not optimize this out because of non-associative rules on floating-
     * point math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
     * may break).
     */
    f += ilim[sign];
    return f - ilim[sign];
#endif
}
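/* Illustrative trace (not in the original header): with the default rounding
 * mode, fast_roundf(2.5f) computes 2.5 + 8388608 = 8388610.5, which is not
 * representable at that magnitude and rounds to 8388610 (ties-to-even);
 * subtracting 8388608 then gives 2.0f. */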
#endif /* AL_NUMERIC_H */