1 //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains some functions that are useful for math stuff.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_SUPPORT_MATHEXTRAS_H
14 #define LLVM_SUPPORT_MATHEXTRAS_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>
#ifdef __ANDROID_NDK__
#include <android/api-level.h>
#endif

#ifdef _MSC_VER
// Declare these intrinsics manually rather including intrin.h. It's very
// expensive, and MathExtras.h is popular.
// #include <intrin.h>
extern "C" {
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
}
#endif
42 /// The behavior an operation has on an input of 0.
44 /// The returned value is undefined.
46 /// The returned value is numeric_limits<T>::max()
48 /// The returned value is numeric_limits<T>::digits
53 template <typename T
, std::size_t SizeOfT
> struct TrailingZerosCounter
{
54 static std::size_t count(T Val
, ZeroBehavior
) {
56 return std::numeric_limits
<T
>::digits
;
61 std::size_t ZeroBits
= 0;
62 T Shift
= std::numeric_limits
<T
>::digits
>> 1;
63 T Mask
= std::numeric_limits
<T
>::max() >> Shift
;
65 if ((Val
& Mask
) == 0) {
76 #if __GNUC__ >= 4 || defined(_MSC_VER)
77 template <typename T
> struct TrailingZerosCounter
<T
, 4> {
78 static std::size_t count(T Val
, ZeroBehavior ZB
) {
79 if (ZB
!= ZB_Undefined
&& Val
== 0)
82 #if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
83 return __builtin_ctz(Val
);
84 #elif defined(_MSC_VER)
86 _BitScanForward(&Index
, Val
);
92 #if !defined(_MSC_VER) || defined(_M_X64)
93 template <typename T
> struct TrailingZerosCounter
<T
, 8> {
94 static std::size_t count(T Val
, ZeroBehavior ZB
) {
95 if (ZB
!= ZB_Undefined
&& Val
== 0)
98 #if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
99 return __builtin_ctzll(Val
);
100 #elif defined(_MSC_VER)
102 _BitScanForward64(&Index
, Val
);
109 } // namespace detail
111 /// Count number of 0's from the least significant bit to the most
112 /// stopping at the first 1.
114 /// Only unsigned integral types are allowed.
116 /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
118 template <typename T
>
119 std::size_t countTrailingZeros(T Val
, ZeroBehavior ZB
= ZB_Width
) {
120 static_assert(std::numeric_limits
<T
>::is_integer
&&
121 !std::numeric_limits
<T
>::is_signed
,
122 "Only unsigned integral types are allowed.");
123 return llvm::detail::TrailingZerosCounter
<T
, sizeof(T
)>::count(Val
, ZB
);
127 template <typename T
, std::size_t SizeOfT
> struct LeadingZerosCounter
{
128 static std::size_t count(T Val
, ZeroBehavior
) {
130 return std::numeric_limits
<T
>::digits
;
133 std::size_t ZeroBits
= 0;
134 for (T Shift
= std::numeric_limits
<T
>::digits
>> 1; Shift
; Shift
>>= 1) {
135 T Tmp
= Val
>> Shift
;
145 #if __GNUC__ >= 4 || defined(_MSC_VER)
146 template <typename T
> struct LeadingZerosCounter
<T
, 4> {
147 static std::size_t count(T Val
, ZeroBehavior ZB
) {
148 if (ZB
!= ZB_Undefined
&& Val
== 0)
151 #if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
152 return __builtin_clz(Val
);
153 #elif defined(_MSC_VER)
155 _BitScanReverse(&Index
, Val
);
161 #if !defined(_MSC_VER) || defined(_M_X64)
162 template <typename T
> struct LeadingZerosCounter
<T
, 8> {
163 static std::size_t count(T Val
, ZeroBehavior ZB
) {
164 if (ZB
!= ZB_Undefined
&& Val
== 0)
167 #if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
168 return __builtin_clzll(Val
);
169 #elif defined(_MSC_VER)
171 _BitScanReverse64(&Index
, Val
);
178 } // namespace detail
180 /// Count number of 0's from the most significant bit to the least
181 /// stopping at the first 1.
183 /// Only unsigned integral types are allowed.
185 /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
187 template <typename T
>
188 std::size_t countLeadingZeros(T Val
, ZeroBehavior ZB
= ZB_Width
) {
189 static_assert(std::numeric_limits
<T
>::is_integer
&&
190 !std::numeric_limits
<T
>::is_signed
,
191 "Only unsigned integral types are allowed.");
192 return llvm::detail::LeadingZerosCounter
<T
, sizeof(T
)>::count(Val
, ZB
);
195 /// Get the index of the first set bit starting from the least
198 /// Only unsigned integral types are allowed.
200 /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
202 template <typename T
> T
findFirstSet(T Val
, ZeroBehavior ZB
= ZB_Max
) {
203 if (ZB
== ZB_Max
&& Val
== 0)
204 return std::numeric_limits
<T
>::max();
206 return countTrailingZeros(Val
, ZB_Undefined
);
/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 must be special-cased: shifting by the full width is UB.
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}
218 /// Create a bitmask with the N left-most bits set to 1, and all other
219 /// bits set to 0. Only unsigned types are allowed.
220 template <typename T
> T
maskLeadingOnes(unsigned N
) {
221 return ~maskTrailingOnes
<T
>(CHAR_BIT
* sizeof(T
) - N
);
224 /// Create a bitmask with the N right-most bits set to 0, and all other
225 /// bits set to 1. Only unsigned types are allowed.
226 template <typename T
> T
maskTrailingZeros(unsigned N
) {
227 return maskLeadingOnes
<T
>(CHAR_BIT
* sizeof(T
) - N
);
230 /// Create a bitmask with the N left-most bits set to 0, and all other
231 /// bits set to 1. Only unsigned types are allowed.
232 template <typename T
> T
maskLeadingZeros(unsigned N
) {
233 return maskTrailingOnes
<T
>(CHAR_BIT
* sizeof(T
) - N
);
236 /// Get the index of the last set bit starting from the least
239 /// Only unsigned integral types are allowed.
241 /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
243 template <typename T
> T
findLastSet(T Val
, ZeroBehavior ZB
= ZB_Max
) {
244 if (ZB
== ZB_Max
&& Val
== 0)
245 return std::numeric_limits
<T
>::max();
247 // Use ^ instead of - because both gcc and llvm can remove the associated ^
248 // in the __builtin_clz intrinsic on x86.
249 return countLeadingZeros(Val
, ZB_Undefined
) ^
250 (std::numeric_limits
<T
>::digits
- 1);
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
    R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};
266 /// Reverse the bits in \p Val.
267 template <typename T
>
268 T
reverseBits(T Val
) {
269 unsigned char in
[sizeof(Val
)];
270 unsigned char out
[sizeof(Val
)];
271 std::memcpy(in
, &Val
, sizeof(Val
));
272 for (unsigned i
= 0; i
< sizeof(Val
); ++i
)
273 out
[(sizeof(Val
) - i
) - 1] = BitReverseTable256
[in
[i
]];
274 std::memcpy(&Val
, out
, sizeof(Val
));
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}
/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  return static_cast<uint32_t>(Value);
}
/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return ((uint64_t)High << 32) | (uint64_t)Low;
}
/// Checks if an integer fits into the given bit width.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  // N >= 64 always fits; otherwise check against [-2^(N-1), 2^(N-1)).
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
301 // Template specializations to get better code for common cases.
302 template <> constexpr inline bool isInt
<8>(int64_t x
) {
303 return static_cast<int8_t>(x
) == x
;
305 template <> constexpr inline bool isInt
<16>(int64_t x
) {
306 return static_cast<int16_t>(x
) == x
;
308 template <> constexpr inline bool isInt
<32>(int64_t x
) {
309 return static_cast<int32_t>(x
) == x
;
312 /// Checks if a signed integer is an N bit number shifted left by S.
313 template <unsigned N
, unsigned S
>
314 constexpr inline bool isShiftedInt(int64_t x
) {
316 N
> 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
317 static_assert(N
+ S
<= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
318 return isInt
<N
+ S
>(x
) && (x
% (UINT64_C(1) << S
) == 0);
/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
template <unsigned N>
constexpr inline typename std::enable_if<(N < 64), bool>::type
isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  return X < (UINT64_C(1) << (N));
}
/// 64-and-wider overload: every uint64_t fits.
template <unsigned N>
constexpr inline typename std::enable_if<N >= 64, bool>::type
isUInt(uint64_t X) {
  return true;
}
341 // Template specializations to get better code for common cases.
342 template <> constexpr inline bool isUInt
<8>(uint64_t x
) {
343 return static_cast<uint8_t>(x
) == x
;
345 template <> constexpr inline bool isUInt
<16>(uint64_t x
) {
346 return static_cast<uint16_t>(x
) == x
;
348 template <> constexpr inline bool isUInt
<32>(uint64_t x
) {
349 return static_cast<uint32_t>(x
) == x
;
352 /// Checks if a unsigned integer is an N bit number shifted left by S.
353 template <unsigned N
, unsigned S
>
354 constexpr inline bool isShiftedUInt(uint64_t x
) {
356 N
> 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
357 static_assert(N
+ S
<= 64,
358 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
359 // Per the two static_asserts above, S must be strictly less than 64. So
360 // 1 << S is not undefined behavior.
361 return isUInt
<N
+ S
>(x
) && (x
% (UINT64_C(1) << S
) == 0);
/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64. But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}
/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  return -(UINT64_C(1)<<(N-1));
}
/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}
391 /// Checks if an unsigned integer fits into the given (dynamic) bit width.
392 inline bool isUIntN(unsigned N
, uint64_t x
) {
393 return N
>= 64 || x
<= maxUIntN(N
);
396 /// Checks if an signed integer fits into the given (dynamic) bit width.
397 inline bool isIntN(unsigned N
, int64_t x
) {
398 return N
>= 64 || (minIntN(N
) <= x
&& x
<= maxIntN(N
));
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // Value+1 clears all the trailing ones iff they are the only set bits.
  return Value && ((Value + 1) & Value) == 0;
}
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}
414 /// Return true if the argument contains a non-empty sequence of ones with the
415 /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
416 constexpr inline bool isShiftedMask_32(uint32_t Value
) {
417 return Value
&& isMask_32((Value
- 1) | Value
);
420 /// Return true if the argument contains a non-empty sequence of ones with the
421 /// remainder zero (64 bit version.)
422 constexpr inline bool isShiftedMask_64(uint64_t Value
) {
423 return Value
&& isMask_64((Value
- 1) | Value
);
/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  return Value && !(Value & (Value - 1));
}
/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}
437 /// Return a byte-swapped representation of the 16-bit argument.
438 inline uint16_t ByteSwap_16(uint16_t Value
) {
439 return sys::SwapByteOrder_16(Value
);
442 /// Return a byte-swapped representation of the 32-bit argument.
443 inline uint32_t ByteSwap_32(uint32_t Value
) {
444 return sys::SwapByteOrder_32(Value
);
447 /// Return a byte-swapped representation of the 64-bit argument.
448 inline uint64_t ByteSwap_64(uint64_t Value
) {
449 return sys::SwapByteOrder_64(Value
);
452 /// Count the number of ones from the most significant bit to the first
455 /// Ex. countLeadingOnes(0xFF0FFF00) == 8.
456 /// Only unsigned integral types are allowed.
458 /// \param ZB the behavior on an input of all ones. Only ZB_Width and
459 /// ZB_Undefined are valid arguments.
460 template <typename T
>
461 std::size_t countLeadingOnes(T Value
, ZeroBehavior ZB
= ZB_Width
) {
462 static_assert(std::numeric_limits
<T
>::is_integer
&&
463 !std::numeric_limits
<T
>::is_signed
,
464 "Only unsigned integral types are allowed.");
465 return countLeadingZeros
<T
>(~Value
, ZB
);
468 /// Count the number of ones from the least significant bit to the first
471 /// Ex. countTrailingOnes(0x00FF00FF) == 8.
472 /// Only unsigned integral types are allowed.
474 /// \param ZB the behavior on an input of all ones. Only ZB_Width and
475 /// ZB_Undefined are valid arguments.
476 template <typename T
>
477 std::size_t countTrailingOnes(T Value
, ZeroBehavior ZB
= ZB_Width
) {
478 static_assert(std::numeric_limits
<T
>::is_integer
&&
479 !std::numeric_limits
<T
>::is_signed
,
480 "Only unsigned integral types are allowed.");
481 return countTrailingZeros
<T
>(~Value
, ZB
);
/// Generic population count for types up to 32 bits wide.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    // SWAR bit-count (Hacker's Delight style).
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};
500 template <typename T
> struct PopulationCounter
<T
, 8> {
501 static unsigned count(T Value
) {
503 return __builtin_popcountll(Value
);
506 v
= v
- ((v
>> 1) & 0x5555555555555555ULL
);
507 v
= (v
& 0x3333333333333333ULL
) + ((v
>> 2) & 0x3333333333333333ULL
);
508 v
= (v
+ (v
>> 4)) & 0x0F0F0F0F0F0F0F0FULL
;
509 return unsigned((uint64_t)(v
* 0x0101010101010101ULL
) >> 56);
513 } // namespace detail
515 /// Count the number of set bits in a value.
516 /// Ex. countPopulation(0xF000F000) = 8
517 /// Returns 0 if the word is zero.
518 template <typename T
>
519 inline unsigned countPopulation(T Value
) {
520 static_assert(std::numeric_limits
<T
>::is_integer
&&
521 !std::numeric_limits
<T
>::is_signed
,
522 "Only unsigned integral types are allowed.");
523 return detail::PopulationCounter
<T
, sizeof(T
)>::count(Value
);
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); fall back to the identity
  // log2(x) = ln(x) / ln(2).
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
535 /// Return the floor log base 2 of the specified value, -1 if the value is zero.
536 /// (32 bit edition.)
537 /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
538 inline unsigned Log2_32(uint32_t Value
) {
539 return 31 - countLeadingZeros(Value
);
542 /// Return the floor log base 2 of the specified value, -1 if the value is zero.
543 /// (64 bit edition.)
544 inline unsigned Log2_64(uint64_t Value
) {
545 return 63 - countLeadingZeros(Value
);
548 /// Return the ceil log base 2 of the specified value, 32 if the value is zero.
549 /// (32 bit edition).
550 /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
551 inline unsigned Log2_32_Ceil(uint32_t Value
) {
552 return 32 - countLeadingZeros(Value
- 1);
555 /// Return the ceil log base 2 of the specified value, 64 if the value is zero.
556 /// (64 bit edition.)
557 inline unsigned Log2_64_Ceil(uint64_t Value
) {
558 return 64 - countLeadingZeros(Value
- 1);
/// Return the greatest common divisor of the values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  while (B) {
    uint64_t T = B;
    B = A % B;
    A = T;
  }
  return A;
}
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  double D;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy is the well-defined way to type-pun.
  memcpy(&D, &Bits, sizeof(Bits));
  return D;
}
/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  float F;
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  memcpy(&F, &Bits, sizeof(Bits));
  return F;
}
/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  uint64_t Bits;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  memcpy(&Bits, &Double, sizeof(Double));
  return Bits;
}
/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  uint32_t Bits;
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  memcpy(&Bits, &Float, sizeof(Float));
  return Bits;
}
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The largest power of 2 that divides both A and B.
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //    return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}
618 /// Aligns \c Addr to \c Alignment bytes, rounding up.
620 /// Alignment should be a power of two. This method rounds up, so
621 /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
622 inline uintptr_t alignAddr(const void *Addr
, size_t Alignment
) {
623 assert(Alignment
&& isPowerOf2_64((uint64_t)Alignment
) &&
624 "Alignment is not a power of two!");
626 assert((uintptr_t)Addr
+ Alignment
- 1 >= (uintptr_t)Addr
);
628 return (((uintptr_t)Addr
+ Alignment
- 1) & ~(uintptr_t)(Alignment
- 1));
631 /// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
632 /// bytes, rounding up.
633 inline size_t alignmentAdjustment(const void *Ptr
, size_t Alignment
) {
634 return alignAddr(Ptr
, Alignment
) - (uintptr_t)Ptr
;
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into all lower positions, then add one.
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}
649 /// Returns the power of two which is less than or equal to the given value.
650 /// Essentially, it is a floor operation across the domain of powers of two.
651 inline uint64_t PowerOf2Floor(uint64_t A
) {
653 return 1ull << (63 - countLeadingZeros(A
, ZB_Undefined
));
656 /// Returns the power of two which is greater than or equal to the given value.
657 /// Essentially, it is a ceil operation across the domain of powers of two.
658 inline uint64_t PowerOf2Ceil(uint64_t A
) {
661 return NextPowerOf2(A
- 1);
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}
697 /// Returns the integer ceil(Numerator / Denominator).
698 inline uint64_t divideCeil(uint64_t Numerator
, uint64_t Denominator
) {
699 return alignTo(Numerator
, Denominator
) / Denominator
;
/// \c alignTo for contexts where a constant expression is required.
/// \c Align must be non-zero.
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");

  template <uint64_t Value>
  struct from_value {
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};
/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}
723 /// Returns the offset to the next integer (mod 2**64) that is greater than
724 /// or equal to \p Value and is a multiple of \p Align. \p Align must be
726 inline uint64_t OffsetToAlignment(uint64_t Value
, uint64_t Align
) {
727 return alignTo(Value
, Align
) - Value
;
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Shift the B-bit field to the top, then arithmetic-shift back down.
  return int32_t(X << (32 - B)) >> (32 - B);
}
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B < 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B < 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B < 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  return int64_t(X << (64 - B)) >> (64 - B);
}
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
AbsoluteDifference(T X, T Y) {
  // max - min never underflows for unsigned operands.
  return std::max(X, Y) - std::min(X, Y);
}
/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29
  T Z = X + Y;
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}
849 /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
850 extern const float huge_valf
;
851 } // End llvm namespace