[drm/drm-misc.git] include/vdso/math64.h
blob 22ae212f8b28fe0323d3e5b707c09d516a282b83
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_add_u64_shr
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */
#else

#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	/* Low 32x32 product plus b; ovf records the carry out of the 64-bit sum. */
	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	/* Re-insert the lost carry (bit 64) at its post-shift position. */
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	/* The high 32x32 product enters at bit 32, i.e. shifted left by 32 - shift. */
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif

#endif	/* __VDSO_MATH64_H */
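
As a usage sketch only (assumption: a plain userspace build with GCC or Clang; the stdint typedefs, the copied helper and main() below are my own scaffolding, not part of the header), __iter_div_u64_rem() returns the quotient by repeated subtraction and writes the remainder through the pointer, which is why it only makes sense when the caller knows the quotient is small:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Copy of the helper above, with the kernel-only __always_inline dropped. */
static inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* Same empty asm() trick: keep the loop a real loop. */
		asm("" : "+rm"(dividend));
		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;
	return ret;
}

int main(void)
{
	u64 rem;
	/* 3.5 seconds worth of nanoseconds: quotient 3, remainder 500000000. */
	u32 sec = __iter_div_u64_rem(3500000000ULL, 1000000000U, &rem);

	printf("sec=%u rem=%llu\n", sec, (unsigned long long)rem);
	return 0;
}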
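
Likewise a hedged sketch (assumption: a 64-bit GCC/Clang target where unsigned __int128 and __builtin_add_overflow() are available; the copied helpers and main() are scaffolding): the fallback decomposition should agree with the 128-bit expression ((a * mul) + b) >> shift for shifts up to 32, the range the 32-bit split relies on.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* Copy of the !CONFIG_ARCH_SUPPORTS_INT128 fallback above. */
static u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);
	return ret;
}

int main(void)
{
	u64 a = 0x123456789abcdef0ULL;
	u32 mul = 0xdeadbeefU;
	u64 b = 0x0fedcba987654321ULL;
	unsigned int shift = 32;

	/* Reference result via the same 128-bit expression the int128 branch uses. */
	u64 want = (u64)((((unsigned __int128)a * mul) + b) >> shift);

	assert(mul_u64_u32_add_u64_shr(a, mul, b, shift) == want);
	return 0;
}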