/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

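/*
 * Usage sketch (illustrative, not part of this header): computing a mean
 * and remainder from hypothetical 64-bit counters; "total_ns" and
 * "nr_events" are assumed caller variables, not kernel symbols:
 *
 *	u32 rem;
 *	u64 mean_ns = div_u64_rem(total_ns, nr_events, &rem);
 */
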
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

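/*
 * Usage sketch (illustrative): a full 64-by-64 divide; "bytes" and
 * "elapsed_secs" are hypothetical caller variables:
 *
 *	u64 rate = div64_u64(bytes, elapsed_secs);
 */
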
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

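/*
 * Semantics sketch (illustrative): do_div() from asm/div64.h divides its
 * first argument in place (it becomes the quotient) and expands to the
 * 32-bit remainder, which is why the body above can return dividend:
 *
 *	u64 n = 1000003;
 *	u32 r = do_div(n, 10);	// afterwards n == 100000, r == 3
 */
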
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

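/*
 * Usage sketch (illustrative): the common case of a 64-bit value divided
 * by a 32-bit constant; "ns" is a hypothetical caller variable:
 *
 *	u64 ms = div_u64(ns, 1000000);	// nanoseconds to milliseconds
 */
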
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

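/*
 * Usage sketch (illustrative): __iter_div_u64_rem() divides by repeated
 * subtraction, so it only pays off when the quotient is known to be
 * small, e.g. normalising a nanosecond count that is at most a few
 * seconds; "ns" is a hypothetical caller variable and NSEC_PER_SEC comes
 * from linux/time64.h:
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */
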
#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

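/*
 * Illustrative note: the cast must happen before the multiply. Without
 * it the product is computed in 32 bits and truncated:
 *
 *	u32 x = 0x10000, y = 0x10000;
 *	u64 wrong = x * y;		// 0: the 32-bit multiply wraps
 *	u64 right = mul_u32_u32(x, y);	// 0x100000000
 */
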
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

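/*
 * Usage sketch (illustrative): mul_u64_u32_shr() evaluates (a * mul) >>
 * shift without overflowing the intermediate product, the usual
 * fixed-point scaling idiom. With hypothetical values:
 *
 *	u32 factor = 0xC0000000;			// 0.75 in 0.32 fixed point
 *	u64 scaled = mul_u64_u32_shr(1000, factor, 32);	// == 750
 */
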
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

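/*
 * Worked check for the fallback path (hypothetical values): with
 * a = 0x100000001 (al = 1, ah = 1), mul = 6, shift = 1:
 *
 *	ret  = (1 * 6) >> 1;	// 3
 *	ret += (1 * 6) << 31;	// + 0x300000000
 *
 * matching ((a * 6) >> 1) == 0x300000003.
 */
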
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

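/*
 * Usage sketch (illustrative): mul_u64_u64_shr() evaluates (a * b) >> shift
 * with a full 128-bit intermediate product, e.g. scaling by a 64-bit
 * fixed-point ratio with hypothetical values:
 *
 *	u64 ratio = 3ULL << 31;				// 1.5 in 32.32 fixed point
 *	u64 r = mul_u64_u64_shr(10, ratio, 32);		// == 15
 */
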
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */

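/*
 * Usage sketch (illustrative): mul_u64_u32_div() computes a * mul / divisor
 * without overflowing the intermediate product, e.g. rescaling a counter
 * from one clock rate to another; "ticks", "new_hz" and "old_hz" are
 * hypothetical caller variables:
 *
 *	u64 remapped = mul_u64_u32_div(ticks, new_hz, old_hz);
 */
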
#endif /* _LINUX_MATH64_H */