/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <asm/div64.h>
#include <vdso/math64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs as an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
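/*
 * Example (illustrative only, not part of this header's API): split a
 * nanosecond count into whole seconds plus the leftover nanoseconds:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * NSEC_PER_SEC is assumed to come from <linux/time64.h>.
 */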
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
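/*
 * Usage sketch (illustrative only, made-up name): average block size
 * from 64bit byte and 32bit block totals. div_u64() keeps this cheap
 * on 32bit archs, where a plain C "/" on a u64 would need a
 * libgcc-style helper that the kernel does not provide on many
 * architectures.
 */
static inline u64 example_avg_block_size(u64 total_bytes, u32 nr_blocks)
{
	return nr_blocks ? div_u64(total_bytes, nr_blocks) : 0;
}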
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
#else

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	u64 ret;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);
	return ret;
}
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif
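/*
 * Usage sketches (illustrative only, made-up names). mul_u64_u32_shr()
 * is the usual fixed-point scaling step, e.g. a clocksource-style
 * cycle-to-nanosecond conversion computing (cycles * mult) >> shift
 * without overflowing the intermediate product. mul_u64_u64_shr() with
 * a shift of 64 yields the high half of the full 128-bit product.
 */
static inline u64 example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}

static inline u64 example_mul_u64_high(u64 a, u64 b)
{
	/* (a * b) >> 64: the upper 64 bits of the 128-bit product */
	return mul_u64_u64_shr(a, b, 64);
}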
#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
	u64 ret;

	/*
	 * Extract the sign before the multiplication and put it back
	 * afterwards if needed.
	 */
	ret = mul_u64_u64_shr(abs(a), b, shift);

	if (a < 0)
		ret = -((s64) ret);

	return ret;
}
#endif /* mul_s64_u64_shr */
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
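/*
 * Usage sketch (illustrative only, made-up names): rescale a counter
 * from one frequency to another as value * new_rate / old_rate. The
 * helper keeps the full up-to-96-bit intermediate product, which a
 * plain u64 multiply would truncate.
 */
static inline u64 example_rescale_counter(u64 value, u32 new_rate, u32 old_rate)
{
	return mul_u64_u32_div(value, new_rate, old_rate);
}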
u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
/**
 * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV_U64_ROUND_UP(ll, d)		\
	({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
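/*
 * Usage sketch (illustrative only): how many fixed-size chunks does a
 * 64bit byte count occupy? Round the division up; 4096 stands in for
 * PAGE_SIZE from <asm/page.h> here.
 */
static inline u64 example_bytes_to_pages(u64 bytes)
{
	return DIV_U64_ROUND_UP(bytes, 4096);
}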
/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
/**
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(	\
{							\
	s64 __x = (dividend);				\
	s32 __d = (divisor);				\
	((__x > 0) == (__d > 0)) ?			\
		div_s64((__x + (__d / 2)), __d) :	\
		div_s64((__x - (__d / 2)), __d);	\
}							\
)
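/*
 * Behavior sketch (illustrative only): DIV_S64_ROUND_CLOSEST() rounds
 * half away from zero, biasing toward the sign of the quotient, e.g.
 *
 *	DIV_S64_ROUND_CLOSEST(7, 2) == 4
 *	DIV_S64_ROUND_CLOSEST(-7, 2) == -4
 *
 * where plain C division would truncate both toward zero (3 and -3).
 */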
/**
 * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
 * @x: the value to round up
 * @y: 32bit multiple to round up to
 *
 * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and
 * the faster round_up() for powers of 2.
 *
 * Return: rounded up value.
 */
static inline u64 roundup_u64(u64 x, u32 y)
{
	return DIV_U64_ROUND_UP(x, y) * y;
}
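/*
 * Example (illustrative only): align a 64bit byte count up to a
 * 512-byte sector boundary, e.g. roundup_u64(1000, 512) == 1024.
 */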
#endif /* _LINUX_MATH64_H */