// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */
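
/*
 * Usage sketch for do_div() (illustrative values): the macro divides a
 * 64-bit lvalue in place and returns the 32-bit remainder:
 *
 *	u64 ns = 1000000123;
 *	u32 rem = do_div(ns, 1000000000);	// ns == 1, rem == 123
 */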

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/log2.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/* Scale the divisor up to just above the remaining dividend */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Classic shift-and-subtract long division */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
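
/*
 * Worked example for __div64_32() (illustrative): with *n = 100 and
 * base = 7, the doubling loop leaves b = 112, d = 16; the subtract
 * loop then peels off 56, 28 and 14 (d = 8, 4, 2), ending with
 * res = 14 and rem = 2, i.e. 100 == 14 * 7 + 2.
 */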

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
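
/*
 * Semantics sketch for div_s64_rem() (illustrative values): like C99
 * division, the quotient truncates toward zero and the remainder takes
 * the dividend's sign:
 *
 *	s32 r;
 *	div_s64_rem(-7, 2, &r);		// returns -3, r == -1
 *	div_s64_rem(7, -2, &r);		// returns -3, r == 1
 */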

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		/* The shifted estimate can be one too large; drop it */
		if (quot != 0)
			quot--;

		/* ... and add it back if the remainder still covers a divisor */
		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
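
/*
 * Usage sketch for div64_u64_rem() (illustrative values): a divisor
 * wider than 32 bits takes the estimate-and-correct path, e.g.:
 *
 *	u64 rem;
 *	u64 q = div64_u64_rem(10000000000ULL, 6000000000ULL, &rem);
 *	// q == 1, rem == 4000000000
 */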

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and are available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		/*
		 * Shift both operands right until the divisor fits in
		 * 32 bits, estimate the quotient, then correct the
		 * estimate, which is off by at most one in each direction
		 * after the decrement.
		 */
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
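
/*
 * Worked example for div64_u64() (illustrative): for
 * dividend = 0x100000005 and divisor = 0x100000000, fls(high) = 1, so
 * the estimate is 0x80000002 / 0x80000000 = 1; after the decrement
 * (quot = 0) the final check sees dividend - 0 >= divisor and restores
 * quot = 1, the exact quotient.
 */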

#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
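
/*
 * The sign trick in div64_s64(), spelled out: t is 0 when the operands
 * have the same sign and ~0 (all ones) when they differ, so
 * (quot ^ t) - t leaves quot unchanged in the first case and computes
 * its two's-complement negation (~quot + 1) in the second.
 */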

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
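
/*
 * Note (illustrative): __iter_div_u64_rem() subtracts the divisor in a
 * loop, so its cost grows with the quotient; prefer it over a full
 * 64-bit division only when the quotient is expected to be small,
 * e.g. normalizing nanoseconds that rarely exceed a few seconds.
 */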

#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	/* If the product fits in 64 bits, a plain division suffices */
	if (ilog2(a) + ilog2(b) <= 62)
		return div64_u64(a * b, c);

#if defined(__SIZEOF_INT128__)

	/* native 64x64=128 bits multiplication */
	u128 prod = (u128)a * b;
	u64 n_lo = prod, n_hi = prod >> 64;

#else

	/* perform a 64x64=128 bits multiplication manually */
	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
	u64 x, y, z;

	/* Accumulate the four 32x32 partial products into (z:x) */
	x = (u64)a_lo * b_lo;
	y = (u64)a_lo * b_hi + (u32)(x >> 32);
	z = (u64)a_hi * b_hi + (u32)(y >> 32);
	y = (u64)a_hi * b_lo + (u32)y;
	z += (u32)(y >> 32);
	x = (y << 32) + (u32)x;

	u64 n_lo = x, n_hi = z;

#endif

	/* make sure c is not zero, trigger exception otherwise */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdiv-by-zero"
	if (unlikely(c == 0))
		return 1/0;
#pragma GCC diagnostic pop

	int shift = __builtin_ctzll(c);

	/* try reducing the fraction in case the dividend becomes <= 64 bits */
	if ((n_hi >> shift) == 0) {
		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;

		return div64_u64(n, c >> shift);

		/*
		 * The remainder value if needed would be:
		 *	res = div64_u64_rem(n, c >> shift, &rem);
		 *	rem = (rem << shift) + (n_lo - (n << shift));
		 */
	}

	if (n_hi >= c) {
		/* overflow: result is unrepresentable in a u64 */
		return -1;
	}

	/* Do the full 128 by 64 bits division */

	shift = __builtin_clzll(c);
	c <<= shift;

	int p = 64 + shift;
	u64 res = 0;
	bool carry;

	do {
		carry = n_hi >> 63;
		shift = carry ? 1 : __builtin_clzll(n_hi);
		if (p < shift)
			break;
		p -= shift;
		n_hi <<= shift;
		n_hi |= n_lo >> (64 - shift);
		n_lo <<= shift;
		if (carry || (n_hi >= c)) {
			n_hi -= c;
			res |= 1ULL << p;
		}
	} while (n_hi);
	/* The remainder value if needed would be n_hi << p */

	return res;
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
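
/*
 * Usage sketch for mul_u64_u64_div_u64() (illustrative values; cycles
 * and rate_hz are placeholder variables): it computes a * b / c without
 * losing the high bits of the 128-bit intermediate product, e.g. when
 * scaling a cycle count by a rational clock ratio:
 *
 *	u64 ns = mul_u64_u64_div_u64(cycles, 1000000000ULL, rate_hz);
 *	// exact even when cycles * 1000000000 overflows 64 bits
 */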