x86: move NUMAQ io handling into arch/x86/pci/numa.c
[wrt350n-kernel.git] / lib / div64.c
blob b71cf93c529adaa3364afa4a1b997c54cc9ef363
1 /*
2 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
4 * Based on former do_div() implementation from asm-parisc/div64.h:
5 * Copyright (C) 1999 Hewlett-Packard Co
6 * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
9 * Generic C version of 64bit/32bit division and modulo, with
10 * 64bit result and 32bit remainder.
12 * The fast case for (n>>32 == 0) is handled inline by do_div().
14 * Code generated for this function might be very inefficient
15 * for some CPUs. __div64_32() can be overridden by linking arch-specific
16 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <asm/div64.h>
23 /* Not needed on 64bit architectures */
24 #if BITS_PER_LONG == 32
/*
 * Generic 64bit/32bit division: divides *n by base in place, leaving
 * the 64bit quotient in *n and returning the 32bit remainder.
 *
 * Declared weak so an architecture can supply a faster assembly
 * replacement without touching this file.
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t remainder = *n;
	uint64_t divisor = base;
	uint64_t quotient = 0;
	uint64_t bit = 1;
	uint32_t upper = remainder >> 32;

	/*
	 * Knock down the top 32 bits first with a cheap 32bit division,
	 * so the shift-and-subtract loop below has less work to do.
	 * upper/base fits in 32 bits, hence upper*base cannot overflow.
	 */
	if (upper >= base) {
		upper /= base;
		quotient = (uint64_t)upper << 32;
		remainder -= (uint64_t)(upper * base) << 32;
	}

	/*
	 * Scale the divisor up until it is just below the remainder.
	 * The (int64_t) test stops before bit 63 is shifted out.
	 */
	while ((int64_t)divisor > 0 && divisor < remainder) {
		divisor <<= 1;
		bit <<= 1;
	}

	/* Classic restoring (shift-and-subtract) division. */
	do {
		if (remainder >= divisor) {
			remainder -= divisor;
			quotient += bit;
		}
		divisor >>= 1;
		bit >>= 1;
	} while (bit);

	*n = quotient;
	return remainder;
}
59 EXPORT_SYMBOL(__div64_32);
/*
 * div64_64 - 64bit/64bit division with 64bit result.
 *
 * The original shifted both operands right by fls(high) and divided the
 * truncated values, which discards low divisor bits and can return a
 * quotient that is off by one (or more, for some operand pairs).  Use
 * the exact scheme adopted upstream for div64_u64(): compute the same
 * scaled estimate, bias it down by one so it can only be an
 * underestimate, then correct upward with a single comparison.
 */
uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		/* Divisor fits in 32 bits: plain do_div() is exact. */
		quot = dividend;
		do_div(quot, (uint32_t)divisor);
	} else {
		/*
		 * fls(high) is in 1..32, so divisor >> n fits in 32 bits
		 * and do_div() can be used on the scaled operands.
		 */
		int n = fls(high);

		quot = dividend >> n;
		do_div(quot, (uint32_t)(divisor >> n));

		/*
		 * The scaled division overestimates by at most 1;
		 * after the decrement the estimate underestimates by
		 * at most 1, so one correction step suffices.
		 */
		if (quot != 0)
			quot--;
		if (dividend - quot * divisor >= divisor)
			quot++;
	}

	return quot;
}
79 EXPORT_SYMBOL(div64_64);
81 #endif /* BITS_PER_LONG == 32 */