linux-2.6/verdex.git: arch/sparc64/lib/find_bit.c
#include <linux/bitops.h>

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	const unsigned long *p = addr + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < 64)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (64 - size));
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
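
/*
 * Editor's sketch, not part of the original file: a typical caller walks
 * every set bit by feeding the previous result back in as the next offset.
 * The helper name and the #if 0 guard are ours; it is never compiled.
 */
#if 0
static unsigned long example_count_set_bits(const unsigned long *map,
					    unsigned long nbits)
{
	unsigned long bit, count = 0;

	for (bit = find_next_bit(map, nbits, 0); bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		count++;		/* 'bit' is the index of a set bit */
	return count;
}
#endif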

/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */
unsigned long find_next_zero_bit(const unsigned long *addr,
				 unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64 - offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
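
/*
 * Editor's sketch, not part of the original file: find_next_zero_bit() is
 * the usual way to claim the first free slot in an allocation bitmap.  The
 * helper name and the #if 0 guard are ours; it is never compiled.
 */
#if 0
static long example_alloc_slot(unsigned long *map, unsigned long nbits)
{
	unsigned long slot = find_next_zero_bit(map, nbits, 0);

	if (slot >= nbits)
		return -1;		/* no zero bit: bitmap is full */
	__set_bit(slot, map);		/* mark the slot as in use */
	return slot;
}
#endif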

unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = addr + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = __swab64p(p++);
		tmp |= (~0UL >> (64 - offset));
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63) {
		if (~(tmp = __swab64p(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = __swab64p(p);
found_first:
	tmp |= (~0UL << size);
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
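
/*
 * Editor's sketch, not part of the original file: the _le_ variant loads
 * each 64-bit word through __swab64p(), so the bitmap is interpreted as
 * little-endian even on big-endian sparc64, which is the layout ext2-style
 * on-disk bitmaps use.  The helper name and the #if 0 guard are ours; it is
 * never compiled.
 */
#if 0
static long example_find_free_le(unsigned long *disk_map, unsigned long nbits)
{
	unsigned long slot = find_next_zero_le_bit(disk_map, nbits, 0);

	return slot < nbits ? (long)slot : -1;	/* -1: no free bit found */
}
#endif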