/*
 * $Id: bitops.h,v 1.11 1999/01/03 20:16:48 cort Exp $
 * bitops.h: Bit string operations on the ppc
 */

#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <asm/system.h>
#include <asm/byteorder.h>

extern void set_bit(int nr, volatile void *addr);
extern void clear_bit(int nr, volatile void *addr);
extern void change_bit(int nr, volatile void *addr);
extern int test_and_set_bit(int nr, volatile void *addr);
extern int test_and_clear_bit(int nr, volatile void *addr);
extern int test_and_change_bit(int nr, volatile void *addr);
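
/*
 * Editorial sketch, not part of the original header: the declarations
 * above are the atomic entry points (their out-of-line definitions live
 * elsewhere in the kernel). Assuming only their declared semantics, a
 * test_and_set_bit()/clear_bit() pair is enough for a crude one-bit
 * spin lock; "example_flags" and EXAMPLE_LOCK_BIT are hypothetical
 * names invented for illustration, and the code is guarded out like
 * the other non-build examples in this file.
 */
#if 0
#define EXAMPLE_LOCK_BIT 0
static unsigned long example_flags;

static void example_bit_lock(void)
{
        /* spin until the bit was previously clear, i.e. we took the lock */
        while (test_and_set_bit(EXAMPLE_LOCK_BIT, &example_flags))
                ;
        /* ... critical section ... */
        clear_bit(EXAMPLE_LOCK_BIT, &example_flags);
}
#endif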

/* Returns the number of 0's to the left of the most significant 1 bit */
extern __inline__ int cntlzw(int bits)
{
        int lz;

        asm ("cntlzw %0,%1" : "=r" (lz) : "r" (bits));
        return lz;
}
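
/*
 * Editorial worked example, assuming the standard PPC "cntlzw"
 * count-leading-zeros semantics for 32-bit words:
 *      cntlzw(0x80000000) == 0    (the MSB is already set)
 *      cntlzw(1)          == 31   (only the LSB is set)
 *      cntlzw(0)          == 32   (no bit set at all)
 */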

/*
 * These are if'd out here because using : "cc" as a constraint
 * results in errors from gcc. -- Cort
 * Besides, they need to be changed so we have both set_bit
 * and test_and_set_bit, etc.
 */
#if 0
extern __inline__ int set_bit(int nr, void * addr)
{
        unsigned long old, t;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        /*
         * Atomic read-modify-write: lwarx loads the word and takes a
         * reservation, stwcx. stores only if the reservation still
         * holds, and bne retries the loop if the store failed.
         */
        __asm__ __volatile__(
                "1:lwarx %0,0,%3 \n\t"
                "or     %1,%0,%2 \n\t"
                "stwcx. %1,0,%3 \n\t"
                "bne    1b \n\t"
                : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
                : "r" (mask), "r" (p)
                /*: "cc" */);

        return (old & mask) != 0;
}

extern __inline__ unsigned long clear_bit(unsigned long nr, void *addr)
{
        unsigned long old, t;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%3
        andc    %1,%0,%2
        stwcx.  %1,0,%3
        bne     1b"
        : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
        : "r" (mask), "r" (p)
        /*: "cc"*/);

        return (old & mask) != 0;
}

extern __inline__ unsigned long change_bit(unsigned long nr, void *addr)
{
        unsigned long old, t;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%3
        xor     %1,%0,%2
        stwcx.  %1,0,%3
        bne     1b"
        : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
        : "r" (mask), "r" (p)
        /*: "cc"*/);

        return (old & mask) != 0;
}
#endif

extern __inline__ unsigned long test_bit(int nr, __const__ volatile void *addr)
{
        __const__ unsigned int *p = (__const__ unsigned int *) addr;

        return (p[nr >> 5] >> (nr & 0x1f)) & 1UL;
}

extern __inline__ int ffz(unsigned int x)
{
        int n;

        if (x == ~0)
                return 32;
        x = ~x & (x+1);         /* set LS zero to 1, other bits to 0 */
        __asm__ ("cntlzw %0,%1" : "=r" (n) : "r" (x));
        return 31 - n;
}
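
/*
 * Editorial worked example of the ffz() trick above: for x = 0xff,
 * ~x & (x+1) = 0xffffff00 & 0x00000100 = 0x100, cntlzw gives 23,
 * and 31 - 23 = 8, so ffz(0xff) == 8: bit 8 is the lowest zero bit.
 */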

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

#if 0
/* untested, someone with PPC knowledge? */
/* From Alexander Kjeldaas <astor@guardian.no> */
extern __inline__ int ffs(int x)
{
        int result;
        asm ("cntlzw %0,%1" : "=r" (result) : "r" (x));
        return 32 - result; /* IBM backwards ordering of bits */
}
#endif
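
/*
 * Editorial note: the disabled variant above does match the libc ffs()
 * convention, assuming cntlzw returns 32 for a zero input:
 * ffs(0) = 32 - 32 = 0, ffs(1) = 32 - 31 = 1, ffs(0x80000000) = 32.
 */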

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long find_next_zero_bit(void * addr,
        unsigned long size, unsigned long offset)
{
        unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
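
/*
 * Editorial worked example: for a single bitmap word holding
 * 0x0000ffff, find_first_zero_bit(&word, 32) reads tmp = 0x0000ffff,
 * jumps to found_middle, and returns 0 + ffz(0x0000ffff) == 16, the
 * first clear bit above the sixteen set low-order bits.
 */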

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
#define ext2_set_bit(nr, addr)          test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr) ^ 0x18, addr)
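
/*
 * Editorial note on the "^ 0x18": ext2 bitmaps are little-endian, but
 * test_and_set_bit() numbers bits within a big-endian 32-bit word.
 * For nr = 8*k + b (byte k of the word, bit b), nr ^ 0x18 = 8*(3-k) + b,
 * which is exactly where that little-endian bit lands in the
 * big-endian word.
 */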

#else
extern __inline__ int ext2_set_bit(int nr, void * addr)
{
        int mask;
        unsigned char *ADDR = (unsigned char *) addr;
        int oldbit;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        oldbit = (*ADDR & mask) ? 1 : 0;
        *ADDR |= mask;
        return oldbit;
}

extern __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int mask;
        unsigned char *ADDR = (unsigned char *) addr;
        int oldbit;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        oldbit = (*ADDR & mask) ? 1 : 0;
        *ADDR = *ADDR & ~mask;
        return oldbit;
}
#endif /* __KERNEL__ */

extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

        return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
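
/*
 * Editorial note: ext2_test_bit() addresses the bitmap as a byte
 * stream, independent of host endianness: bit nr lives in byte
 * nr >> 3 at position nr & 7. For example, ext2_test_bit(13, map)
 * reads bit 5 of map's second byte.
 */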

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */
#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = cpu_to_le32p(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = cpu_to_le32p(p);
found_first:
        tmp |= ~0U << size;
found_middle:
        return result + ffz(tmp);
}
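
/*
 * Editorial note: the only change from find_next_zero_bit() above is
 * that each 32-bit word goes through cpu_to_le32p(), which byte-swaps
 * on big-endian PPC, so ffz()'s little-endian bit numbering lines up
 * with the little-endian on-disk ext2 bitmap.
 */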

/* Bitmap functions for the minix filesystem. */
#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PPC_BITOPS_H */