#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <linux/config.h>
#include <asm/compiler.h>

/*
 * Copyright 1994, Linus Torvalds.
 */
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
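/*
 * For example, nr == 65 selects 32-bit longword ((int *) addr)[2]
 * (65 >> 5 == 2) and bit 65 & 31 == 1 within it.
 */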
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non-atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
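/*
 * __set_bit and the other double-underscore variants below are safe
 * only when the bitmap cannot change underneath them, e.g. under a
 * lock held by the caller or during single-threaded initialization.
 */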
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
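/*
 * Note the short-circuit: if "and" finds the bit already set,
 * "bne %2,2f" skips the store entirely.  The mb emitted on SMP
 * provides the memory-barrier semantics callers of test_and_set_bit
 * rely on for locking.
 */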
/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}
static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
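/*
 * Worked example: x == 0x07, first zero at bit 3.  ~x & -~x == 0x08;
 * x1 == 0x08 & 0xAA == 0x08, x2 == 0x08 & 0xCC == 0x08, and
 * x4 == 0x08 & 0xF0 == 0, so sum == 2 + 0 + 1 == 3.
 */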
static inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
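/*
 * cmpbge(word, ~0UL) sets bit i of its result iff byte i of word is
 * 0xff, so the first zero bit of the result indexes the first byte
 * containing a zero; extbl extracts that byte and a second ffz_b
 * locates the bit within it.
 */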
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
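/*
 * Here cmpbge(0, word) sets bit i iff byte i of word is zero, so the
 * first zero bit of the result indexes the first nonzero byte.
 */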
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
/*
 * fls: find last bit set.
 */
#if defined(__alpha_cix__) && defined(__alpha_fix__)
static inline int fls(int word)
{
	return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
#define fls	generic_fls
#endif
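/*
 * Example: fls(0x40000000) == 31, since ctlz sees 33 leading zeros in
 * the zero-extended 64-bit value; fls(0) == 0 falls out naturally
 * because ctlz(0) == 64.
 */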
/* Compute powers of two for the given integer. */
static inline long floor_log2(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	return 63 - __kernel_ctlz(word);
#else
	long bit;
	for (bit = -1; word ; bit++)
		word >>= 1;
	return bit;
#endif
}
static inline long ceil_log2(unsigned long word)
{
	long bit = floor_log2(word);
	return bit + (word > (1UL << bit));
}
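/*
 * Example: floor_log2(9) == 3 and ceil_log2(9) == 4, since
 * 9 > (1UL << 3); for exact powers of two the two agree.
 */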
/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)
#else
static inline unsigned long hweight64(unsigned long w)
{
	unsigned long result;
	for (result = 0; w ; w >>= 1)
		result += (w & 1);
	return result;
}

#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)
#endif
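/*
 * Example: hweight64(0xf0f0) == 8, while hweight8(0xf0f0) == 4
 * because the argument is first masked down to its low byte (0xf0).
 */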
#endif /* __KERNEL__ */
/*
 * Find next zero bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	p += offset >> 6;
	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
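/*
 * The masks above handle the two partial words: ~0UL >> (64-offset)
 * forces the bits below "offset" to 1 so they are never reported, and
 * ~0UL << size does the same for bits past the end of the bitmap.
 */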
/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	p += offset >> 6;
	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < 64)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= ~0UL >> (64 - size);
	if (!tmp)
		return result + size;
found_middle:
	return result + __ffs(tmp);
}
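/*
 * find_next_bit mirrors find_next_zero_bit with the tests inverted:
 * out-of-range bits are masked to 0 (AND) instead of 1 (OR), and
 * __ffs replaces ffz in the final step.
 */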
/*
 * The optimizer actually does good code for this case.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);
	b0 = (b0 ? b0 : b1);

	return __ffs(b0) + ofs;
}
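/*
 * The paired conditionals are written so the compiler can use
 * conditional moves, selecting the first nonzero word and its base
 * offset without branching: b1/ofs first collapse b[1] vs. b[2],
 * then b0/ofs prefer b[0] over that result.  E.g. b[0] == b[1] == 0,
 * b[2] == 4 gives __ffs(4) + 128 == 130.
 */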
#define ext2_set_bit			__test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */