/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
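/*
 * Every operation below splits a bit number into a word index and an
 * in-word offset.  A worked example (illustrative only): with 32-bit
 * longs, bit nr = 42 lives in word 42 >> SZLONG_LOG = 1 at in-word
 * position 42 & SZLONG_MASK = 10, i.e. bit 10 of addr[1].
 */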
#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
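/*
 * A minimal usage sketch (illustrative only; clear_bit() itself is defined
 * further down, and BUSY_BIT/flags are made-up names).  When a bit doubles
 * as a lock or "busy" flag, order the protected stores before the release:
 *
 *	... update the protected data ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &flags);
 */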
/*
 * Only disable interrupts for kernel mode stuff, to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
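/*
 * Usage sketch (illustrative only; the bitmap and bit numbers below are
 * made-up examples, not part of this header):
 *
 *	unsigned long pending[2] = { 0, 0 };
 *
 *	set_bit(3, pending);		(bit 3 of pending[0])
 *	set_bit(42, pending);		(with 32-bit longs: bit 10 of pending[1])
 */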
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}
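/*
 * Illustrative contrast with set_bit() (example only): the non-atomic form
 * is appropriate while the bitmap is still private to one thread of control,
 * e.g. while a freshly allocated map is being initialised before it becomes
 * visible to other CPUs or to interrupt context.
 *
 *	unsigned long map[4] = { 0, };
 *
 *	__set_bit(0, map);		(no ll/sc or irq-disable overhead)
 */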
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
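/*
 * Usage sketch (illustrative only, not part of the original API): because
 * test_and_set_bit() is atomic and implies a memory barrier, it can serve
 * as a simple one-bit try-lock.  The helper name and bit number are made up.
 */
static inline int bitops_example_trylock(volatile unsigned long *word)
{
	/* Returns 1 if bit 0 was clear and we now own it, 0 otherwise. */
	return !test_and_set_bit(0, word);
}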
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}
#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore
/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
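/*
 * Illustrative example (not part of the original header): test_bit()
 * indexes into the bitmap exactly like the modifying operations above.
 * The helper name and bit number are made up.
 */
static inline int bitops_example_is_pending(const volatile unsigned long *map)
{
	/* Non-zero if bit 42 of the caller's bitmap is currently set. */
	return test_bit(42, map);
}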
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__("clz %0, %1" : "=r" (lz) : "r" (x));
		return 31 - lz;
	}
	BUG_ON(sizeof(x) != 8);
	__asm__("dclz %0, %1" : "=r" (lz) : "r" (x));
	return 63 - lz;
}
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	return __ilog2(word & -word);
#else
	int b = 0, s;

#if (_MIPS_SZLONG == 32)
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;
#elif (_MIPS_SZLONG == 64)
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;
#endif

	return b;
#endif
}
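/*
 * Worked example (illustrative only): __ffs(0x00a0) isolates the lowest
 * set bit via word & -word = 0x0020 and then takes its log2, giving 5.
 * __ffs(0) is undefined, which is why callers check for zero first.
 */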
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}
/*
 * flz - find last zero in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long flz(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	return __ilog2(~word);
#else
	int r, s;

	word = ~word;
#if (_MIPS_SZLONG == 32)
	r = 31;
	s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x80000000)) s = 0; r -= s;
#elif (_MIPS_SZLONG == 64)
	r = 63;
	s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
	s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
	s =  8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
	s =  4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
	s =  1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
#endif

	return r;
#endif
}
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
	if (word == 0)
		return 0;

	return flz(~word) + 1;
}
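/*
 * How these helpers relate (worked example, illustrative only): for
 * word = 0x90, __ffs() = 4, ffs() = 5, fls() = 8, and ffz() = 0 since
 * bit 0 is clear.  ffs() and fls() are one more than the 0-based index
 * returned by __ffs() and flz(~word) respectively.
 */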
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
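/*
 * Usage sketch (illustrative only; the helper name and the 128-bit size
 * are made up): find a free slot in a fixed-size allocation bitmap.
 */
static inline int bitops_example_find_free_slot(const unsigned long *map)
{
	/* Returns 128 if every bit in the 128-bit map is already set. */
	return find_first_zero_bit(map, 128);
}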
/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
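/*
 * Usage sketch (illustrative only; the helper name is made up): the usual
 * way to walk every set bit in a region is to chain find_first_bit() and
 * find_next_bit() until the returned index reaches the region size.
 */
static inline int bitops_example_count_set(const unsigned long *map,
	unsigned long nbits)
{
	unsigned long bit;
	int count = 0;

	for (bit = find_first_bit(map, nbits); bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		count++;

	return count;
}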
#ifdef __KERNEL__

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#if (_MIPS_SZLONG == 32)
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (unlikely(b[3]))
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#elif (_MIPS_SZLONG == 64)
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}
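/*
 * Layout note (illustrative): the scheduler's 140-entry priority bitmap
 * spans five 32-bit words (or three 64-bit words), so the fixed
 * "+ 32/64/96/128" offsets above simply rebase __ffs()'s word-local
 * result to the absolute bit number within the 140-bit map.
 */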
/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x)	generic_hweight64(x)
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)
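/*
 * Worked example (illustrative only): hweight8(0xb3) counts the set bits
 * of 1011 0011b and returns 5.  The generic_hweight*() helpers are
 * provided by the common kernel bitops code rather than by MIPS asm.
 */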
static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}
static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}
static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}
static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);	/* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr)	test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)
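/*
 * Note on bit numbering (illustrative): ext2 stores its bitmaps in
 * little-endian byte order, so ext2_set_bit(0, addr) always touches bit 0
 * of the first byte, regardless of the CPU's native endianness.  On
 * big-endian MIPS that is a different bit of memory than set_bit(0, addr)
 * would modify.
 */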
/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */