#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * Require 68020 or better.
 *
 * They use the standard big-endian m680x0 bit ordering.
 */

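/*
 * Illustration (not part of the original header): callers see the usual
 * Linux bit numbering, i.e. bit nr lives in the unsigned long at index
 * (nr >> 5) under the mask (1UL << (nr & 31)), exactly as test_bit()
 * below computes it.  For example, starting from a zeroed bitmap:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(33, map);		// map[1] now reads as 0x2
 *
 * The big-endian byte layout only matters inside the helpers, where the
 * bit number is translated for the bset/bfset instruction families.
 */
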
#define test_and_set_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_set_bit(nr, vaddr) : \
   __generic_test_and_set_bit(nr, vaddr))

#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)

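/*
 * Note on the constant/generic split above (and for the other bit
 * operations below): when nr is a compile-time constant the __constant_
 * variant can address the byte that holds the bit directly and use a
 * plain bset/bclr/bchg, while the __generic_ variant falls back to the
 * 68020+ bit-field instructions (bfset/bfclr/bfchg) with a run-time bit
 * number.
 */
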
static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/*
	 * (nr ^ 31) / 8 is the offset of the byte that holds bit nr in the
	 * big-endian layout; nr & 7 is the bit within that byte.
	 */
	__asm__ __volatile__ ("bset %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

#define set_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_set_bit(nr, vaddr) : \
   __generic_set_bit(nr, vaddr))

#define __set_bit(nr,vaddr) set_bit(nr,vaddr)

static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

#define test_and_clear_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_clear_bit(nr, vaddr) : \
   __generic_test_and_clear_bit(nr, vaddr))

#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)

static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

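/*
 * Illustrative pairing (names below are placeholders, not part of this
 * header): callers that rely on memory ordering around a lock-like
 * clear_bit() add the barrier explicitly, e.g.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_FLAG_BIT, &my_flags);
 */
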
#define clear_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_clear_bit(nr, vaddr) : \
   __generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)

static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

#define test_and_change_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_test_and_change_bit(nr, vaddr) : \
   __generic_test_and_change_bit(nr, vaddr))

#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)

static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

#define change_bit(nr,vaddr) \
  (__builtin_constant_p(nr) ? \
   __constant_change_bit(nr, vaddr) : \
   __generic_change_bit(nr, vaddr))

static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}

static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	/*
	 * bfffo finds the first set bit counting from the MSB; XOR with 31
	 * converts that offset to the usual LSB-relative bit number.
	 */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

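/*
 * Worked example (illustrative): with
 *
 *	unsigned long map[2] = { ~0UL, ~0UL & ~2UL };
 *
 * find_first_zero_bit(map, 64) skips the all-ones first word and returns
 * 33, the bitmap-wide number of the clear bit 1 in map[1].
 */
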
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}

	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}

	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}

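/*
 * Example (illustrative): ffz(0xffffffef) == 4, since bit 4 is the lowest
 * clear bit; ffz(~0UL) is undefined, as noted above.
 */
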
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));

	return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)

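/*
 * Examples (illustrative): ffs(0) == 0 and ffs(0x10) == 5, matching the
 * 1-based libc convention, while __ffs(0x10) == 4 (0-based, only valid
 * for a non-zero argument).
 */
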
/*
 * fls: find last bit set.
 */
static inline int fls(int x)
{
	int cnt;
	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));

	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}

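/*
 * Examples (illustrative): fls(0) == 0, fls(0x10) == 5 (1-based position
 * of the highest set bit), and __fls(0x10) == 4.
 */
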
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/* Bitmap functions for the little endian bitmap. */

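/*
 * Illustration (not part of the original header): a little-endian bitmap
 * numbers bit nr within byte (nr >> 3) of memory, least significant bit
 * first.  On this big-endian machine the bytes of each 32-bit word are
 * stored in the opposite order, so nr ^ 24 flips the byte index within
 * the word (0<->3, 1<->2) while keeping the bit within the byte.  For
 * example, __set_bit_le(0, p) becomes __set_bit(24, p), which sets the
 * least significant bit of the first byte in memory.
 */
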
static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ 24, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ 24, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
	return __test_and_set_bit(nr ^ 24, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
	return test_and_set_bit(nr ^ 24, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
	return __test_and_clear_bit(nr ^ 24, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
	return test_and_clear_bit(nr ^ 24, addr);
}

static inline int test_bit_le(int nr, const void *vaddr)
{
	const unsigned char *p = vaddr;

	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}

static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	words = (size >> 5) + ((size & 31) > 0);
	while (*p++ == ~0UL) {
		if (--words == 0)
			goto out;
	}

	--p;
	for (res = 0; res < 32; res++)
		if (!test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;
		/* Look for zero in first longword */
		for (res = bit; res < 32; res++)
			if (!test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}

	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit_le(p, size - offset);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline int find_first_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	words = (size >> 5) + ((size & 31) > 0);
	while (*p++ == 0UL) {
		if (--words == 0)
			goto out;
	}

	--p;
	for (res = 0; res < 32; res++)
		if (test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
#define find_first_bit_le find_first_bit_le

static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;
		/* Look for one in first longword */
		for (res = bit; res < 32; res++)
			if (test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}

	/* No set bit yet, search remaining full bytes for a set bit */
	return offset + find_first_bit_le(p, size - offset);
}
#define find_next_bit_le find_next_bit_le

/* Bitmap functions for the ext2 filesystem. */

#define ext2_set_bit_atomic(lock, nr, addr)	\
	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr)	\
	test_and_clear_bit_le(nr, addr)

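/*
 * The lock argument is unused here: test_and_set_bit_le() and
 * test_and_clear_bit_le() already map onto atomic bit instructions, so no
 * additional locking is needed for these ext2 helpers.
 */
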
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */