#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
/*
 * Bit access functions vary across the ColdFire and 68k families.
 * So we will break them out here, and then macro in the ones we want.
 *
 * ColdFire - supports standard bset/bclr/bchg with register operand only
 * 68000    - supports standard bset/bclr/bchg with memory operand
 * >= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 * Although it is possible to use only the bset/bclr/bchg with register
 * operands on all platforms, you end up with larger generated code.
 * So we use the best form possible on a given platform.
 */
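/*
 * Note on addressing, shared by all the byte-wide forms below: the kernel
 * numbers bit 0 as the LSB of the first 32-bit longword, while the
 * single-bit instructions (bset/bclr/bchg) operate on one byte and number
 * bits 0-7 within it. On big-endian m68k, (nr ^ 31) / 8 selects the byte
 * of the longword that holds bit nr and (nr & 7) the bit inside that
 * byte; e.g. nr = 0 maps to byte offset 3, bit 0. The bitfield forms
 * (bfset/bfclr/bfchg) count bit offsets from the MSB instead, which is
 * why they are handed (nr ^ 31) directly.
 */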
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}
static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}
static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif
#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
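/*
 * The non-atomic __set_bit() (and the other __ variants below) can simply
 * alias the ordinary forms: each operation is a single read-modify-write
 * instruction and m68k kernels run uniprocessor, so there is nothing
 * extra for the "atomic" versions to do.
 */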
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}
static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}
static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif
#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}
static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}
static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif
#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
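/*
 * The test-and-xxx variants below all follow one pattern: the bit
 * instruction sets the condition-code Z flag from the *old* value of the
 * bit, and the following "sne" fills the result byte with ones when Z is
 * clear. retval is therefore non-zero exactly when the bit was already
 * set before the operation.
 */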
static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}
static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}
static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}
#if defined(CONFIG_COLDFIRE)
#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif
#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}
static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}
static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}
#if defined(CONFIG_COLDFIRE)
#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif
#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}
static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}
static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}
#if defined(CONFIG_COLDFIRE)
#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif
#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
/*
 * The true 68020 and more advanced processors support the "bfffo"
 * instruction for finding bits. ColdFire and simple 68000 parts
 * (including CPU32) do not support this. They simply use the generic
 * functions.
 */
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>
#else
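/*
 * The bfffo-based helpers below share one idea: scan longwords until one
 * contains a candidate bit, isolate the lowest such bit with (num & -num),
 * and let bfffo report its offset counted from the MSB. The "res ^= 31"
 * (or "res ^ 31") then converts that MSB-relative offset back to the
 * kernel's LSB-relative bit number, and "((long)p - (long)vaddr - 4) * 8"
 * adds in the bit offset of the longword where the hit occurred (p has
 * already advanced past it).
 */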
static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit
static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
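/*
 * For example, ffz(0x0000ffff) returns 16: bit 16 (counting from the LSB
 * as bit 0) is the lowest clear bit. ffz(~0UL) would feed bfffo an
 * all-zero operand and return a meaningless value, which is why callers
 * must rule that case out first.
 */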
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}

#endif

#ifdef __KERNEL__
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
/*
 * The newer ColdFire family members support a "bitrev" instruction
 * and we can use that to implement a fast ffs. Older ColdFire parts,
 * and normal 68000 parts don't have anything special, so we use the
 * generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
static inline int __ffs(int x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}
#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
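/*
 * Worked example: ffs(0x10) isolates the lowest set bit (0x10 & -0x10 ==
 * 0x10), bfffo reports its offset from the MSB as 27, and 32 - 27 == 5,
 * matching the 1-based libc convention (ffs(0) == 0, since bfffo on a
 * zero field yields 32). fls() below applies the same instruction to the
 * unmodified value to locate the highest set bit instead.
 */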
/*
 * fls: find last bit set.
 */
static inline int fls(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}

#endif
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */