#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H
/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>	/* DCACHE_CLEAR() used below */
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
/*
 * On SMP the M32R lock/unlock instructions make the read-modify-write
 * window atomic; on UP a plain ld/st pair under local_irq_save() suffices.
 */
#ifdef CONFIG_SMP
#define LOAD	"lock"
#define STORE	"unlock"
#else	/* CONFIG_SMP */
#define LOAD	"ld"
#define STORE	"st"
#endif	/* CONFIG_SMP */
/* #define ADDR (*(volatile long *) addr) */
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		LOAD"	%0, @%1;	\n\t"	/* tmp = *a */
		"or	%0, %2;		\n\t"	/* tmp |= mask */
		STORE"	%0, @%1;	\n\t"	/* *a = tmp */
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
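/*
 * Usage sketch (illustrative addition, not from the original header;
 * dev_flags and DEV_BUSY are hypothetical names): a flags word shared
 * between process context and an interrupt handler.
 *
 *	static volatile unsigned long dev_flags;
 *	#define DEV_BUSY	0
 *
 *	set_bit(DEV_BUSY, &dev_flags);		atomically mark busy
 *	...
 *	clear_bit(DEV_BUSY, &dev_flags);	atomically mark idle
 */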
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a |= mask;
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		LOAD"	%0, @%1;	\n\t"	/* tmp = *a */
		"and	%0, %2;		\n\t"	/* tmp &= ~mask */
		STORE"	%0, @%1;	\n\t"	/* *a = tmp */
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask;
	volatile unsigned long *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a &= ~mask;
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
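/*
 * Pairing sketch (illustrative addition; BIT_LOCKED and lockword are
 * hypothetical names): when clear_bit() is used to release a lock-like
 * bit, order the owner's prior stores before the release:
 *
 *	update shared data
 *	smp_mb__before_clear_bit();
 *	clear_bit(BIT_LOCKED, &lockword);
 */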
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a ^= mask;
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		LOAD"	%0, @%1;	\n\t"	/* tmp = *a */
		"xor	%0, %2;		\n\t"	/* tmp ^= mask */
		STORE"	%0, @%1;	\n\t"	/* *a = tmp */
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		LOAD"	%0, @%2;	\n\t"	/* oldbit = *a */
		"mv	%1, %0;		\n\t"	/* tmp = oldbit */
		"and	%0, %3;		\n\t"	/* oldbit &= mask */
		"or	%1, %3;		\n\t"	/* tmp |= mask */
		STORE"	%1, @%2;	\n\t"	/* *a = tmp */
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
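/*
 * Usage sketch (illustrative addition; names are hypothetical):
 * test_and_set_bit() works as a try-lock, because observing the old
 * value and storing the new one happen in a single atomic step:
 *
 *	if (!test_and_set_bit(BIT_LOCKED, &lockword)) {
 *		bit was 0 and is now 1: this caller owns the resource
 *	}
 */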
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		LOAD"	%0, @%3;	\n\t"	/* oldbit = *a */
		"mv	%1, %0;		\n\t"	/* tmp = oldbit */
		"and	%0, %2;		\n\t"	/* oldbit &= mask */
		"not	%2, %2;		\n\t"	/* mask = ~mask */
		"and	%1, %2;		\n\t"	/* tmp &= ~mask */
		STORE"	%1, @%3;	\n\t"	/* *a = tmp */
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
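/*
 * Usage sketch (illustrative addition; EV_PENDING and ev_flags are
 * hypothetical names): consume a pending-event flag exactly once, even
 * when an interrupt handler sets it concurrently:
 *
 *	if (test_and_clear_bit(EV_PENDING, &ev_flags))
 *		handle_event();		only one caller sees the 1->0 edge
 */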
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a ^= mask;

	return (oldbit != 0);
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		LOAD"	%0, @%2;	\n\t"	/* oldbit = *a */
		"mv	%1, %0;		\n\t"	/* tmp = oldbit */
		"and	%0, %3;		\n\t"	/* oldbit &= mask */
		"xor	%1, %3;		\n\t"	/* tmp ^= mask */
		STORE"	%1, @%2;	\n\t"	/* *a = tmp */
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
#if 0	/* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif
static inline int test_bit(int nr, const volatile void * addr)
{
	__u32 mask;
	const volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	return ((*a & mask) != 0);
}
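/*
 * Note (added): test_bit() is a plain read, so it can serve as a cheap
 * pre-check before one of the atomic read-modify-write operations above
 * (names hypothetical):
 *
 *	if (test_bit(EV_PENDING, &ev_flags) &&
 *	    test_and_clear_bit(EV_PENDING, &ev_flags))
 *		handle_event();
 */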
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int k;

	word = ~word;
	k = 0;
	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
	if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
	if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	int k = 0;

	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
	if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
	if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
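/*
 * Worked example (added): the 140-bit priority bitmap spans five 32-bit
 * words, b[0] covering bits 0-31, b[1] bits 32-63, and so on.  With
 * b[0] == b[1] == 0 and b[2] == 0x8, the result is 64 + 3 = 67.
 */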
/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
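/*
 * Worked example (added): hweight8(0xA5) weighs 1010 0101 and returns 4;
 * hweight32(0xffffffff) returns 32.
 */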
#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2_XXXX functions
 * orig: include/asm-sh/bitops.h
 */
#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit			test_and_set_bit
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit
#else	/* __LITTLE_ENDIAN__ */
static inline int ext2_set_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}
static inline int ext2_clear_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}
static inline int ext2_test_bit(int nr, const volatile void * addr)
{
	__u8 mask;
	const volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));

	return ((mask & *a) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
static inline unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#endif	/* __LITTLE_ENDIAN__ */
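/*
 * Note (added): ext2 bitmaps are little-endian byte streams on disk, so
 * these big-endian helpers number bits within bytes: ext2_test_bit(8, map)
 * inspects bit 0 of the byte at map + 1, where the native test_bit(8, map)
 * would look at bit 8 of the first 32-bit word.
 */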
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */