MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / include/asm-m32r/bitops.h
#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#undef  LOAD
#undef  STORE
#ifdef CONFIG_SMP
#define LOAD    "lock"
#define STORE   "unlock"
#else
#define LOAD    "ld"
#define STORE   "st"
#endif
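
/*
 * Illustrative note (not in the original header): these macros select the
 * memory-access opcodes used by the atomic routines below.  On SMP the
 * M32R "lock"/"unlock" pair holds the bus lock between the load and the
 * store, so the read-modify-write sequences behave atomically across
 * CPUs; on UP plain "ld"/"st" suffice because local_irq_save() already
 * excludes local interrupts.  With CONFIG_SMP, for example, the body of
 * set_bit() assembles roughly to (register names illustrative):
 *
 *   lock    r4, @r5        ; load word, acquire bus lock
 *   or      r4, r6         ; set the mask bit
 *   unlock  r4, @r5        ; store word, release bus lock
 */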

/* #define ADDR (*(volatile long *) addr) */
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                LOAD"   %0, @%1;        \n\t"
                "or     %0, %2;         \n\t"
                STORE"  %0, @%1;        \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
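
/*
 * Minimal usage sketch (illustrative, not part of the original header;
 * the bitmap name and sizes are hypothetical):
 *
 *   static unsigned long irq_pending[2];      // 64-bit bitmap
 *
 *   set_bit(37, irq_pending);                 // word 1, bit 5
 *   if (test_bit(37, irq_pending))
 *           printk("bit 37 is set\n");
 */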
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a |= mask;
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                LOAD"   %0, @%1;        \n\t"
                "and    %0, %2;         \n\t"
                STORE"  %0, @%1;        \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (~mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask;
        volatile unsigned long *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a &= ~mask;
}
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
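
/*
 * Illustrative note (not in the original header): clear_bit() alone does
 * not order prior stores, so a lock-release built on it needs an explicit
 * barrier first; otherwise another CPU could observe the bit cleared
 * before it observes the data written inside the critical section.
 * Names below are hypothetical:
 *
 *   shared_data = new_value;
 *   smp_mb__before_clear_bit();
 *   clear_bit(LOCK_BIT, &lock_word);      // now safe to release
 */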
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a ^= mask;
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                LOAD"   %0, @%1;        \n\t"
                "xor    %0, %2;         \n\t"
                STORE"  %0, @%1;        \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                LOAD"   %0, @%2;        \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %3;         \n\t"
                "or     %1, %3;         \n\t"
                STORE"  %1, @%2;        \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
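
/*
 * Illustrative try-lock sketch (not in the original header; the flag word
 * name is hypothetical): because the old value is returned atomically, a
 * single bit can act as a simple lock.
 *
 *   static unsigned long busy_flags;
 *
 *   if (!test_and_set_bit(0, &busy_flags)) {
 *           ... bit was clear and is now set: we own the resource ...
 *   } else {
 *           ... someone else already holds it ...
 *   }
 */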
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%3")
                LOAD"   %0, @%3;        \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %2;         \n\t"
                "not    %2, %2;         \n\t"
                "and    %1, %2;         \n\t"
                STORE"  %1, @%3;        \n\t"
                : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
                : "r" (a)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a ^= mask;

        return (oldbit != 0);
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                LOAD"   %0, @%2;        \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %3;         \n\t"
                "xor    %1, %3;         \n\t"
                STORE"  %1, @%2;        \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
#if 0   /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif
static inline int test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        return ((*a & mask) != 0);
}
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        int k;

        word = ~word;
        k = 0;
        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}
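
/*
 * Worked example (illustrative, not part of the original header):
 * ffz(0x0000ffff) inverts the word to 0xffff0000 and binary-searches it.
 * The low 16 bits of the inverted word are zero, so k += 16 and the word
 * is shifted right by 16; every remaining test then finds a set bit in
 * the low half, so no further increments apply and the result is 16,
 * the index of the first zero bit in the original value.
 */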
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
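
/*
 * Usage sketch (illustrative, not part of the original header): scan a
 * fixed-size allocation bitmap for a free entry.  Names and sizes are
 * hypothetical, and the caller is assumed to hold a suitable lock.
 *
 *   #define MAX_SLOTS 128
 *   static unsigned long slot_map[MAX_SLOTS / 32];
 *
 *   int slot = find_first_zero_bit(slot_map, MAX_SLOTS);
 *   if (slot < MAX_SLOTS)
 *           __set_bit(slot, slot_map);    // claim the free slot
 */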
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        int k = 0;

        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
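
/*
 * Illustrative example (not in the original header): with a 5-word
 * priority bitmap in which only bit 130 is set, the first four words are
 * zero, so the function falls through to the last word:
 *
 *   unsigned long prio_map[5] = { 0, 0, 0, 0, 0x4 };
 *   sched_find_first_bit(prio_map);   // __ffs(0x4) + 128 == 130
 */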
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned int *p = ((const unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)         /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
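
/*
 * Usage sketch (illustrative, not part of the original header): visit
 * every set bit in a bitmap.  Names and the bitmap size are hypothetical.
 *
 *   unsigned long map[2] = { 0x11, 0x1 };   // bits 0, 4 and 32 set
 *   unsigned long bit;
 *
 *   for (bit = find_first_bit(map, 64);
 *        bit < 64;
 *        bit = find_next_bit(map, 64, bit + 1))
 *           printk("bit %lu is set\n", bit);
 */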
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x)  generic_ffs(x)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x)    generic_hweight32(x)
#define hweight16(x)    generic_hweight16(x)
#define hweight8(x)     generic_hweight8(x)
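
/*
 * Worked examples (illustrative, not part of the original header):
 *
 *   hweight32(0xF0F0F0F0)  ->  16   (four bytes of four set bits each)
 *   hweight8(0x03)         ->   2
 */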
#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2_XXXX function
 * orig: include/asm-sh/bitops.h
 */
#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit                    test_and_set_bit
#define ext2_clear_bit                  __test_and_clear_bit
#define ext2_test_bit                   test_bit
#define ext2_find_first_zero_bit        find_first_zero_bit
#define ext2_find_next_zero_bit         find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}
static inline int ext2_clear_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}
static inline int ext2_test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));

        return ((mask & *a) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)
static inline unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
#endif
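
/*
 * Worked example for the swabbed mask above (illustrative, not part of
 * the original header): with offset == 8, ~0UL >> (32-8) is 0x000000ff,
 * and __swab32() turns it into 0xff000000.  On a big-endian CPU the
 * first eight little-endian bitmap bits live in the byte at the lowest
 * address, i.e. in the most significant byte of the loaded word, so
 * OR-ing 0xff000000 marks exactly the already-scanned bits as non-zero
 * before the search continues.
 */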
#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })
#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })
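
/*
 * Usage sketch (illustrative, not part of the original header; the
 * spinlock and bitmap names are hypothetical):
 *
 *   static spinlock_t bmap_lock = SPIN_LOCK_UNLOCKED;
 *   static unsigned long block_bitmap[64];
 *
 *   if (!ext2_set_bit_atomic(&bmap_lock, 42, block_bitmap))
 *           ... bit 42 was previously clear: the block is now ours ...
 */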
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)         __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */