#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic.  All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
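/*
 * Illustrative sketch (not part of the original header): how @nr maps
 * onto 32-bit words under the numbering above.  The names below (bits,
 * bit_numbering_example) are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static void bit_numbering_example(void)
{
        static __u32 bits[2];           /* 64 bits total                */

        set_bit(5, bits);               /* bits[0] |= 0x00000020        */
        set_bit(32, bits);              /* bits[1] |= 0x00000001        */
}
#endif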
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "or     %0, %2;         \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
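/*
 * Illustrative usage (not part of the original header): an atomic flag
 * word shared with an interrupt handler.  All names below (dev_flags,
 * DEV_TX_BUSY, mark_tx_busy) are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static unsigned long dev_flags;
#define DEV_TX_BUSY     0

static void mark_tx_busy(void)
{
        /* safe against a concurrent update of the same word */
        set_bit(DEV_TX_BUSY, &dev_flags);
}
#endif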
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a |= mask;
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "and    %0, %2;         \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (~mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask;
        volatile unsigned long *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a &= ~mask;
}
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
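/*
 * Illustrative sketch (not part of the original header): releasing a
 * bit used as a lock, with the barrier that the clear_bit() comment
 * above asks for.  The names (lock_word, LOCK_BIT, bit_unlock) are
 * hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static unsigned long lock_word;
#define LOCK_BIT        0

static void bit_unlock(void)
{
        /* order prior stores before the releasing clear_bit() */
        smp_mb__before_clear_bit();
        clear_bit(LOCK_BIT, &lock_word);
}
#endif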
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a ^= mask;
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "xor    %0, %2;         \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %3;         \n\t"
                "or     %1, %3;         \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
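/*
 * Illustrative sketch (not part of the original header): the classic
 * test-and-set loop, spinning until the bit was previously clear.  The
 * names (lock_word2, LOCK_BIT2, bit_lock) are hypothetical, and
 * cpu_relax() is an assumption of this sketch, not part of this header.
 */
#if 0   /* usage sketch only, never compiled */
static unsigned long lock_word2;
#define LOCK_BIT2       0

static void bit_lock(void)
{
        while (test_and_set_bit(LOCK_BIT2, &lock_word2))
                cpu_relax();    /* old value was 1: somebody holds it */
}
#endif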
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%3")
                M32R_LOCK" %0, @%3;     \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %2;         \n\t"
                "not    %2, %2;         \n\t"
                "and    %1, %2;         \n\t"
                M32R_UNLOCK" %1, @%3;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
                : "r" (a)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}
/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a ^= mask;

        return (oldbit != 0);
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv     %1, %0;         \n\t"
                "and    %0, %3;         \n\t"
                "xor    %1, %3;         \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        return ((*a & mask) != 0);
}
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        int k;

        word = ~word;
        k = 0;
        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}
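/*
 * Worked example (not part of the original header): ffz(0x0000ffff).
 * ~word == 0xffff0000, so the low 16 bits are zero, k becomes 16, and
 * no later test fires: the first zero bit of 0x0000ffff is bit 16.
 */
#if 0   /* usage sketch only, never compiled */
static void ffz_example(void)
{
        unsigned long bit = ffz(0x0000ffff);    /* bit == 16 */
}
#endif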
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr,
                                         int size, int offset)
{
        const unsigned long *p = addr + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
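/*
 * Illustrative sketch (not part of the original header): visiting every
 * zero bit of a bitmap with the pair of helpers above.  The names (map,
 * for_each_zero_bit_example) are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static void for_each_zero_bit_example(void)
{
        unsigned long map[2] = { ~0UL, 0x0000ffffUL };  /* 64 bits */
        int bit;

        for (bit = find_first_zero_bit(map, 64);
             bit < 64;
             bit = find_next_zero_bit(map, 64, bit + 1))
                /* visits bits 48..63 */ ;
}
#endif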
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
        int k = 0;

        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}
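/*
 * Worked example (not part of the original header): __ffs(0x00008000).
 * The low 16 bits are non-zero, the low 8 are zero (k = 8), then the
 * low 4 (k = 12), then the low 2 (k = 14), then bit 0 (k = 15): the
 * first set bit of 0x00008000 is bit 15.
 */
#if 0   /* usage sketch only, never compiled */
static void __ffs_example(void)
{
        unsigned long bit = __ffs(0x00008000);  /* bit == 15 */
}
#endif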
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

#ifdef __KERNEL__
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
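/*
 * Worked example (not part of the original header): with the 2.6
 * scheduler's 140-bit priority bitmap, the first set bit names the
 * highest-priority non-empty run queue.  The array contents below are
 * hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static void sched_bitmap_example(void)
{
        unsigned long bitmap[5] = { 0, 0, 0, 0x10, 0 };

        /* word 3, bit 4  ->  3*32 + 4 == bit 100 */
        int prio = sched_find_first_bit(bitmap);        /* prio == 100 */
}
#endif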
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned int *p = ((const unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}
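/*
 * Illustrative sketch (not part of the original header): visiting every
 * set bit of a bitmap.  find_next_bit(map, n, 0) is what the
 * find_first_bit() macro defined just below expands to.  The names are
 * hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static void for_each_set_bit_example(void)
{
        unsigned long map[2] = { 0x00000005UL, 0x80000000UL };
        unsigned long bit;

        for (bit = find_next_bit(map, 64, 0);
             bit < 64;
             bit = find_next_bit(map, 64, bit + 1))
                /* visits bits 0, 2 and 63 */ ;
}
#endif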
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */
#ifdef __KERNEL__

/*
 * ext2_XXXX function
 * orig: include/asm-sh/bitops.h
 */

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit                    test_and_set_bit
#define ext2_clear_bit                  __test_and_clear_bit
#define ext2_test_bit                   test_bit
#define ext2_find_first_zero_bit        find_first_zero_bit
#define ext2_find_next_zero_bit         find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}
static inline int ext2_clear_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}
static inline int ext2_test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));

        return ((mask & *a) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)
static inline unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
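/*
 * Sketch of why the swabbed mask above works (not part of the original
 * header): __swab32() is its own inverse and distributes over bitwise
 * OR, so OR-ing the byte-swapped mask into the raw word equals
 * byte-swapping first and OR-ing the plain mask.  Deferring the swab
 * keeps it off the fast path, where ffz() is never reached.
 */
#if 0   /* equivalence check only, never compiled */
static void swab_mask_equivalence(unsigned long tmp, int offset)
{
        unsigned long a = __swab32(tmp | __swab32(~0UL >> (32-offset)));
        unsigned long b = __swab32(tmp) | (~0UL >> (32-offset));
        /* a == b for any tmp and any offset in 1..31 */
}
#endif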
#endif

#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })
#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })
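/*
 * Illustrative usage (not part of the original header): serializing
 * ext2 bitmap updates with a per-bitmap spinlock.  The names
 * (bmap_lock, bmap, claim_block) are hypothetical, and the 2.6.x-era
 * SPIN_LOCK_UNLOCKED initializer is an assumption of this sketch.
 */
#if 0   /* usage sketch only, never compiled */
static spinlock_t bmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long bmap[4];

static int claim_block(int nr)
{
        /* returns non-zero if the block was already claimed */
        return ext2_set_bit_atomic(&bmap_lock, nr, bmap);
}
#endif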
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)         __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */