/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll "
#define __SC	"sc "
#define __INS	"ins "
#define __EXT	"ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld "
#define __SC	"scd "
#define __INS	"dins "
#define __EXT	"dext "
#endif
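
/*
 * Worked example: these constants split a bit index @nr into a word
 * offset and a bit offset.  With SZLONG_LOG == 5 and SZLONG_MASK == 31UL
 * (32-bit longs), nr = 69 selects word (69 >> 5) == 2 and bit
 * (69 & 31) == 5 within that word; every operation below uses this
 * decomposition.
 */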

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# set_bit	\n"
		"	or	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
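
/*
 * Usage sketch (illustrative; dev_flags and DEV_FLAG_BUSY are made-up
 * names, not part of this header):
 *
 *	static unsigned long dev_flags;
 *	#define DEV_FLAG_BUSY	0
 *
 *	set_bit(DEV_FLAG_BUSY, &dev_flags);	-- atomic read-modify-write
 *	clear_bit(DEV_FLAG_BUSY, &dev_flags);	-- atomic, but no barrier
 */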

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# clear_bit	\n"
		"	and	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}
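
/*
 * Pairing sketch (illustrative; owner, word and LOCK_BIT are made-up
 * names): stores made while holding the bit must not be reordered past
 * the release, which the smp_mb__before_clear_bit() above guarantees:
 *
 *	owner = NULL;				-- publish final state
 *	clear_bit_unlock(LOCK_BIT, &word);	-- release: bit clears last
 */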

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# change_bit	\n"
		"	xor	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
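
/*
 * Usage sketch (illustrative; PENDING_BIT and flags are made-up names):
 * the returned old value makes "claim if not already claimed" a single
 * atomic step:
 *
 *	if (!test_and_set_bit(PENDING_BIT, &flags))
 *		... only the first claimant gets here ...
 */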

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
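
/*
 * Lock-style sketch (illustrative; word and LOCK_BIT are made-up names):
 * spin until the bit was observed clear; the acquire barrier then orders
 * the critical section after the successful set.
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(LOCK_BIT, &word);
 */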

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
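
/*
 * Usage sketch (illustrative; EVENT_BIT and pending are made-up names):
 * atomically consume a pending flag so each event is handled once:
 *
 *	if (test_and_clear_bit(EVENT_BIT, &pending))
 *		... handle the event exactly once ...
 */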

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC "\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
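
/*
 * Usage sketch (illustrative; TOGGLE_BIT and state are made-up names):
 * the return value reports the pre-toggle state:
 *
 *	was_set = test_and_change_bit(TOGGLE_BIT, &state);
 *	-- the bit is now inverted; was_set tells which way it flipped
 */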

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
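
/*
 * Caution (illustrative note): because __clear_bit() is a plain
 * read-modify-write, this variant is only safe when no other CPU can
 * touch the word concurrently, e.g. releasing the sole lock bit in a
 * word whose other bits are only modified while that lock is held.
 */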

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push	\n"
		"	.set	mips32	\n"
		"	clz	%0, %1	\n"
		"	.set	pop	\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push	\n"
		"	.set	mips64	\n"
		"	dclz	%0, %1	\n"
		"	.set	pop	\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
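
/*
 * Worked example: __fls(0x90) == 7, since bit 7 is the highest set bit
 * of 0x90 (binary 1001 0000).  On a CLZ-capable 32-bit CPU this is
 * 31 - clz(0x90) == 31 - 24 == 7.
 */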

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
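
/*
 * Worked example: word & -word isolates the lowest set bit, so for
 * word == 0x90 it yields 0x10 and __ffs(0x90) == __fls(0x10) == 4.
 */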

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
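
/*
 * Worked example: ffs(0x18) == 4.  0x18 & -0x18 isolates the lowest set
 * bit (0x08), and fls(0x08) == 4, matching the 1-based libc convention;
 * ffs(0) == 0 by the explicit check above.
 */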

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */