/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		/* R10000 errata workaround: branch-likely (beqzl) LL/SC loop */
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		/* MIPS R2+: ins sets the single constant bit from ~0 */
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		/* No LL/SC: fall back to the IRQ-disabling helper in bitops.c */
		__mips_set_bit(nr, addr);
}
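/*
 * Illustrative usage sketch, not part of this header; the names
 * my_flags and MY_BIT_BUSY are hypothetical:
 *
 *	static unsigned long my_flags;
 *
 *	set_bit(MY_BIT_BUSY, &my_flags);	// atomic, visible to all CPUs
 */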
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		/* MIPS R2+: ins from $0 (zero register) clears the single bit */
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
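/*
 * Illustrative sketch with hypothetical names: since clear_bit() has no
 * implicit barrier, a release-style clear needs an explicit one:
 *
 *	smp_mb__before_atomic();		// order prior stores first
 *	clear_bit(MY_BIT_BUSY, &my_flags);
 */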
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();	/* release: order prior accesses before the clear */
	clear_bit(nr, addr);
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
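/*
 * Illustrative sketch with hypothetical names: atomically flipping a
 * mode bit regardless of its current value:
 *
 *	change_bit(MY_BIT_MODE, &my_flags);	// 0 -> 1 or 1 -> 0
 */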
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
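/*
 * Illustrative sketch with hypothetical names: claiming a resource
 * exactly once; the implied full barrier orders the claim against the
 * subsequent setup:
 *
 *	if (!test_and_set_bit(MY_BIT_CLAIMED, &my_flags))
 *		my_setup_resource();		// we won the race
 */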
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
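/*
 * Illustrative sketch with hypothetical names: a minimal bit spinlock
 * built from the acquire/release pair in this header:
 *
 *	while (test_and_set_bit_lock(MY_BIT_LOCK, &my_flags))
 *		cpu_relax();			// spin until bit was clear
 *	// ... critical section ...
 *	clear_bit_unlock(MY_BIT_LOCK, &my_flags);
 */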
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* MIPS R2+: ext extracts the old bit, ins from $0 clears it */
		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
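/*
 * Illustrative sketch with hypothetical names: consuming a pending-work
 * flag so that only one caller processes it:
 *
 *	if (test_and_clear_bit(MY_BIT_PENDING, &my_flags))
 *		my_process_work();		// flag was set, now cleared
 */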
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
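/*
 * Illustrative sketch with hypothetical names: flipping between two
 * buffers while learning which one had been active:
 *
 *	int was_set = test_and_change_bit(MY_BIT_WHICHBUF, &my_flags);
 */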
#include <asm-generic/bitops/non-atomic.h>
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();	/* release: order prior accesses before the clear */
	__clear_bit(nr, addr);
	nudge_writes();
}
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word.
 * The result is undefined if no 1 bit exists (the CLZ path below yields -1,
 * the generic fallback yields 0).
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	dclz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	/* Generic binary-search fallback */
	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
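/*
 * Worked examples (values follow from the scan above): __fls(1) == 0,
 * __fls(0x80000000ul) == 31 and, with BITS_PER_LONG == 64,
 * __fls(1ul << 63) == 63. __fls(0) must not be relied upon.
 */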
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);	/* word & -word isolates the lowest set bit */
}
/*
 * fls - find last bit set.
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	/* Generic binary-search fallback */
	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
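/*
 * Worked example: on CPUs with CLZ the result is 32 - clz(x); for
 * x = 0x00f0 there are 24 leading zeros, so fls() returns 8, matching
 * the fallback scan.
 */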
#include <asm-generic/bitops/fls64.h>
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
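/*
 * Worked examples: ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.
 * word & -word isolates the lowest set bit, which fls() then locates.
 */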
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */