/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL		"%0, %1			\n"	\
	"	" insn		"			\n"	\
	"	" __SC		"%0, %1			\n"	\
	"	" __SC_BEQZ	"%0, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long orig, temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __LL		ll_dst ", %2		\n"	\
	"	" insn		"			\n"	\
	"	" __SC		"%1, %2			\n"	\
	"	" __SC_BEQZ	"%1, 1b			\n"	\
	"	.set		pop			\n"	\
	: "=&r"(orig), "=&r"(temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	orig;							\
})

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}

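/*
 * Example (hypothetical caller, for illustration only): since @nr may
 * exceed BITS_PER_LONG, bits simply land in successive words of a
 * multi-word bitmap.
 *
 *	static DECLARE_BITMAP(my_bitmap, 128);
 *
 *	set_bit(3, my_bitmap);		(first word)
 *	set_bit(70, my_bitmap);		(second word on a 64-bit kernel)
 */
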
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}

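/*
 * Example (hypothetical caller, illustrating the barrier note above):
 * clear_bit() itself contains no memory barrier, so ordering against
 * prior stores must be requested explicitly.
 *
 *	WRITE_ONCE(msg->done, 1);
 *	smp_mb__before_atomic();
 *	clear_bit(MSG_BUSY, &msg->flags);
 */
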
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

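/*
 * Sketch (hypothetical lock bit): this is the release half of a bit
 * lock; the matching acquire half is shown with test_and_set_bit_lock()
 * below.
 *
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */
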
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

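/*
 * Example (hypothetical caller): atomically toggle a state bit from
 * concurrent contexts without taking a lock.
 *
 *	change_bit(LED_STATE_BIT, &led_flags);
 */
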
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

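/*
 * Example (hypothetical bit lock, for illustration only): paired with
 * clear_bit_unlock() above, this forms an acquire/release bit lock.
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */
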
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

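/*
 * Example (hypothetical caller): run one-time setup exactly once even
 * when several CPUs race here; only the first caller observes 0.
 *
 *	if (!test_and_set_bit(INITED_BIT, &init_flags))
 *		do_one_time_setup();
 */
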
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __EXT "%0, %1, %3, 1;"
				    __INS "%1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

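/*
 * Example (hypothetical caller): atomically consume a "work pending"
 * flag so that exactly one CPU processes the work.
 *
 *	if (test_and_clear_bit(WORK_PENDING_BIT, &flags))
 *		process_pending_work();
 */
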
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

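/*
 * Example (hypothetical caller): flip a bit and learn, atomically, the
 * value it held before the flip.
 *
 *	old = test_and_change_bit(PARITY_BIT, &word);
 */
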
#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

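/*
 * Sketch (hypothetical single-owner word): when no other CPU can touch
 * the other bits of the word, this cheaper non-atomic release is
 * sufficient.
 *
 *	__clear_bit_unlock(OWNER_BIT, &privately_owned_word);
 */
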
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}

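/*
 * Worked example: __fls(0x28) == 5, since 0x28 is 0b101000 and bit 5 is
 * its most significant 1 bit; on a 64-bit kernel __fls(1UL << 63) == 63.
 */
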
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

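/*
 * Worked example: in two's complement, word & -word isolates the lowest
 * set bit, so for word == 0x28 (0b101000) it yields 0x8 and
 * __ffs(0x28) == __fls(0x8) == 3.
 */
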
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}

	return r;
}

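/*
 * Worked examples of the 1-based convention documented above:
 * fls(0) == 0, fls(0x28) == 6 (highest set bit is bit 5), and
 * fls(0x80000000u) == 32.
 */
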
#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

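/*
 * Worked example: ffs(0) == 0 and ffs(0x28) == 4 (lowest set bit is
 * bit 3, counted from 1), whereas the 0-based __ffs(0x28) == 3 and
 * __ffs(0) is undefined.
 */
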
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */