/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */