/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}
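
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header, kept out of compilation with "#if 0"): how a caller might use
 * set_bit() to atomically publish a per-device status flag.  The
 * example_status word and EXAMPLE_RX_PENDING bit number are hypothetical.
 */
#if 0
#define EXAMPLE_RX_PENDING	0	/* hypothetical flag bit */

static unsigned long example_status;	/* one word = BITS_PER_LONG flags */

static void example_mark_rx_pending(void)
{
	/* Atomic read-modify-write; safe against concurrent set/clear_bit. */
	set_bit(EXAMPLE_RX_PENDING, &example_status);
}
#endif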

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}
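
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): using
 * clear_bit() as the release side of a bit lock, with
 * smp_mb__before_clear_bit() supplying the barrier that clear_bit()
 * itself does not provide.  EXAMPLE_LOCK_BIT and example_lock_word are
 * hypothetical names.
 */
#if 0
#define EXAMPLE_LOCK_BIT	0	/* hypothetical lock bit */

static unsigned long example_lock_word;

static void example_unlock(void)
{
	/* Order all prior stores before the bit becomes visible as clear. */
	smp_mb__before_clear_bit();
	clear_bit(EXAMPLE_LOCK_BIT, &example_lock_word);
}
#endif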

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}
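
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): toggling a
 * flag with change_bit() so concurrent togglers cannot lose updates the way
 * a plain "flags ^= mask" could.  example_led_state and EXAMPLE_LED0 are
 * hypothetical names.
 */
#if 0
#define EXAMPLE_LED0	0	/* hypothetical LED bit */

static unsigned long example_led_state;

static void example_blink_led0(void)
{
	/* Atomically invert the bit; no lock needed around the toggle. */
	change_bit(EXAMPLE_LED0, &example_led_state);
}
#endif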

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		return retval;
	}
}
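
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): a simple
 * trylock built on test_and_set_bit(), which already implies the memory
 * barrier a lock acquire needs.  The names below are hypothetical.
 */
#if 0
#define EXAMPLE_IN_USE	0	/* hypothetical "resource busy" bit */

static unsigned long example_resource_flags;

static int example_try_claim(void)
{
	/* Returns 1 on success: the bit was 0 before and is now 1. */
	return !test_and_set_bit(EXAMPLE_IN_USE, &example_resource_flags);
}
#endif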

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		return retval;
	}
}
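
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): consuming a
 * "work pending" flag exactly once with test_and_clear_bit(), so only one of
 * several racing consumers processes the event.  Names are hypothetical.
 */
#if 0
#define EXAMPLE_WORK_PENDING	0	/* hypothetical event bit */

static unsigned long example_event_flags;

static int example_poll(void)
{
	/* Nonzero only for the caller that actually cleared the bit; that
	 * caller is the one expected to handle the event. */
	return test_and_clear_bit(EXAMPLE_WORK_PENDING, &example_event_flags);
}
#endif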

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		return retval;
	}
}
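
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): flipping a
 * double-buffer selector with test_and_change_bit() and using the returned
 * old value to identify the buffer that was current before the flip.  The
 * names are hypothetical.
 */
#if 0
#define EXAMPLE_BUF_SEL	0	/* hypothetical buffer-select bit */

static unsigned long example_buf_flags;

static int example_flip_buffer(void)
{
	/* Returns the index (0 or 1) of the buffer in use before the flip. */
	return test_and_change_bit(EXAMPLE_BUF_SEL, &example_buf_flags);
}
#endif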

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
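
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): using
 * __ilog2() to compute the smallest power-of-two order that covers a given
 * size, a common step when sizing hardware rings.  example_order_for() is a
 * hypothetical helper, not part of this header.
 */
#if 0
static int example_order_for(unsigned long size)
{
	if (size <= 1)
		return 0;
	/* __ilog2() gives the floor; round up for non-power-of-two sizes. */
	return __ilog2(size - 1) + 1;
}
#endif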

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
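
/*
 * Illustrative sketch (exposition only, disabled with "#if 0"): the
 * relationship between the three search helpers on a concrete value.
 * For word = 0x90 (bits 4 and 7 set):
 *	__ffs(0x90) == 4	lowest set bit, 0-based, undefined for 0
 *	ffs(0x90)   == 5	lowest set bit, 1-based, ffs(0) == 0
 *	fls(0x90)   == 8	highest set bit, 1-based, fls(0) == 0
 */
#if 0
static void example_bit_search(void)
{
	BUG_ON(__ffs(0x90UL) != 4);
	BUG_ON(ffs(0x90) != 5);
	BUG_ON(fls(0x90) != 8);
}
#endif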

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */