arch/arc/include/asm/bitops.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
 * The Kconfig glue ensures that in SMP, this is only set if the container
 * SoC/platform has cross-core coherent LLOCK/SCOND.
 */
#if defined(CONFIG_ARC_HAS_LLSC)

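/*
 * For illustration only (not part of the original header): each LLOCK/SCOND
 * loop below is the hardware-assisted equivalent of this C-level sketch,
 * where the store succeeds only if no other core wrote the word since the
 * locked load (store_conditional() is a hypothetical name for what SCOND
 * does):
 *
 *	do {
 *		old = *m;			// llock: load-locked
 *		new = old | (1UL << nr);	// bset / bclr / bxor
 *	} while (!store_conditional(m, new));	// scond + bnz: retry on fail
 */
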
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bset    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bclr    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bxor    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

/*
 * Semantically:
 *    Test the bit
 *	if clear
 *		set it and return 0 (old value)
 *	else
 *		return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned.
 */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bset    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b	\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

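/*
 * Typical usage sketch (illustrative, not from this header): atomically
 * claim a flag and learn whether we got there first, e.g. a hypothetical
 * one-shot init guard:
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_init();	// old bit was 0: we won the race
 */
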
static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bclr    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b	\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bxor    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b	\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#include <asm/smp.h>

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants):
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned".
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler can't otherwise be sure the index is not negative.
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so there is NO need to mask
 *     them off.
 *     (GCC quirk: for a constant @nr we still need to do the masking at
 *      compile time.)
 */

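/*
 * Sketch of the locking (an assumption based on the comment above; the
 * real macros come from asm/smp.h): on UP, bitops_lock()/bitops_unlock()
 * reduce to plain irq disabling, and on SMP they additionally take a
 * global spinlock shared by all bitops. Roughly:
 *
 *	#define bitops_lock(flags)	local_irq_save(flags)		// UP
 *	#define bitops_unlock(flags)	local_irq_restore(flags)
 */
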
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp | (1UL << nr);

	bitops_unlock(flags);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp & ~(1UL << nr);

	bitops_unlock(flags);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp ^ (1UL << nr);

	bitops_unlock(flags);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old | (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old & ~(1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old ^ (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/

static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp | (1UL << nr);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp & ~(1UL << nr);
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp ^ (1UL << nr);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old | (1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old & ~(1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old ^ (1 << nr);

	return (old & (1 << nr)) != 0;
}

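/*
 * Usage note (illustrative, not from this header): the __xxx_bit()
 * variants skip the atomic RMW machinery and are only safe when the
 * caller already guarantees exclusion, e.g. under a hypothetical lock:
 *
 *	spin_lock(&map_lock);
 *	__set_bit(nr, bitmap);
 *	spin_unlock(&map_lock);
 */
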
/*
 * This routine doesn't need to be atomic.
 */
static inline int
__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int
__test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	/* ARC700 only considers 5 bits in bit-fiddling insn */
	mask = 1 << nr;

	return ((mask & *addr) != 0);
}

#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
					__constant_test_bit((nr), (addr)) : \
					__test_bit((nr), (addr)))

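/*
 * Dispatch example (illustrative): a compile-time constant @nr routes to
 * __constant_test_bit() and folds to an AND against an immediate mask,
 * while a runtime @nr routes to __test_bit():
 *
 *	test_bit(3, &flags)	// -> __constant_test_bit(3, &flags)
 *	test_bit(n, &flags)	// -> __test_bit(n, &flags)
 */
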
/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It can be 0 to 32, depending on the number of leading zeros:
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

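/*
 * For reference (illustrative, not how this header computes it): on a
 * compiler providing a count-leading-zeros builtin, the same result is
 * roughly
 *
 *	res = x ? __builtin_clz(x) : 32;
 *
 * where the conditional covers x == 0, for which __builtin_clz() is
 * undefined.
 */
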
static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

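/*
 * Worked example (illustrative): constant_fls(0x00001000)
 *	x & 0xffff0000 == 0  ->  x = 0x10000000, r = 16
 *	x & 0xff000000 != 0  ->  no change
 *	x & 0xf0000000 != 0  ->  no change
 *	x & 0xc0000000 == 0  ->  x = 0x40000000, r = 14
 *	x & 0x80000000 == 0  ->  x = 0x80000000, r = 13
 * Result: 13, i.e. bit 12 is the highest set bit (1-based index 13).
 */
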
/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

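/*
 * Note (illustrative): __t & -__t isolates the lowest set bit, so fls()
 * of that single-bit value is exactly its 1-based position, e.g.
 *
 *	__t = 0b0110  ->  __t & -__t = 0b0010  ->  fls(0b0010) = 2  ->  ffs = 2
 */
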
/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif