arch/mips/include/asm/bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long __temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) "	%0, %1	\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%0, %1	\n"	\
	"	" __stringify(SC_BEQZ) "	%0, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)
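
/*
 * Editor's sketch (not part of the original header): the LL/SC loop in
 * __bit_op() is roughly equivalent to the following C, with the load,
 * modify and conditional store made atomic by the hardware:
 *
 *	do {
 *		__temp = *mem;		// LONG_LL: load-linked
 *		__temp = insn(__temp);	// e.g. "or %0, %2"
 *	} while (!store_conditional(mem, __temp));	// LONG_SC, retried via SC_BEQZ
 */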
#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long __orig, __temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) " "	ll_dst ", %2\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%1, %2	\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__orig), "=&r"(__temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	__orig;							\
})
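
/*
 * Editor's sketch: unlike __bit_op(), __test_bit_op() is a statement
 * expression yielding the original word (loaded into ll_dst, captured in
 * __orig), so callers can test a bit's previous state, e.g.:
 *
 *	orig = __test_bit_op(*m, "%0", "or\t%1, %0, %3", "ir"(BIT(bit)));
 *	res = (orig & BIT(bit)) != 0;	// was the bit already set?
 */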
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
bool __mips_xor_is_negative_byte(unsigned long mask,
				 volatile unsigned long *addr);
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
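
/*
 * Usage sketch (editor's illustration, identifiers hypothetical):
 * set_bit() indexes into a multi-word bitmap itself, so callers just
 * pass the flat bit number:
 *
 *	DECLARE_BITMAP(flags, 128);	// from <linux/bitmap.h>
 *
 *	bitmap_zero(flags, 128);
 *	set_bit(70, flags);		// on 64-bit: bit 6 of flags[1]
 */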
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __stringify(LONG_INS) " %0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}
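
/*
 * Barrier note (editor's sketch, bit name hypothetical): when clear_bit()
 * publishes completion to other CPUs, pair it with the barrier the
 * comment above calls for:
 *
 *	smp_mb__before_atomic();
 *	clear_bit(IN_PROGRESS_BIT, &state);
 */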
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
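
/*
 * Lock-style usage (editor's sketch, MY_LOCK_BIT hypothetical): the
 * acquire semantics order the critical section after a successful set:
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();		// bit already held, spin
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */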
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __stringify(LONG_EXT) " %0, %1, %3, 1;"
				    __stringify(LONG_INS) " %1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long orig;
	bool res;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_xor_is_negative_byte(mask, p);
	} else {
		orig = __test_bit_op(*p, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(mask));
		res = (orig & BIT(7)) != 0;
	}

	smp_llsc_mb();

	return res;
}
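
/*
 * Editor's note: in one atomic step this XORs @mask into *@p and reports
 * whether bit 7 of the *old* value was set, roughly:
 *
 *	orig = *p; *p ^= mask; return (orig & BIT(7)) != 0;
 *
 * which lets a caller flip one flag and test another (bit 7) in a
 * single LL/SC sequence.
 */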
#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
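
/*
 * Worked example of the software fallback (editor's illustration):
 * __fls(0x90) on 64-bit starts with num = 63 and shifts the word left
 * past each all-zero upper span (32, 16 and 8 bits here), decrementing
 * num at each step; it ends at num = 7, the position of the highest 1
 * bit in 0x90 (binary 1001 0000).
 */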
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
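
/*
 * Editor's note: (word & -word) isolates the lowest set bit via two's
 * complement, so __fls() of that single-bit value is exactly its index:
 * word = 0b101000 -> word & -word = 0b001000 -> __ffs(word) = 3.
 */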
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
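
/*
 * Worked example of the shift cascade (editor's illustration):
 * fls(0x00012345): the 8-, 4-, 2- and 1-bit probes shift x left by 15
 * in total, moving its highest 1 (bit 16) up to bit 31, and r ends at
 * 32 - 15 = 17, i.e. the 1-based position of the most significant bit.
 */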
#include <asm-generic/bitops/fls64.h>
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the below ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
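
/*
 * Editor's note: ffs() follows the libc convention, e.g. ffs(0) = 0,
 * ffs(1) = 1, ffs(0x8000) = 16. (word & -word) keeps only the lowest
 * set bit before handing it to fls(), which then reports its 1-based
 * position.
 */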
#include <asm-generic/bitops/ffz.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */