arch/parisc/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * *_bit() want use of volatile.
 * __*_bit() are "relaxed" and don't use spinlock or volatile.
 */
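
/*
 * Illustrative sketch (the "pending" bitmap is hypothetical): the
 * locked forms below are safe on words shared between CPUs, while
 * the relaxed __*_bit() forms need external serialization.
 *
 *	DECLARE_BITMAP(pending, 64);
 *
 *	set_bit(3, pending);	- atomic: hashed spinlock, IRQs off
 *	__set_bit(4, pending);	- relaxed: caller provides exclusion
 */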
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (!set)
		*addr = old | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}
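
/*
 * Sketch of the usual pattern (flag word and bit number are
 * hypothetical): test_and_set_bit() returns the old bit value, so it
 * can serve as a one-shot guard or a tiny trylock.
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_setup();	- only the first caller gets here
 */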
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (set)
		*addr = old & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += BIT_WORD(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}
#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__
/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is that they set the PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */
static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef CONFIG_64BIT
	" ldi       63,%1\n"
	" extrd,u,*<>  %0,63,32,%%r0\n"
	" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
	" addi    -32,%1,%1\n"
#else
	" ldi       31,%1\n"
#endif
	" extru,<>  %0,31,16,%%r0\n"
	" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
	" addi    -16,%1,%1\n"
	" extru,<>  %0,31,8,%%r0\n"
	" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
	" addi    -8,%1,%1\n"
	" extru,<>  %0,31,4,%%r0\n"
	" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
	" addi    -4,%1,%1\n"
	" extru,<>  %0,31,2,%%r0\n"
	" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
	" addi    -2,%1,%1\n"
	" extru,=  %0,31,1,%%r0\n"	/* check last bit */
	" addi    -1,%1,%1\n"
	: "+r" (x), "=r" (ret) );
	return ret;
}
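
/*
 * Worked example (values are illustrative): ret starts at 31 (63 on
 * 64-bit) and each extru pair either subtracts half the remaining
 * width (when the low half holds a set bit) or shifts the high half
 * down, so e.g. __ffs(1) == 0 and __ffs(0x10) == 4.
 */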
#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
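
/*
 * For example, matching the libc semantics above: ffs(0) == 0,
 * ffs(1) == 1, ffs(0x8000) == 16, i.e. __ffs() plus one, with the
 * zero case handled explicitly.
 */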
/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int ret;
	if (!x)
		return 0;

	__asm__(
	" ldi       1,%1\n"
	" extru,<>  %0,15,16,%%r0\n"
	" zdep,TR   %0,15,16,%0\n"	/* xxxx0000 */
	" addi    16,%1,%1\n"
	" extru,<>  %0,7,8,%%r0\n"
	" zdep,TR   %0,23,24,%0\n"	/* xx000000 */
	" addi    8,%1,%1\n"
	" extru,<>  %0,3,4,%%r0\n"
	" zdep,TR   %0,27,28,%0\n"	/* x0000000 */
	" addi    4,%1,%1\n"
	" extru,<>  %0,1,2,%%r0\n"
	" zdep,TR   %0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
	" addi    2,%1,%1\n"
	" extru,=  %0,0,1,%%r0\n"
	" addi    1,%1,%1\n"		/* if y & 8, add 1 */
	: "+r" (x), "=r" (ret) );

	return ret;
}
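
/*
 * Worked example (values are illustrative): ret starts at 1 and each
 * stage adds half the remaining width when the high half is non-zero,
 * or shifts the low half up otherwise, so fls(0x00010000) == 17 and
 * fls(0x40) == 7, consistent with the table in the comment above.
 */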
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _PARISC_BITOPS_H */