arch/riscv/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>

#include <asm-generic/bitops/hweight.h>
#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif
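
/*
 * For example, with BITS_PER_LONG == 64, __AMO(or) pastes the string
 * literals together into "amoor.d", i.e. the doubleword atomic OR
 * instruction; on 32-bit the same macro yields "amoor.w".
 */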
#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})
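
/*
 * The AMO instruction above atomically applies "op" to the word that
 * holds bit @nr and returns that word's previous value in __res, so
 * masking __res with __mask yields the bit's state before the operation.
 */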
#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");
#define __test_and_op_bit(op, mod, nr, addr)			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )
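
/*
 * The value-returning test-and-*() helpers use the .aqrl suffix so the
 * AMO carries both acquire and release semantics, in line with the
 * kernel's convention that RMW operations which return a value are
 * fully ordered.  The plain set/clear/change helpers pass an empty
 * suffix and are therefore relaxed (no ordering guarantee).
 */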
/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))
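
/*
 * __NOP passes the bit mask through unchanged (used with amoor/amoxor),
 * while __NOT inverts it so that amoand clears the target bit by ANDing
 * the word with the complement of the mask.
 */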
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}
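
/*
 * Illustrative (hypothetical) use of the test-and-*() helpers: let the
 * first CPU to arrive perform some one-time setup, while later callers
 * see the bit already set and skip it.  The flag word, bit number and
 * helper name below are made up for this sketch:
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_setup();	// only the first caller gets here
 */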
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}
/**
 * clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}
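
/*
 * Illustrative (hypothetical) bit lock built on the two helpers above.
 * The lock word, bit number and function names are made up for this
 * sketch and are not part of this header:
 *
 *	static unsigned long my_lock_word;
 *
 *	static void my_bit_lock(void)
 *	{
 *		while (test_and_set_bit_lock(0, &my_lock_word))
 *			cpu_relax();	// spin until old value was 0
 *	}
 *
 *	static void my_bit_unlock(void)
 *	{
 *		clear_bit_unlock(0, &my_lock_word);	// release semantics
 *	}
 */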
/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(), however it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, however it would only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}
#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */