/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	if (READ_ONCE(*p) & mask)
		return 1;

	old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}
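
/*
 * Example (illustrative sketch, not part of this header): a minimal bit
 * spinlock built on the test_and_set_bit_lock()/clear_bit_unlock() wrappers
 * that <asm-generic/bitops/instrumented-lock.h>, included below, layers on
 * top of these arch_*() ops. MY_LOCK_BIT and flags are hypothetical names
 * used only for illustration.
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &flags))
 *		cpu_relax();			// bit was already set: spin
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &flags);	// release pairs with the acquire
 */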

/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}

/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	raw_atomic_long_set_release((atomic_long_t *)p, old);
}
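
/*
 * Illustrative sketch only: x86 does not reorder stores against earlier
 * stores, so its version of the weaker unlock above is roughly
 *
 *	barrier();		// compiler barrier is enough for release on x86
 *	__clear_bit(nr, p);	// plain, non-atomic clear
 *
 * which is safe only while every other bit in *p is protected by the lock.
 */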

#ifndef arch_xor_unlock_is_negative_byte
static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	long old;

	old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
	return !!(old & BIT(7));
}
#endif
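
/*
 * Illustrative sketch only: a caller that keeps a lock bit and a "waiters"
 * bit in the low byte of a flags word can drop the lock and learn whether
 * anyone is waiting in a single atomic op, via the
 * xor_unlock_is_negative_byte() wrapper from the include below. Bit numbers
 * and the wake-up helper are hypothetical; the waiter bit must be bit 7 to
 * match the BIT(7) test above.
 *
 *	#define MY_LOCK_BIT	0
 *	#define MY_WAITERS_BIT	7
 *
 *	if (xor_unlock_is_negative_byte(BIT(MY_LOCK_BIT), &flags))
 *		wake_up_my_waiters();
 */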

#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */