/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_BITOPS_H
#define _ASM_X86_SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *)addr)

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; bts %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; btr %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; btc %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; bts %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
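
/*
 * Minimal trylock sketch (LOCK_BIT and flags are hypothetical): the
 * returned old value is 0 only for the caller that actually set the
 * bit, so that caller owns the flag.
 *
 *	if (!sync_test_and_set_bit(LOCK_BIT, &flags)) {
 *		// flag acquired; release with sync_clear_bit() + barrier
 *	}
 */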

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; btr %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
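
/*
 * Consume-once sketch (PENDING_BIT, flags and handle_event() are
 * hypothetical): of all racing callers, exactly one observes the old
 * value as set, so the event is handled exactly once.
 *
 *	if (sync_test_and_clear_bit(PENDING_BIT, &flags))
 *		handle_event();
 */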

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; btc %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
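
/*
 * Illustrative toggle (TURN_BIT and flags are hypothetical): the old
 * value tells the caller which side of the flip it performed.
 *
 *	int was_set = sync_test_and_change_bit(TURN_BIT, &flags);
 */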

#define sync_test_bit(nr, addr) test_bit(nr, addr)
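
/*
 * Polling sketch (READY_BIT and flags are hypothetical): reading a bit
 * needs no lock prefix, so sync_test_bit() simply maps to test_bit().
 *
 *	while (!sync_test_bit(READY_BIT, &flags))
 *		cpu_relax();
 */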

#undef ADDR

#endif /* _ASM_X86_SYNC_BITOPS_H */