x86: consolidate header guards
[linux/fpc-iii.git] include/asm-x86/sync_bitops.h
#ifndef ASM_X86__SYNC_BITOPS_H
#define ASM_X86__SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *)addr)
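
/*
 * Example (illustrative only, not part of the original header): given
 *
 *	unsigned long flags[2];
 *
 * on a kernel where unsigned long is 32 bits wide, sync_set_bit(32, flags)
 * operates on the least significant bit of flags[1], per the bit-numbering
 * rule above.
 */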

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; btsl %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; btrl %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("lock; btcl %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
		     : "=r" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
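
/*
 * Illustrative sketch (not from the original header): a minimal
 * test-and-set acquire loop built on sync_test_and_set_bit().  The
 * helper name example_bit_lock() and the choice of bit 0 are
 * hypothetical and used for this example only.
 */
static inline void example_bit_lock(volatile unsigned long *word)
{
	/* A non-zero return means the bit was already set, so keep spinning. */
	while (sync_test_and_set_bit(0, word))
		;	/* a real caller would use cpu_relax() here */
}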

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
		     : "=r" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
		     : "=r" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}

#define sync_test_bit(nr, addr) test_bit(nr, addr)
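
/*
 * Illustrative sketch (not from the original header): polling for a
 * flag bit set by another CPU via sync_test_bit().  The helper name
 * and the assumption that test_bit() is visible from the normal
 * bitops headers are made for this example only.
 */
static inline void example_wait_on_bit(volatile unsigned long *word, int nr)
{
	while (!sync_test_bit(nr, word))
		;	/* a real caller would use cpu_relax() or sleep */
}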

#undef ADDR

#endif /* ASM_X86__SYNC_BITOPS_H */