1 #ifndef ASM_X86_CMPXCHG_H
2 #define ASM_X86_CMPXCHG_H
4 #include <linux/compiler.h>
5 #include <asm/cpufeatures.h>
6 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
9 * Non-existant functions to indicate usage errors at link time
10 * (or compile-time if the compiler implements __compiletime_error().
12 extern void __xchg_wrong_size(void)
13 __compiletime_error("Bad argument size for xchg");
14 extern void __cmpxchg_wrong_size(void)
15 __compiletime_error("Bad argument size for cmpxchg");
16 extern void __xadd_wrong_size(void)
17 __compiletime_error("Bad argument size for xadd");
18 extern void __add_wrong_size(void)
19 __compiletime_error("Bad argument size for add");
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value. Expands to a GNU statement expression; the
 * size-dispatch switch is resolved at compile time, and an unsupported
 * operand size falls into the default case, whose undefined
 * __<op>_wrong_size() symbol fails the build/link.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
/* SMP-safe variant: lock prefix supplied by LOCK_PREFIX. */
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

/* Always emits an explicit "lock" prefix, regardless of SMP config. */
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/* CPU-local variant: no lock prefix at all. */
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
142 # include <asm/cmpxchg_32.h>
144 # include <asm/cmpxchg_64.h>
/* Size-generic front ends: operand size is derived from sizeof(*ptr). */
#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
/*
 * Like __raw_cmpxchg(), but evaluates to a success boolean and, on
 * failure, writes the value actually found in memory back through
 * *(_pold) so the caller can retry without an extra load.
 */
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
({									\
	bool success;							\
	__typeof__(_ptr) _old = (_pold);				\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
		asm volatile(lock "cmpxchgb %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "q" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
		asm volatile(lock "cmpxchgw %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
		asm volatile(lock "cmpxchgl %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
		asm volatile(lock "cmpxchgq %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);						\
})
#define __try_cmpxchg(ptr, pold, new, size)				\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

/* Returns true on success; on failure *pold is updated with *ptr. */
#define try_cmpxchg(ptr, pold, new)					\
	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
/*
 * Double-word compare-and-exchange (cmpxchg8b/cmpxchg16b) on a pair of
 * adjacent, naturally double-word-aligned long-sized slots. Evaluates
 * to true on success. Operand registers follow the instruction's fixed
 * assignment: old in a:d, new in b:c.
 */
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})
/* Locked (SMP-safe) and CPU-local double-word cmpxchg front ends. */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
258 #endif /* ASM_X86_CMPXCHG_H */