#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */

#define xchg(ptr, v)						\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  The reader side needs to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
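
/*
 * Illustrative usage (editor's sketch, not part of this header's API):
 * publish a 64-bit value so that 32-bit readers never observe a torn,
 * half-written result.  The structure and field name below are invented
 * for the example.
 *
 *	struct stats {
 *		volatile u64 last_seen_ns;
 *	};
 *
 *	static void stats_update(struct stats *s, u64 now_ns)
 *	{
 *		// One atomic 64-bit store; readers see old or new, never a mix.
 *		set_64bit(&s->last_seen_ns, now_ns);
 *	}
 */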

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but in general the primitive is misused: *ptr is an output
 *	   argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x), "+m" (*__xg(ptr))
			     : "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x), "+m" (*__xg(ptr))
			     : "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %0,%1"
			     : "=r" (x), "+m" (*__xg(ptr))
			     : "0" (x)
			     : "memory");
		break;
	}
	return x;
}
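
/*
 * Illustrative usage (editor's sketch, assuming a caller-defined flag):
 * xchg() stores the new value and returns the old one in a single
 * atomic operation, so a resource can be claimed race-free.
 *
 *	static unsigned int pending;	// hypothetical flag word
 *
 *	static int claim_pending(void)
 *	{
 *		// Atomically clear the flag and learn whether it was set.
 *		return xchg(&pending, 0) != 0;
 *	}
 */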

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n), sizeof(*(ptr))))
#endif
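
/*
 * Illustrative usage (editor's sketch): the classic compare-and-swap
 * retry loop built on cmpxchg().  Success is detected exactly as the
 * comment above describes, by comparing the returned value with the
 * expected old value.  The counter below is hypothetical.
 *
 *	static unsigned int counter;
 *
 *	static void counter_add(unsigned int delta)
 *	{
 *		unsigned int old, prev;
 *
 *		do {
 *			old = counter;
 *			prev = cmpxchg(&counter, old, old + delta);
 *		} while (prev != old);	// lost a race: reread and retry
 *	}
 */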

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "q"(new), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "q"(new), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
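
/*
 * Illustrative usage (editor's sketch): sync_cmpxchg() is for memory
 * shared with a hypervisor, where the "lock" prefix must be emitted
 * even in a CONFIG_SMP=n guest.  The shared-page layout below is
 * invented for the example.
 *
 *	struct shared_page {
 *		unsigned int event_pending;	// hypothetical shared field
 *	};
 *
 *	static int ack_event(struct shared_page *sp)
 *	{
 *		// Clear the flag only if it is still set; always locked.
 *		return sync_cmpxchg(&sp->event_pending, 1, 0) == 1;
 *	}
 */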

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "q"(new), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %2,%1"
			     : "=a"(prev), "+m"(*__xg(ptr))
			     : "r"(new), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A"(prev), "+m" (*__xg(ptr))
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "0"(old)
		     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %1"
		     : "=A"(prev), "+m"(*__xg(ptr))
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "0"(old)
		     : "memory");
	return prev;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
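
/*
 * Editor's sketch of what a software fallback like cmpxchg_386_u32()
 * can look like.  This is an assumption for illustration only: the real
 * helpers are defined out of line elsewhere in the kernel and are not
 * reproduced here.  The idea is that a genuine 80386 is uniprocessor-only,
 * so disabling local interrupts makes the read-compare-write atomic.
 *
 *	unsigned long cmpxchg_386_u32_sketch(volatile void *ptr,
 *					     u32 old, u32 new)
 *	{
 *		u32 prev;
 *		unsigned long flags;
 *
 *		local_irq_save(flags);		// UP-only: IRQs off suffices
 *		prev = *(volatile u32 *)ptr;
 *		if (prev == old)
 *			*(volatile u32 *)ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */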

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate cmpxchg8b on the 80386 and 80486 CPUs.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 4))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	__ret;							\
})
#endif

#endif /* _ASM_X86_CMPXCHG_32_H */