#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically, so that the reader side
 * sees a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
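
/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on __cmpxchg64().  cmpxchg8b only
 * stores if the value in memory still matches EDX:EAX, so on failure we
 * retry with the freshly returned value.  The helper name is
 * hypothetical, for illustration only.
 */
static inline void example_add64(volatile u64 *counter, u64 delta)
{
	u64 old = *counter;

	for (;;) {
		u64 seen = __cmpxchg64(counter, old, old + delta);

		if (seen == old)	/* swap succeeded */
			break;
		old = seen;		/* lost a race; retry with the value we saw */
	}
}
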
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate the cmpxchg instruction on the 80386 CPU. For that purpose
 * we define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
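
/*
 * Illustrative sketch (not part of the original header) of what the
 * out-of-line cmpxchg_386_u32()-style helpers roughly do: the 80386 has
 * no CMPXCHG instruction, so the compare and the conditional store are
 * made atomic by disabling interrupts around them (the 386 is UP-only).
 * This is a simplified approximation for illustration, not the real
 * implementation:
 *
 *	unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
 *	{
 *		u32 prev;
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		prev = *(u32 *)ptr;
 *		if (prev == old)
 *			*(u32 *)ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */
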
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
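
/*
 * Illustrative sketch (not part of the original header): typical use of
 * cmpxchg() in a lock-free update loop.  Whichever branch the macro
 * selects (native CMPXCHG or the 80386 fallback above), the calling
 * pattern is the same.  The names are hypothetical, for illustration
 * only:
 *
 *	static unsigned long example_set_bit(unsigned long *word, unsigned long mask)
 *	{
 *		unsigned long old, seen;
 *
 *		do {
 *			old = *word;
 *			seen = cmpxchg(word, old, old | mask);
 *		} while (seen != old);
 *		return old;
 *	}
 */
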
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486. It may be
 * necessary to simulate the cmpxchg8b instruction on those CPUs.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })
#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })

#endif
#define system_has_cmpxchg_double()	cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */