/* arch/x86/include/asm/cmpxchg_32.h */

#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  The reader side needs to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
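
/*
 * Illustrative sketch, not part of this header (hypothetical helper):
 * the canonical retry loop built on cmpxchg64().  The update is retried
 * until no other CPU has modified the location between the read and the
 * cmpxchg; in the uncontended case only one locked operation is issued.
 *
 *	static u64 counter64;
 *
 *	static void counter64_add(u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = counter64;
 *			new = old + delta;
 *		} while (cmpxchg64(&counter64, old, new) != old);
 *	}
 */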

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
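
/*
 * Note: __cmpxchg64_local() omits the lock prefix, so it is atomic only
 * with respect to the current CPU.  It is intended for data that other
 * processors never modify concurrently, e.g. per-CPU variables.
 */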

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
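
/*
 * Note that there is no 8-byte case above: this fallback only covers
 * the operand sizes of the native cmpxchg instruction.  A 64-bit
 * compare-and-exchange on CPUs without cmpxchg8b is handled separately
 * by the cmpxchg64() emulation below.
 */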

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
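
/*
 * Illustrative sketch, not part of this header (made-up names):
 * cmpxchg() as a one-shot "claim if unowned" operation.  The claim
 * succeeds only if the value read back equals the expected 0.
 *
 *	static unsigned long owner;
 *
 *	static int try_claim(unsigned long me)
 *	{
 *		return cmpxchg(&owner, 0UL, me) == 0UL;
 *	}
 */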

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new >> 32))	\
			: "memory");				\
	__ret; })
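
/*
 * How the emulation above works (summary, derived from the asm
 * constraints): alternative_io() emits the "call cmpxchg8b_emu" by
 * default and patches it at boot into an inline lock cmpxchg8b once the
 * CPU is known to have X86_FEATURE_CX8.  Either way the pointer travels
 * in %esi and the old/new values in %edx:%eax and %ecx:%ebx, matching
 * the register convention of the cmpxchg8b instruction itself.
 */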

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
			"cmpxchg8b (%%esi)",			\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new >> 32))	\
			: "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double()	cpu_has_cx8
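
/*
 * On 32-bit x86 a "double" cmpxchg operates on two adjacent 32-bit
 * words, i.e. it is implemented with cmpxchg8b, hence the CX8 check.
 */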

#endif /* _ASM_X86_CMPXCHG_32_H */