/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@	__xchg2\n"
		"1:	ldrexh	%0, [%3]\n"
		"	strexh	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
#define xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),	\
				   sizeof(*(ptr)));		\
})
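/*
 * Illustrative usage only (hypothetical helper, not part of this header):
 * the macro infers the operand size from the pointed-to type and returns
 * the previous value, with no ordering guarantees beyond atomicity.
 */
#if 0
static inline unsigned int example_take_pending(unsigned int *pending)
{
	/* Atomically fetch the old 32-bit value and store 0 in its place. */
	return xchg_relaxed(pending, 0);
}
#endif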
#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define xchg xchg_relaxed
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local_generic((ptr),			\
					        (unsigned long)(o),	\
					        (unsigned long)(n),	\
					        sizeof(*(ptr)));	\
})

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
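/*
 * Illustrative usage only (hypothetical helper): cmpxchg_local() is only
 * atomic with respect to the current CPU, so it suits data that other
 * CPUs never touch, e.g. per-CPU state updated with preemption disabled.
 */
#if 0
static inline unsigned long example_claim_slot(unsigned long *percpu_slot)
{
	/* Store 1 only if the slot still holds 0; returns the old value. */
	return cmpxchg_local(percpu_slot, 0UL, 1UL);
}
#endif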
#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
#define cmpxchg_relaxed(ptr,o,n) ({					\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})
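/*
 * Illustrative usage only (hypothetical helper): the usual compare-and-swap
 * retry loop built on cmpxchg_relaxed(), here adding to a 32-bit word.
 */
#if 0
static inline unsigned int example_add_relaxed(unsigned int *p, unsigned int inc)
{
	unsigned int old;

	/* Retry if another writer changed *p between the load and the CAS. */
	do {
		old = *p;
	} while (cmpxchg_relaxed(p, old, old + inc) != old);

	return old;
}
#endif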
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}
#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
				        (unsigned long)(o),		\
				        (unsigned long)(n),		\
				        sizeof(*(ptr)));		\
})
static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}
#define cmpxchg64_relaxed(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
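/*
 * Illustrative usage only (hypothetical helper): a lock-free 64-bit
 * increment built on the ldrexd/strexd based cmpxchg64_relaxed() above.
 */
#if 0
static inline void example_inc64(unsigned long long *counter)
{
	unsigned long long old;

	/* Retry until no concurrent writer slipped in between load and CAS. */
	do {
		old = *counter;
	} while (cmpxchg64_relaxed(counter, old, old + 1) != old);
}
#endif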
#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */