/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>
/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN
#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})
/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
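/*
 * Usage sketch (illustrative; 'flag' is a hypothetical variable): the
 * wrappers dispatch on sizeof(*ptr) and preserve the pointee's type, so a
 * 16-bit exchange ends up in __xchg_case_mb_16:
 *
 *	u16 flag;
 *	u16 old = arch_xchg(&flag, 1);		// fully ordered
 *	u16 cur = arch_xchg_relaxed(&flag, 0);	// no ordering guarantee
 */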
#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN
#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
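/*
 * Usage sketch (illustrative; 'counter' is a hypothetical variable): the
 * classic compare-and-swap retry loop built on arch_cmpxchg(), which
 * returns the value observed at the pointer (equal to 'old' on success):
 *
 *	unsigned long old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 */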
/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double()	1
#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
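/*
 * Usage sketch (illustrative; 'pair' is a hypothetical structure):
 * arch_cmpxchg_double() atomically compares-and-swaps two naturally
 * adjacent 64-bit words, returning 1 on success and 0 on failure; the
 * check above enforces the size and adjacency requirements:
 *
 *	struct { unsigned long lo, hi; } pair;
 *	int ok = arch_cmpxchg_double(&pair.lo, &pair.hi,
 *				     old_lo, old_hi, new_lo, new_hi);
 */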
#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}
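/*
 * How the wait works: sevl sets the local event register so the first wfe
 * falls straight through; ldxr then arms the exclusive monitor, and if the
 * loaded value already differs from 'val' we branch past the second wfe.
 * Otherwise the second wfe sleeps until another CPU's store to the
 * monitored location clears the monitor and generates a wake-up event.
 */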
__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);
#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN
#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
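/*
 * Usage sketch (illustrative): __cmpwait_relaxed() backs event-driven
 * polling such as the smp_cond_load_relaxed() pattern in <asm/barrier.h>;
 * a hand-rolled equivalent looks roughly like:
 *
 *	unsigned long cur;
 *	for (;;) {
 *		cur = READ_ONCE(*ptr);
 *		if (cur != old)
 *			break;
 *		__cmpwait_relaxed(ptr, cur);	// sleep until *ptr may change
 *	}
 */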
#endif	/* __ASM_CMPXCHG_H */