/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	nop\n"							\
	"	nop\n"							\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
	"	nop\n"							\
	"	" #nop_lse)						\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_1(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_2(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_4(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
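
/*
 * Illustrative sketch, not part of the original header: xchg() dispatches on
 * sizeof(*(ptr)), so the hypothetical helper below ends up in the fully
 * ordered 32-bit case and returns the previous value of the flag.
 */
static inline unsigned int __example_xchg_set_flag(unsigned int *flag)
{
	/* Full-barrier exchange; the caller sees the old value. */
	return xchg(flag, 1U);
}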

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed
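
/*
 * Illustrative sketch, not part of the original header: cmpxchg() returns the
 * value that was actually found at *ptr, so success is detected by comparing
 * the return value against the expected old value. The helper name below is
 * hypothetical.
 */
static inline int __example_cmpxchg_try_claim(unsigned long *owner,
					      unsigned long me)
{
	/* Claim only if currently unowned (0); fully ordered on success. */
	return cmpxchg(owner, 0UL, me) == 0UL;
}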

#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})
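
/*
 * Illustrative sketch, not part of the original header: cmpxchg_double()
 * atomically compares and swaps two adjacent 64-bit words and returns nonzero
 * on success. The two pointers must name adjacent unsigned-long-sized members
 * (this is what __cmpxchg_double_check() verifies), and the pair must be
 * suitably aligned for the underlying 128-bit exclusive/CASP access. The
 * struct and variables below are hypothetical:
 *
 *	struct example_pair {
 *		unsigned long first;
 *		unsigned long second;
 *	} __aligned(16);
 *
 *	// Store (n1, n2) only if both words still hold (o1, o2).
 *	ok = cmpxchg_double(&p->first, &p->second, o1, o2, n1, n2);
 */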

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})
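
/*
 * Illustrative sketch, not part of the original header (the per-CPU variable
 * below is hypothetical): these operations use the relaxed cmpxchg_local()
 * with preemption disabled, so the task cannot migrate between evaluating
 * raw_cpu_ptr() and performing the update, e.g.:
 *
 *	DEFINE_PER_CPU(unsigned long, example_state);
 *	...
 *	this_cpu_cmpxchg(example_state, 0, 1);
 */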

#endif	/* __ASM_CMPXCHG_H */