/*
 * Copyright (C) 2014 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>
#include <asm/fence.h>
22 #define __xchg_relaxed(ptr, new, size) \
24 __typeof__(ptr) __ptr = (ptr); \
25 __typeof__(new) __new = (new); \
26 __typeof__(*(ptr)) __ret; \
29 __asm__ __volatile__ ( \
30 " amoswap.w %0, %2, %1\n" \
31 : "=r" (__ret), "+A" (*__ptr) \
36 __asm__ __volatile__ ( \
37 " amoswap.d %0, %2, %1\n" \
38 : "=r" (__ret), "+A" (*__ptr) \
48 #define xchg_relaxed(ptr, x) \
50 __typeof__(*(ptr)) _x_ = (x); \
51 (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
52 _x_, sizeof(*(ptr))); \
55 #define __xchg_acquire(ptr, new, size) \
57 __typeof__(ptr) __ptr = (ptr); \
58 __typeof__(new) __new = (new); \
59 __typeof__(*(ptr)) __ret; \
62 __asm__ __volatile__ ( \
63 " amoswap.w %0, %2, %1\n" \
64 RISCV_ACQUIRE_BARRIER \
65 : "=r" (__ret), "+A" (*__ptr) \
70 __asm__ __volatile__ ( \
71 " amoswap.d %0, %2, %1\n" \
72 RISCV_ACQUIRE_BARRIER \
73 : "=r" (__ret), "+A" (*__ptr) \
83 #define xchg_acquire(ptr, x) \
85 __typeof__(*(ptr)) _x_ = (x); \
86 (__typeof__(*(ptr))) __xchg_acquire((ptr), \
87 _x_, sizeof(*(ptr))); \
90 #define __xchg_release(ptr, new, size) \
92 __typeof__(ptr) __ptr = (ptr); \
93 __typeof__(new) __new = (new); \
94 __typeof__(*(ptr)) __ret; \
97 __asm__ __volatile__ ( \
98 RISCV_RELEASE_BARRIER \
99 " amoswap.w %0, %2, %1\n" \
100 : "=r" (__ret), "+A" (*__ptr) \
105 __asm__ __volatile__ ( \
106 RISCV_RELEASE_BARRIER \
107 " amoswap.d %0, %2, %1\n" \
108 : "=r" (__ret), "+A" (*__ptr) \
118 #define xchg_release(ptr, x) \
120 __typeof__(*(ptr)) _x_ = (x); \
121 (__typeof__(*(ptr))) __xchg_release((ptr), \
122 _x_, sizeof(*(ptr))); \
125 #define __xchg(ptr, new, size) \
127 __typeof__(ptr) __ptr = (ptr); \
128 __typeof__(new) __new = (new); \
129 __typeof__(*(ptr)) __ret; \
132 __asm__ __volatile__ ( \
133 " amoswap.w.aqrl %0, %2, %1\n" \
134 : "=r" (__ret), "+A" (*__ptr) \
139 __asm__ __volatile__ ( \
140 " amoswap.d.aqrl %0, %2, %1\n" \
141 : "=r" (__ret), "+A" (*__ptr) \
151 #define xchg(ptr, x) \
153 __typeof__(*(ptr)) _x_ = (x); \
154 (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
157 #define xchg32(ptr, x) \
159 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
163 #define xchg64(ptr, x) \
165 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
174 #define __cmpxchg_relaxed(ptr, old, new, size) \
176 __typeof__(ptr) __ptr = (ptr); \
177 __typeof__(*(ptr)) __old = (old); \
178 __typeof__(*(ptr)) __new = (new); \
179 __typeof__(*(ptr)) __ret; \
180 register unsigned int __rc; \
183 __asm__ __volatile__ ( \
185 " bne %0, %z3, 1f\n" \
186 " sc.w %1, %z4, %2\n" \
189 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
190 : "rJ" (__old), "rJ" (__new) \
194 __asm__ __volatile__ ( \
196 " bne %0, %z3, 1f\n" \
197 " sc.d %1, %z4, %2\n" \
200 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
201 : "rJ" (__old), "rJ" (__new) \
210 #define cmpxchg_relaxed(ptr, o, n) \
212 __typeof__(*(ptr)) _o_ = (o); \
213 __typeof__(*(ptr)) _n_ = (n); \
214 (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
215 _o_, _n_, sizeof(*(ptr))); \
218 #define __cmpxchg_acquire(ptr, old, new, size) \
220 __typeof__(ptr) __ptr = (ptr); \
221 __typeof__(*(ptr)) __old = (old); \
222 __typeof__(*(ptr)) __new = (new); \
223 __typeof__(*(ptr)) __ret; \
224 register unsigned int __rc; \
227 __asm__ __volatile__ ( \
229 " bne %0, %z3, 1f\n" \
230 " sc.w %1, %z4, %2\n" \
232 RISCV_ACQUIRE_BARRIER \
234 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
235 : "rJ" (__old), "rJ" (__new) \
239 __asm__ __volatile__ ( \
241 " bne %0, %z3, 1f\n" \
242 " sc.d %1, %z4, %2\n" \
244 RISCV_ACQUIRE_BARRIER \
246 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
247 : "rJ" (__old), "rJ" (__new) \
256 #define cmpxchg_acquire(ptr, o, n) \
258 __typeof__(*(ptr)) _o_ = (o); \
259 __typeof__(*(ptr)) _n_ = (n); \
260 (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
261 _o_, _n_, sizeof(*(ptr))); \
264 #define __cmpxchg_release(ptr, old, new, size) \
266 __typeof__(ptr) __ptr = (ptr); \
267 __typeof__(*(ptr)) __old = (old); \
268 __typeof__(*(ptr)) __new = (new); \
269 __typeof__(*(ptr)) __ret; \
270 register unsigned int __rc; \
273 __asm__ __volatile__ ( \
274 RISCV_RELEASE_BARRIER \
276 " bne %0, %z3, 1f\n" \
277 " sc.w %1, %z4, %2\n" \
280 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
281 : "rJ" (__old), "rJ" (__new) \
285 __asm__ __volatile__ ( \
286 RISCV_RELEASE_BARRIER \
288 " bne %0, %z3, 1f\n" \
289 " sc.d %1, %z4, %2\n" \
292 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
293 : "rJ" (__old), "rJ" (__new) \
302 #define cmpxchg_release(ptr, o, n) \
304 __typeof__(*(ptr)) _o_ = (o); \
305 __typeof__(*(ptr)) _n_ = (n); \
306 (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
307 _o_, _n_, sizeof(*(ptr))); \
310 #define __cmpxchg(ptr, old, new, size) \
312 __typeof__(ptr) __ptr = (ptr); \
313 __typeof__(*(ptr)) __old = (old); \
314 __typeof__(*(ptr)) __new = (new); \
315 __typeof__(*(ptr)) __ret; \
316 register unsigned int __rc; \
319 __asm__ __volatile__ ( \
321 " bne %0, %z3, 1f\n" \
322 " sc.w.rl %1, %z4, %2\n" \
326 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
327 : "rJ" (__old), "rJ" (__new) \
331 __asm__ __volatile__ ( \
333 " bne %0, %z3, 1f\n" \
334 " sc.d.rl %1, %z4, %2\n" \
338 : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
339 : "rJ" (__old), "rJ" (__new) \
348 #define cmpxchg(ptr, o, n) \
350 __typeof__(*(ptr)) _o_ = (o); \
351 __typeof__(*(ptr)) _n_ = (n); \
352 (__typeof__(*(ptr))) __cmpxchg((ptr), \
353 _o_, _n_, sizeof(*(ptr))); \
356 #define cmpxchg_local(ptr, o, n) \
357 (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
359 #define cmpxchg32(ptr, o, n) \
361 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
362 cmpxchg((ptr), (o), (n)); \
365 #define cmpxchg32_local(ptr, o, n) \
367 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
368 cmpxchg_relaxed((ptr), (o), (n)) \
371 #define cmpxchg64(ptr, o, n) \
373 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
374 cmpxchg((ptr), (o), (n)); \
377 #define cmpxchg64_local(ptr, o, n) \
379 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
380 cmpxchg_relaxed((ptr), (o), (n)); \
#endif /* _ASM_RISCV_CMPXCHG_H */