/*
 * Source: Linux 5.6.13, arch/riscv/include/asm/cmpxchg.h
 * (blob d969bab4a26b5f588a5284ae7c8cdf7fa14cbf85)
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>
#include <asm/fence.h>
/*
 * __xchg_relaxed(): atomically swap *ptr with new and return the old
 * value, with no ordering guarantees (plain AMO, no .aq/.rl bits).
 * Only 4- and 8-byte accesses are supported; any other size is a
 * compile-time error via BUILD_BUG().
 */
#define __xchg_relaxed(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * xchg_relaxed(): public wrapper that evaluates x once and casts the
 * result of __xchg_relaxed() back to the pointee's type.
 */
#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
/*
 * __xchg_acquire(): atomic swap with acquire semantics — the AMO is
 * followed by RISCV_ACQUIRE_BARRIER so later accesses cannot be
 * reordered before the swap.  Returns the old value; only 4- and
 * 8-byte accesses are supported (BUILD_BUG() otherwise).
 */
#define __xchg_acquire(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w %0, %2, %1\n"			\
			RISCV_ACQUIRE_BARRIER				\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d %0, %2, %1\n"			\
			RISCV_ACQUIRE_BARRIER				\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * xchg_acquire(): public wrapper that evaluates x once and casts the
 * result of __xchg_acquire() back to the pointee's type.
 */
#define xchg_acquire(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_acquire((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
/*
 * __xchg_release(): atomic swap with release semantics — the AMO is
 * preceded by RISCV_RELEASE_BARRIER so earlier accesses cannot be
 * reordered after the swap.  Returns the old value; only 4- and
 * 8-byte accesses are supported (BUILD_BUG() otherwise).
 */
#define __xchg_release(ptr, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"	amoswap.w %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"	amoswap.d %0, %2, %1\n"			\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * xchg_release(): public wrapper that evaluates x once and casts the
 * result of __xchg_release() back to the pointee's type.
 */
#define xchg_release(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_release((ptr),			\
					    _x_, sizeof(*(ptr)));	\
})
/*
 * __xchg(): fully-ordered atomic swap — the AMO carries both the .aq
 * and .rl bits (amoswap.{w,d}.aqrl).  Returns the old value; only 4-
 * and 8-byte accesses are supported (BUILD_BUG() otherwise).
 */
#define __xchg(ptr, new, size)						\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(new) __new = (new);					\
	__typeof__(*(ptr)) __ret;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"	amoswap.w.aqrl %0, %2, %1\n"		\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"	amoswap.d.aqrl %0, %2, %1\n"		\
			: "=r" (__ret), "+A" (*__ptr)			\
			: "r" (__new)					\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * xchg(): public wrapper for the fully-ordered swap; evaluates x once
 * and casts the result back to the pointee's type.
 */
#define xchg(ptr, x)							\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));	\
})
/*
 * xchg32(): xchg() restricted at compile time to 32-bit objects.
 */
#define xchg32(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	xchg((ptr), (x));						\
})
/*
 * xchg64(): xchg() restricted at compile time to 64-bit objects.
 */
#define xchg64(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	xchg((ptr), (x));						\
})
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
/*
 * __cmpxchg_relaxed(): if *ptr == old, store new; always returns the
 * value observed in *ptr.  No ordering guarantees.  Implemented as an
 * LR/SC loop that retries while the store-conditional fails (__rc is
 * nonzero) and bails out early when the comparison fails.  The "rJ"
 * constraints let a zero old/new value use the x0 register (via the
 * %z output modifier).  Only 4- and 8-byte accesses are supported.
 */
#define __cmpxchg_relaxed(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * cmpxchg_relaxed(): public wrapper that evaluates o and n once each
 * and casts the result of __cmpxchg_relaxed() back to the pointee's
 * type.
 */
#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
/*
 * __cmpxchg_acquire(): LR/SC compare-and-exchange with acquire
 * semantics — RISCV_ACQUIRE_BARRIER is placed after a successful
 * store-conditional but before label 1, so the failed-comparison path
 * skips the barrier.  Returns the value observed in *ptr; only 4- and
 * 8-byte accesses are supported (BUILD_BUG() otherwise).
 */
#define __cmpxchg_acquire(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			RISCV_ACQUIRE_BARRIER				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			RISCV_ACQUIRE_BARRIER				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * cmpxchg_acquire(): public wrapper that evaluates o and n once each
 * and casts the result of __cmpxchg_acquire() back to the pointee's
 * type.
 */
#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
/*
 * __cmpxchg_release(): LR/SC compare-and-exchange with release
 * semantics — RISCV_RELEASE_BARRIER precedes the LR/SC loop so earlier
 * accesses are ordered before the update.  Returns the value observed
 * in *ptr; only 4- and 8-byte accesses are supported (BUILD_BUG()
 * otherwise).
 */
#define __cmpxchg_release(ptr, old, new, size)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			RISCV_RELEASE_BARRIER				\
			"0:	lr.d %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.d %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * cmpxchg_release(): public wrapper that evaluates o and n once each
 * and casts the result of __cmpxchg_release() back to the pointee's
 * type.
 */
#define cmpxchg_release(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_release((ptr),			\
					_o_, _n_, sizeof(*(ptr)));	\
})
/*
 * __cmpxchg(): fully-ordered LR/SC compare-and-exchange.  The
 * store-conditional carries the .rl bit and a "fence rw, rw" follows a
 * successful store (the failed-comparison path branches past the
 * fence), giving full ordering on success.  Returns the value observed
 * in *ptr; only 4- and 8-byte accesses are supported (BUILD_BUG()
 * otherwise).
 */
#define __cmpxchg(ptr, old, new, size)					\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __ret;					\
	register unsigned int __rc;					\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__ (					\
			"0:	lr.w %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.w.rl %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"	fence rw, rw\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	case 8:								\
		__asm__ __volatile__ (					\
			"0:	lr.d %0, %2\n"				\
			"	bne  %0, %z3, 1f\n"			\
			"	sc.d.rl %1, %z4, %2\n"			\
			"	bnez %1, 0b\n"				\
			"	fence rw, rw\n"				\
			"1:\n"						\
			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
			: "rJ" (__old), "rJ" (__new)			\
			: "memory");					\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__ret;								\
})
/*
 * cmpxchg(): public wrapper for the fully-ordered compare-and-exchange;
 * evaluates o and n once each and casts the result back to the
 * pointee's type.
 */
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr),				\
				       _o_, _n_, sizeof(*(ptr)));	\
})
/*
 * cmpxchg_local(): compare-and-exchange that only needs to be atomic
 * with respect to the current CPU, so the relaxed (unordered) variant
 * is sufficient.
 */
#define cmpxchg_local(ptr, o, n)					\
	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
/*
 * cmpxchg32(): cmpxchg() restricted at compile time to 32-bit objects.
 */
#define cmpxchg32(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	cmpxchg((ptr), (o), (n));					\
})
/*
 * cmpxchg32_local(): cmpxchg_local() restricted at compile time to
 * 32-bit objects.  The relaxed call is the last expression of the
 * statement expression, so its value is the macro's result.
 */
#define cmpxchg32_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	cmpxchg_relaxed((ptr), (o), (n))				\
})
/*
 * cmpxchg64(): cmpxchg() restricted at compile time to 64-bit objects.
 */
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
/*
 * cmpxchg64_local(): cmpxchg_local() restricted at compile time to
 * 64-bit objects.
 */
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
#endif /* _ASM_RISCV_CMPXCHG_H */