1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_CMPXCHG_H_
3 #define _ASM_POWERPC_CMPXCHG_H_
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/asm-405.h>
/*
 * BITOFF_CAL(size, off): bit offset (within the containing aligned u32) of
 * a sub-word value of 'size' bytes located 'off' bytes into that word.
 * The two definitions are endian-specific and must be mutually exclusive:
 * on big-endian, byte 0 holds the most-significant bits.
 */
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif
/*
 * XCHG_GEN(type, sfx, cl): emit __xchg_<type><sfx>(), an atomic exchange of
 * a sub-word (u8/u16) value.  There is no byte/halfword lwarx, so the
 * aligned u32 containing the value is reserved with lwarx, the target lane
 * is masked in (andc/or), and stwcx. retries until the store succeeds.
 * 'cl' is the extra clobber ("memory" for ordered variants, "cc" for
 * relaxed ones).
 */
#define XCHG_GEN(type, sfx, cl)				\
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
{								\
	unsigned int prev, prev_mask, tmp, bitoff, off;		\
								\
	off = (unsigned long)p % sizeof(u32);			\
	bitoff = BITOFF_CAL(sizeof(type), off);			\
	p -= off;						\
	val <<= bitoff;						\
	prev_mask = (u32)(type)-1 << bitoff;			\
								\
	__asm__ __volatile__(					\
"1:	lwarx	%0,0,%3\n"					\
"	andc	%1,%0,%5\n"					\
"	or	%1,%1,%4\n"					\
	PPC405_ERR77(0,%3)					\
"	stwcx.	%1,0,%3\n"					\
"	bne-	1b\n"						\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
	: "r" (p), "r" (val), "r" (prev_mask)			\
	: "cc", cl);						\
								\
	return prev >> bitoff;					\
}
/*
 * CMPXCHG_GEN(type, sfx, br, br2, cl): emit __cmpxchg_<type><sfx>(), a
 * compare-and-swap of a sub-word (u8/u16) value, using the same
 * masked-word lwarx/stwcx. technique as XCHG_GEN.  'br' is the entry
 * barrier, 'br2' the exit barrier (empty for _local/_relaxed), and 'cl'
 * the extra clobber.  The exit barrier is only reached on success; a
 * failed compare branches straight to label 2.
 */
#define CMPXCHG_GEN(type, sfx, br, br2, cl)		\
static inline						\
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
{							\
	unsigned int prev, prev_mask, tmp, bitoff, off;	\
							\
	off = (unsigned long)p % sizeof(u32);		\
	bitoff = BITOFF_CAL(sizeof(type), off);		\
	p -= off;					\
	old <<= bitoff;					\
	new <<= bitoff;					\
	prev_mask = (u32)(type)-1 << bitoff;		\
							\
	__asm__ __volatile__(				\
	br						\
"1:	lwarx	%0,0,%3\n"				\
"	and	%1,%0,%6\n"				\
"	cmpw	0,%1,%4\n"				\
"	bne-	2f\n"					\
"	andc	%1,%0,%6\n"				\
"	or	%1,%1,%5\n"				\
	PPC405_ERR77(0,%3)				\
"	stwcx.	%1,0,%3\n"				\
"	bne-	1b\n"					\
	br2						\
	"\n"						\
"2:"							\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)	\
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
	: "cc", cl);					\
							\
	return prev >> bitoff;				\
}
/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
83 XCHG_GEN(u8
, _local
, "memory");
84 XCHG_GEN(u8
, _relaxed
, "cc");
85 XCHG_GEN(u16
, _local
, "memory");
86 XCHG_GEN(u16
, _relaxed
, "cc");
88 static __always_inline
unsigned long
89 __xchg_u32_local(volatile void *p
, unsigned long val
)
98 : "=&r" (prev
), "+m" (*(volatile unsigned int *)p
)
105 static __always_inline
unsigned long
106 __xchg_u32_relaxed(u32
*p
, unsigned long val
)
110 __asm__
__volatile__(
115 : "=&r" (prev
), "+m" (*p
)
/* 64-bit exchange needs ldarx/stdcx., which only exist on 64-bit CPUs. */
#ifdef CONFIG_PPC64
/*
 * Atomically exchange the 64-bit word at *p with val; no ordering
 * barriers (_local semantics).  Returns the previous value.
 */
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Relaxed 64-bit exchange: no barriers, no "memory" clobber.
 * Returns the previous value of *p.
 */
static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif
158 static __always_inline
unsigned long
159 __xchg_local(void *ptr
, unsigned long x
, unsigned int size
)
163 return __xchg_u8_local(ptr
, x
);
165 return __xchg_u16_local(ptr
, x
);
167 return __xchg_u32_local(ptr
, x
);
170 return __xchg_u64_local(ptr
, x
);
173 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
177 static __always_inline
unsigned long
178 __xchg_relaxed(void *ptr
, unsigned long x
, unsigned int size
)
182 return __xchg_u8_relaxed(ptr
, x
);
184 return __xchg_u16_relaxed(ptr
, x
);
186 return __xchg_u32_relaxed(ptr
, x
);
189 return __xchg_u64_relaxed(ptr
, x
);
192 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
/*
 * Type-preserving front ends: evaluate the new value once, dispatch on
 * sizeof(*(ptr)), and cast the result back to the pointed-to type.
 * The ({ ... }) statement expression makes each macro usable as a value.
 */
#define xchg_local(ptr,x)					     \
({								     \
	__typeof__(*(ptr)) _x_ = (x);				     \
	(__typeof__(*(ptr))) __xchg_local((ptr),		     \
			(unsigned long)_x_, sizeof(*(ptr)));	     \
})

#define xchg_relaxed(ptr, x)					\
({								\
	__typeof__(*(ptr)) _x_ = (x);				\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),		\
			(unsigned long)_x_, sizeof(*(ptr)));	\
})
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
213 CMPXCHG_GEN(u8
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
214 CMPXCHG_GEN(u8
, _local
, , , "memory");
215 CMPXCHG_GEN(u8
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
216 CMPXCHG_GEN(u8
, _relaxed
, , , "cc");
217 CMPXCHG_GEN(u16
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
218 CMPXCHG_GEN(u16
, _local
, , , "memory");
219 CMPXCHG_GEN(u16
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
220 CMPXCHG_GEN(u16
, _relaxed
, , , "cc");
222 static __always_inline
unsigned long
223 __cmpxchg_u32(volatile unsigned int *p
, unsigned long old
, unsigned long new)
227 __asm__
__volatile__ (
228 PPC_ATOMIC_ENTRY_BARRIER
229 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
235 PPC_ATOMIC_EXIT_BARRIER
238 : "=&r" (prev
), "+m" (*p
)
239 : "r" (p
), "r" (old
), "r" (new)
245 static __always_inline
unsigned long
246 __cmpxchg_u32_local(volatile unsigned int *p
, unsigned long old
,
251 __asm__
__volatile__ (
252 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
260 : "=&r" (prev
), "+m" (*p
)
261 : "r" (p
), "r" (old
), "r" (new)
267 static __always_inline
unsigned long
268 __cmpxchg_u32_relaxed(u32
*p
, unsigned long old
, unsigned long new)
272 __asm__
__volatile__ (
273 "1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
280 : "=&r" (prev
), "+m" (*p
)
281 : "r" (p
), "r" (old
), "r" (new)
/*
 * cmpxchg family don't have order guarantee if cmp part fails, therefore we
 * can avoid superfluous barriers if we use assembly code to implement
 * cmpxchg() and cmpxchg_acquire(), however we don't do the similar for
 * cmpxchg_release() because that will result in putting a barrier in the
 * middle of a ll/sc loop, which is probably a bad idea. For example, this
 * might cause the conditional store more likely to fail.
 */
295 static __always_inline
unsigned long
296 __cmpxchg_u32_acquire(u32
*p
, unsigned long old
, unsigned long new)
300 __asm__
__volatile__ (
301 "1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
310 : "=&r" (prev
), "+m" (*p
)
311 : "r" (p
), "r" (old
), "r" (new)
/* 64-bit cmpxchg needs ldarx/stdcx., which only exist on 64-bit CPUs. */
#ifdef CONFIG_PPC64
/*
 * Fully ordered 64-bit compare-and-swap (entry + exit barriers).
 * Returns the previous value of *p.
 */
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

/*
 * 64-bit compare-and-swap without ordering barriers (_local semantics).
 * Returns the previous value of *p.
 */
static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

/*
 * Relaxed 64-bit compare-and-swap: no barriers, no "memory" clobber.
 * Returns the previous value of *p.
 */
static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * 64-bit compare-and-swap with acquire semantics; the acquire barrier is
 * only executed on the success path.  Returns the previous value.
 */
static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif
402 static __always_inline
unsigned long
403 __cmpxchg(volatile void *ptr
, unsigned long old
, unsigned long new,
408 return __cmpxchg_u8(ptr
, old
, new);
410 return __cmpxchg_u16(ptr
, old
, new);
412 return __cmpxchg_u32(ptr
, old
, new);
415 return __cmpxchg_u64(ptr
, old
, new);
418 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
422 static __always_inline
unsigned long
423 __cmpxchg_local(void *ptr
, unsigned long old
, unsigned long new,
428 return __cmpxchg_u8_local(ptr
, old
, new);
430 return __cmpxchg_u16_local(ptr
, old
, new);
432 return __cmpxchg_u32_local(ptr
, old
, new);
435 return __cmpxchg_u64_local(ptr
, old
, new);
438 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
442 static __always_inline
unsigned long
443 __cmpxchg_relaxed(void *ptr
, unsigned long old
, unsigned long new,
448 return __cmpxchg_u8_relaxed(ptr
, old
, new);
450 return __cmpxchg_u16_relaxed(ptr
, old
, new);
452 return __cmpxchg_u32_relaxed(ptr
, old
, new);
455 return __cmpxchg_u64_relaxed(ptr
, old
, new);
458 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
462 static __always_inline
unsigned long
463 __cmpxchg_acquire(void *ptr
, unsigned long old
, unsigned long new,
468 return __cmpxchg_u8_acquire(ptr
, old
, new);
470 return __cmpxchg_u16_acquire(ptr
, old
, new);
472 return __cmpxchg_u32_acquire(ptr
, old
, new);
475 return __cmpxchg_u64_acquire(ptr
, old
, new);
478 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
/*
 * Type-preserving front ends: evaluate operands once, dispatch on
 * sizeof(*(ptr)), and cast the result back to the pointed-to type.
 */
#define cmpxchg(ptr, o, n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n)					 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})
516 #define cmpxchg64(ptr, o, n) \
518 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
519 cmpxchg((ptr), (o), (n)); \
521 #define cmpxchg64_local(ptr, o, n) \
523 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
524 cmpxchg_local((ptr), (o), (n)); \
526 #define cmpxchg64_relaxed(ptr, o, n) \
528 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
529 cmpxchg_relaxed((ptr), (o), (n)); \
531 #define cmpxchg64_acquire(ptr, o, n) \
533 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
534 cmpxchg_acquire((ptr), (o), (n)); \
537 #include <asm-generic/cmpxchg-local.h>
538 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
541 #endif /* __KERNEL__ */
542 #endif /* _ASM_POWERPC_CMPXCHG_H_ */