1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_CMPXCHG_H_
3 #define _ASM_POWERPC_CMPXCHG_H_
6 #include <linux/compiler.h>
11 #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
13 #define BITOFF_CAL(size, off) (off * BITS_PER_BYTE)
/*
 * XCHG_GEN: emulate an 8- or 16-bit xchg with a word-sized
 * lwarx/stwcx. reservation loop on the aligned u32 containing the
 * value, for CPUs without byte/halfword larx instructions.
 *
 * 'type' selects the operand width (u8/u16), 'sfx' names the variant,
 * and 'cl' is the extra asm clobber ("memory" for ordered/local
 * variants, "cc" only for relaxed).  The loop reads the whole word,
 * clears the target sub-field (andc with prev_mask), merges the
 * shifted new value (or), and retries until the conditional store
 * succeeds.  Returns the previous sub-word value, shifted back down.
 */
#define XCHG_GEN(type, sfx, cl)					\
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
{								\
	unsigned int prev, prev_mask, tmp, bitoff, off;		\
								\
	off = (unsigned long)p % sizeof(u32);			\
	bitoff = BITOFF_CAL(sizeof(type), off);			\
	p -= off;						\
	val <<= bitoff;						\
	prev_mask = (u32)(type)-1 << bitoff;			\
								\
	__asm__ __volatile__(					\
"1:	lwarx	%0,0,%3\n"					\
"	andc	%1,%0,%5\n"					\
"	or	%1,%1,%4\n"					\
"	stwcx.	%1,0,%3\n"					\
"	bne-	1b\n"						\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
	: "r" (p), "r" (val), "r" (prev_mask)			\
	: "cc", cl);						\
								\
	return prev >> bitoff;					\
}
/*
 * CMPXCHG_GEN: emulate an 8- or 16-bit cmpxchg with a word-sized
 * lwarx/stwcx. loop, mirroring XCHG_GEN above.  'br' is an optional
 * entry barrier, 'br2' an optional exit barrier taken only on the
 * success path (the compare-fail path branches straight to 2:), and
 * 'cl' the extra clobber.  Returns the previous sub-word value.
 */
#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
static inline							\
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
{								\
	unsigned int prev, prev_mask, tmp, bitoff, off;		\
								\
	off = (unsigned long)p % sizeof(u32);			\
	bitoff = BITOFF_CAL(sizeof(type), off);			\
	p -= off;						\
	old <<= bitoff;						\
	new <<= bitoff;						\
	prev_mask = (u32)(type)-1 << bitoff;			\
								\
	__asm__ __volatile__(					\
	br							\
"1:	lwarx	%0,0,%3\n"					\
"	and	%1,%0,%6\n"					\
"	cmpw	0,%1,%4\n"					\
"	bne-	2f\n"						\
"	andc	%1,%0,%6\n"					\
"	or	%1,%1,%5\n"					\
"	stwcx.	%1,0,%3\n"					\
"	bne-	1b\n"						\
	br2							\
	"\n"							\
"2:"								\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
	: "cc", cl);						\
								\
	return prev >> bitoff;					\
}
76 * Changes the memory location '*p' to be val and returns
77 * the previous value stored there.
80 #ifndef CONFIG_PPC_HAS_LBARX_LHARX
81 XCHG_GEN(u8
, _local
, "memory");
82 XCHG_GEN(u8
, _relaxed
, "cc");
83 XCHG_GEN(u16
, _local
, "memory");
84 XCHG_GEN(u16
, _relaxed
, "cc");
86 static __always_inline
unsigned long
87 __xchg_u8_local(volatile void *p
, unsigned long val
)
92 "1: lbarx %0,0,%2 # __xchg_u8_local\n"
95 : "=&r" (prev
), "+m" (*(volatile unsigned char *)p
)
102 static __always_inline
unsigned long
103 __xchg_u8_relaxed(u8
*p
, unsigned long val
)
107 __asm__
__volatile__(
108 "1: lbarx %0,0,%2 # __xchg_u8_relaxed\n"
111 : "=&r" (prev
), "+m" (*p
)
118 static __always_inline
unsigned long
119 __xchg_u16_local(volatile void *p
, unsigned long val
)
123 __asm__
__volatile__(
124 "1: lharx %0,0,%2 # __xchg_u16_local\n"
127 : "=&r" (prev
), "+m" (*(volatile unsigned short *)p
)
134 static __always_inline
unsigned long
135 __xchg_u16_relaxed(u16
*p
, unsigned long val
)
139 __asm__
__volatile__(
140 "1: lharx %0,0,%2 # __xchg_u16_relaxed\n"
143 : "=&r" (prev
), "+m" (*p
)
151 static __always_inline
unsigned long
152 __xchg_u32_local(volatile void *p
, unsigned long val
)
156 __asm__
__volatile__(
157 "1: lwarx %0,0,%2 \n"
160 : "=&r" (prev
), "+m" (*(volatile unsigned int *)p
)
167 static __always_inline
unsigned long
168 __xchg_u32_relaxed(u32
*p
, unsigned long val
)
172 __asm__
__volatile__(
176 : "=&r" (prev
), "+m" (*p
)
#ifdef CONFIG_PPC64
/* 64-bit exchange (64-bit kernels only): ldarx/stdcx. loop. */
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif
217 static __always_inline
unsigned long
218 __xchg_local(void *ptr
, unsigned long x
, unsigned int size
)
222 return __xchg_u8_local(ptr
, x
);
224 return __xchg_u16_local(ptr
, x
);
226 return __xchg_u32_local(ptr
, x
);
229 return __xchg_u64_local(ptr
, x
);
232 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
236 static __always_inline
unsigned long
237 __xchg_relaxed(void *ptr
, unsigned long x
, unsigned int size
)
241 return __xchg_u8_relaxed(ptr
, x
);
243 return __xchg_u16_relaxed(ptr
, x
);
245 return __xchg_u32_relaxed(ptr
, x
);
248 return __xchg_u64_relaxed(ptr
, x
);
251 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
/* Type-preserving front ends: route through the unsigned long
 * dispatchers and cast the result back to the pointee type. */
#define arch_xchg_local(ptr,x)						\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg_local((ptr),				\
		(unsigned long)_x_, sizeof(*(ptr)));			\
  })

#define arch_xchg_relaxed(ptr, x)					\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})
269 * Compare and exchange - if *p == old, set it to new,
270 * and return the old value of *p.
272 #ifndef CONFIG_PPC_HAS_LBARX_LHARX
273 CMPXCHG_GEN(u8
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
274 CMPXCHG_GEN(u8
, _local
, , , "memory");
275 CMPXCHG_GEN(u8
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
276 CMPXCHG_GEN(u8
, _relaxed
, , , "cc");
277 CMPXCHG_GEN(u16
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
278 CMPXCHG_GEN(u16
, _local
, , , "memory");
279 CMPXCHG_GEN(u16
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
280 CMPXCHG_GEN(u16
, _relaxed
, , , "cc");
282 static __always_inline
unsigned long
283 __cmpxchg_u8(volatile unsigned char *p
, unsigned long old
, unsigned long new)
287 __asm__
__volatile__ (
288 PPC_ATOMIC_ENTRY_BARRIER
289 "1: lbarx %0,0,%2 # __cmpxchg_u8\n"
294 PPC_ATOMIC_EXIT_BARRIER
297 : "=&r" (prev
), "+m" (*p
)
298 : "r" (p
), "r" (old
), "r" (new)
304 static __always_inline
unsigned long
305 __cmpxchg_u8_local(volatile unsigned char *p
, unsigned long old
,
310 __asm__
__volatile__ (
311 "1: lbarx %0,0,%2 # __cmpxchg_u8_local\n"
317 : "=&r" (prev
), "+m" (*p
)
318 : "r" (p
), "r" (old
), "r" (new)
324 static __always_inline
unsigned long
325 __cmpxchg_u8_relaxed(u8
*p
, unsigned long old
, unsigned long new)
329 __asm__
__volatile__ (
330 "1: lbarx %0,0,%2 # __cmpxchg_u8_relaxed\n"
336 : "=&r" (prev
), "+m" (*p
)
337 : "r" (p
), "r" (old
), "r" (new)
343 static __always_inline
unsigned long
344 __cmpxchg_u8_acquire(u8
*p
, unsigned long old
, unsigned long new)
348 __asm__
__volatile__ (
349 "1: lbarx %0,0,%2 # __cmpxchg_u8_acquire\n"
356 : "=&r" (prev
), "+m" (*p
)
357 : "r" (p
), "r" (old
), "r" (new)
363 static __always_inline
unsigned long
364 __cmpxchg_u16(volatile unsigned short *p
, unsigned long old
, unsigned long new)
368 __asm__
__volatile__ (
369 PPC_ATOMIC_ENTRY_BARRIER
370 "1: lharx %0,0,%2 # __cmpxchg_u16\n"
375 PPC_ATOMIC_EXIT_BARRIER
377 : "=&r" (prev
), "+m" (*p
)
378 : "r" (p
), "r" (old
), "r" (new)
384 static __always_inline
unsigned long
385 __cmpxchg_u16_local(volatile unsigned short *p
, unsigned long old
,
390 __asm__
__volatile__ (
391 "1: lharx %0,0,%2 # __cmpxchg_u16_local\n"
397 : "=&r" (prev
), "+m" (*p
)
398 : "r" (p
), "r" (old
), "r" (new)
404 static __always_inline
unsigned long
405 __cmpxchg_u16_relaxed(u16
*p
, unsigned long old
, unsigned long new)
409 __asm__
__volatile__ (
410 "1: lharx %0,0,%2 # __cmpxchg_u16_relaxed\n"
416 : "=&r" (prev
), "+m" (*p
)
417 : "r" (p
), "r" (old
), "r" (new)
423 static __always_inline
unsigned long
424 __cmpxchg_u16_acquire(u16
*p
, unsigned long old
, unsigned long new)
428 __asm__
__volatile__ (
429 "1: lharx %0,0,%2 # __cmpxchg_u16_acquire\n"
436 : "=&r" (prev
), "+m" (*p
)
437 : "r" (p
), "r" (old
), "r" (new)
444 static __always_inline
unsigned long
445 __cmpxchg_u32(volatile unsigned int *p
, unsigned long old
, unsigned long new)
449 __asm__
__volatile__ (
450 PPC_ATOMIC_ENTRY_BARRIER
451 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
456 PPC_ATOMIC_EXIT_BARRIER
459 : "=&r" (prev
), "+m" (*p
)
460 : "r" (p
), "r" (old
), "r" (new)
466 static __always_inline
unsigned long
467 __cmpxchg_u32_local(volatile unsigned int *p
, unsigned long old
,
472 __asm__
__volatile__ (
473 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
480 : "=&r" (prev
), "+m" (*p
)
481 : "r" (p
), "r" (old
), "r" (new)
487 static __always_inline
unsigned long
488 __cmpxchg_u32_relaxed(u32
*p
, unsigned long old
, unsigned long new)
492 __asm__
__volatile__ (
493 "1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
499 : "=&r" (prev
), "+m" (*p
)
500 : "r" (p
), "r" (old
), "r" (new)
507 * cmpxchg family don't have order guarantee if cmp part fails, therefore we
508 * can avoid superfluous barriers if we use assembly code to implement
509 * cmpxchg() and cmpxchg_acquire(), however we don't do the similar for
510 * cmpxchg_release() because that will result in putting a barrier in the
511 * middle of a ll/sc loop, which is probably a bad idea. For example, this
512 * might cause the conditional store more likely to fail.
514 static __always_inline
unsigned long
515 __cmpxchg_u32_acquire(u32
*p
, unsigned long old
, unsigned long new)
519 __asm__
__volatile__ (
520 "1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
528 : "=&r" (prev
), "+m" (*p
)
529 : "r" (p
), "r" (old
), "r" (new)
#ifdef CONFIG_PPC64
/* 64-bit cmpxchg variants (64-bit kernels only): ldarx/cmpd/stdcx. */
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif
620 static __always_inline
unsigned long
621 __cmpxchg(volatile void *ptr
, unsigned long old
, unsigned long new,
626 return __cmpxchg_u8(ptr
, old
, new);
628 return __cmpxchg_u16(ptr
, old
, new);
630 return __cmpxchg_u32(ptr
, old
, new);
633 return __cmpxchg_u64(ptr
, old
, new);
636 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
640 static __always_inline
unsigned long
641 __cmpxchg_local(void *ptr
, unsigned long old
, unsigned long new,
646 return __cmpxchg_u8_local(ptr
, old
, new);
648 return __cmpxchg_u16_local(ptr
, old
, new);
650 return __cmpxchg_u32_local(ptr
, old
, new);
653 return __cmpxchg_u64_local(ptr
, old
, new);
656 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
660 static __always_inline
unsigned long
661 __cmpxchg_relaxed(void *ptr
, unsigned long old
, unsigned long new,
666 return __cmpxchg_u8_relaxed(ptr
, old
, new);
668 return __cmpxchg_u16_relaxed(ptr
, old
, new);
670 return __cmpxchg_u32_relaxed(ptr
, old
, new);
673 return __cmpxchg_u64_relaxed(ptr
, old
, new);
676 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
680 static __always_inline
unsigned long
681 __cmpxchg_acquire(void *ptr
, unsigned long old
, unsigned long new,
686 return __cmpxchg_u8_acquire(ptr
, old
, new);
688 return __cmpxchg_u16_acquire(ptr
, old
, new);
690 return __cmpxchg_u32_acquire(ptr
, old
, new);
693 return __cmpxchg_u64_acquire(ptr
, old
, new);
696 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
/* Type-preserving fully-ordered cmpxchg front end. */
#define arch_cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
/* Type-preserving CPU-local cmpxchg front end. */
#define arch_cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
/* Type-preserving relaxed cmpxchg front end. */
#define arch_cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})
/* Type-preserving acquire cmpxchg front end. */
#define arch_cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})
734 #define arch_cmpxchg64(ptr, o, n) \
736 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
737 arch_cmpxchg((ptr), (o), (n)); \
739 #define arch_cmpxchg64_local(ptr, o, n) \
741 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
742 arch_cmpxchg_local((ptr), (o), (n)); \
744 #define arch_cmpxchg64_relaxed(ptr, o, n) \
746 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
747 arch_cmpxchg_relaxed((ptr), (o), (n)); \
749 #define arch_cmpxchg64_acquire(ptr, o, n) \
751 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
752 arch_cmpxchg_acquire((ptr), (o), (n)); \
755 #include <asm-generic/cmpxchg-local.h>
756 #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
759 #endif /* __KERNEL__ */
760 #endif /* _ASM_POWERPC_CMPXCHG_H_ */