/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
#endif

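/*
 * Worked example (illustrative note, not part of the original header): for a
 * u8 at byte offset 1 within its naturally aligned u32, BITOFF_CAL(1, 1) is
 * (4 - 1 - 1) * 8 = 16 on big-endian (the byte occupies bits 16-23 of the
 * word) and 1 * 8 = 8 on little-endian (bits 8-15).  The sub-word generators
 * below shift the value and the mask by this amount and then operate on the
 * containing word with lwarx/stwcx.
 */
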
/* Generate a sub-word (u8/u16) xchg that operates on the containing aligned u32. */
#define XCHG_GEN(type, sfx, cl)					\
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
{								\
	unsigned int prev, prev_mask, tmp, bitoff, off;		\
	off = (unsigned long)p % sizeof(u32);			\
	bitoff = BITOFF_CAL(sizeof(type), off);			\
	p -= off;						\
	val <<= bitoff;						\
	prev_mask = (u32)(type)-1 << bitoff;			\
	__asm__ __volatile__(					\
"1:	lwarx	%0,0,%3\n"					\
"	andc	%1,%0,%5\n"					\
"	or	%1,%1,%4\n"					\
"	stwcx.	%1,0,%3\n"					\
"	bne-	1b\n"						\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
	: "r" (p), "r" (val), "r" (prev_mask)			\
	: "cc", cl);						\
	return prev >> bitoff;					\
}

/* Generate a sub-word (u8/u16) cmpxchg using the same masked-word technique. */
#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
static inline							\
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
{								\
	unsigned int prev, prev_mask, tmp, bitoff, off;		\
	off = (unsigned long)p % sizeof(u32);			\
	bitoff = BITOFF_CAL(sizeof(type), off);			\
	p -= off;						\
	old <<= bitoff;						\
	new <<= bitoff;						\
	prev_mask = (u32)(type)-1 << bitoff;			\
	__asm__ __volatile__(					\
	br							\
"1:	lwarx	%0,0,%3\n"					\
"	and	%1,%0,%6\n"					\
"	cmpw	0,%1,%4\n"					\
"	bne-	2f\n"						\
"	andc	%1,%0,%6\n"					\
"	or	%1,%1,%5\n"					\
"	stwcx.	%1,0,%3\n"					\
"	bne-	1b\n"						\
	br2							\
	"\n"							\
"2:"								\
	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
	: "cc", cl);						\
	return prev >> bitoff;					\
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

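/*
 * Illustrative usage sketch (not part of the original header; the type and
 * field names are hypothetical): publish a new one-byte state and observe
 * what was there before, with no ordering implied:
 *
 *	struct widget { u8 state; };
 *
 *	static u8 widget_set_state(struct widget *w, u8 new_state)
 *	{
 *		// dispatches by size to the u8 variant generated below
 *		return xchg_relaxed(&w->state, new_state);
 *	}
 */
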
XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_local(ptr, x);
	case 2:
		return __xchg_u16_local(ptr, x);
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 1:
		return __xchg_u8_relaxed(ptr, x);
	case 2:
		return __xchg_u16_relaxed(ptr, x);
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}

#define xchg_local(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */

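/*
 * Illustrative usage sketch (not part of the original header; names are
 * hypothetical): claim a slot only if it is currently unowned.  The fully
 * ordered cmpxchg() defined near the end of this file returns the value it
 * found, so the claim succeeded iff that value was 0:
 *
 *	static bool claim_slot(unsigned int *owner, unsigned int me)
 *	{
 *		return cmpxchg(owner, 0, me) == 0;
 *	}
 */
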
CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family provides no ordering guarantee when the compare part
 * fails, so we can avoid superfluous barriers by implementing cmpxchg() and
 * cmpxchg_acquire() in assembly.  We don't do the same for cmpxchg_release(),
 * because that would put a barrier in the middle of the ll/sc loop, which is
 * probably a bad idea - for example, it might make the conditional store more
 * likely to fail.
 */

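/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): a trylock built on cmpxchg_acquire().  Acquire ordering is
 * only needed when the lock is actually taken, which is exactly what the
 * _acquire variants below provide - a failed compare branches past the
 * PPC_ACQUIRE_BARRIER:
 *
 *	static bool my_trylock(unsigned int *lock)
 *	{
 *		return cmpxchg_acquire(lock, 0, 1) == 0;
 *	}
 */
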
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_local\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 2:
		return __cmpxchg_u16(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_local(ptr, old, new);
	case 2:
		return __cmpxchg_u16_local(ptr, old, new);
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_relaxed(ptr, old, new);
	case 2:
		return __cmpxchg_u16_relaxed(ptr, old, new);
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8_acquire(ptr, old, new);
	case 2:
		return __cmpxchg_u16_acquire(ptr, old, new);
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64_relaxed(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
#define cmpxchg64_acquire(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_acquire((ptr), (o), (n));				\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */