1 #ifndef _ASM_POWERPC_CMPXCHG_H_
2 #define _ASM_POWERPC_CMPXCHG_H_
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>
11 #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
13 #define BITOFF_CAL(size, off) (off * BITS_PER_BYTE)
16 #define XCHG_GEN(type, sfx, cl) \
17 static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
19 unsigned int prev, prev_mask, tmp, bitoff, off; \
21 off = (unsigned long)p % sizeof(u32); \
22 bitoff = BITOFF_CAL(sizeof(type), off); \
25 prev_mask = (u32)(type)-1 << bitoff; \
27 __asm__ __volatile__( \
28 "1: lwarx %0,0,%3\n" \
34 : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
35 : "r" (p), "r" (val), "r" (prev_mask) \
38 return prev >> bitoff; \
41 #define CMPXCHG_GEN(type, sfx, br, br2, cl) \
43 u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
45 unsigned int prev, prev_mask, tmp, bitoff, off; \
47 off = (unsigned long)p % sizeof(u32); \
48 bitoff = BITOFF_CAL(sizeof(type), off); \
52 prev_mask = (u32)(type)-1 << bitoff; \
54 __asm__ __volatile__( \
56 "1: lwarx %0,0,%3\n" \
68 : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
69 : "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
72 return prev >> bitoff; \
/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
82 XCHG_GEN(u8
, _local
, "memory");
83 XCHG_GEN(u8
, _relaxed
, "cc");
84 XCHG_GEN(u16
, _local
, "memory");
85 XCHG_GEN(u16
, _relaxed
, "cc");
87 static __always_inline
unsigned long
88 __xchg_u32_local(volatile void *p
, unsigned long val
)
97 : "=&r" (prev
), "+m" (*(volatile unsigned int *)p
)
104 static __always_inline
unsigned long
105 __xchg_u32_relaxed(u32
*p
, unsigned long val
)
109 __asm__
__volatile__(
114 : "=&r" (prev
), "+m" (*p
)
122 static __always_inline
unsigned long
123 __xchg_u64_local(volatile void *p
, unsigned long val
)
127 __asm__
__volatile__(
128 "1: ldarx %0,0,%2 \n"
132 : "=&r" (prev
), "+m" (*(volatile unsigned long *)p
)
139 static __always_inline
unsigned long
140 __xchg_u64_relaxed(u64
*p
, unsigned long val
)
144 __asm__
__volatile__(
149 : "=&r" (prev
), "+m" (*p
)
157 static __always_inline
unsigned long
158 __xchg_local(void *ptr
, unsigned long x
, unsigned int size
)
162 return __xchg_u8_local(ptr
, x
);
164 return __xchg_u16_local(ptr
, x
);
166 return __xchg_u32_local(ptr
, x
);
169 return __xchg_u64_local(ptr
, x
);
172 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
176 static __always_inline
unsigned long
177 __xchg_relaxed(void *ptr
, unsigned long x
, unsigned int size
)
181 return __xchg_u8_relaxed(ptr
, x
);
183 return __xchg_u16_relaxed(ptr
, x
);
185 return __xchg_u32_relaxed(ptr
, x
);
188 return __xchg_u64_relaxed(ptr
, x
);
191 BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
194 #define xchg_local(ptr,x) \
196 __typeof__(*(ptr)) _x_ = (x); \
197 (__typeof__(*(ptr))) __xchg_local((ptr), \
198 (unsigned long)_x_, sizeof(*(ptr))); \
201 #define xchg_relaxed(ptr, x) \
203 __typeof__(*(ptr)) _x_ = (x); \
204 (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
205 (unsigned long)_x_, sizeof(*(ptr))); \
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
212 CMPXCHG_GEN(u8
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
213 CMPXCHG_GEN(u8
, _local
, , , "memory");
214 CMPXCHG_GEN(u8
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
215 CMPXCHG_GEN(u8
, _relaxed
, , , "cc");
216 CMPXCHG_GEN(u16
, , PPC_ATOMIC_ENTRY_BARRIER
, PPC_ATOMIC_EXIT_BARRIER
, "memory");
217 CMPXCHG_GEN(u16
, _local
, , , "memory");
218 CMPXCHG_GEN(u16
, _acquire
, , PPC_ACQUIRE_BARRIER
, "memory");
219 CMPXCHG_GEN(u16
, _relaxed
, , , "cc");
221 static __always_inline
unsigned long
222 __cmpxchg_u32(volatile unsigned int *p
, unsigned long old
, unsigned long new)
226 __asm__
__volatile__ (
227 PPC_ATOMIC_ENTRY_BARRIER
228 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
234 PPC_ATOMIC_EXIT_BARRIER
237 : "=&r" (prev
), "+m" (*p
)
238 : "r" (p
), "r" (old
), "r" (new)
244 static __always_inline
unsigned long
245 __cmpxchg_u32_local(volatile unsigned int *p
, unsigned long old
,
250 __asm__
__volatile__ (
251 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
259 : "=&r" (prev
), "+m" (*p
)
260 : "r" (p
), "r" (old
), "r" (new)
266 static __always_inline
unsigned long
267 __cmpxchg_u32_relaxed(u32
*p
, unsigned long old
, unsigned long new)
271 __asm__
__volatile__ (
272 "1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
279 : "=&r" (prev
), "+m" (*p
)
280 : "r" (p
), "r" (old
), "r" (new)
/*
 * cmpxchg family don't have order guarantee if cmp part fails, therefore we
 * can avoid superfluous barriers if we use assembly code to implement
 * cmpxchg() and cmpxchg_acquire(), however we don't do the similar for
 * cmpxchg_release() because that will result in putting a barrier in the
 * middle of a ll/sc loop, which is probably a bad idea. For example, this
 * might cause the conditional store more likely to fail.
 */
294 static __always_inline
unsigned long
295 __cmpxchg_u32_acquire(u32
*p
, unsigned long old
, unsigned long new)
299 __asm__
__volatile__ (
300 "1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
309 : "=&r" (prev
), "+m" (*p
)
310 : "r" (p
), "r" (old
), "r" (new)
317 static __always_inline
unsigned long
318 __cmpxchg_u64(volatile unsigned long *p
, unsigned long old
, unsigned long new)
322 __asm__
__volatile__ (
323 PPC_ATOMIC_ENTRY_BARRIER
324 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
329 PPC_ATOMIC_EXIT_BARRIER
332 : "=&r" (prev
), "+m" (*p
)
333 : "r" (p
), "r" (old
), "r" (new)
339 static __always_inline
unsigned long
340 __cmpxchg_u64_local(volatile unsigned long *p
, unsigned long old
,
345 __asm__
__volatile__ (
346 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
353 : "=&r" (prev
), "+m" (*p
)
354 : "r" (p
), "r" (old
), "r" (new)
360 static __always_inline
unsigned long
361 __cmpxchg_u64_relaxed(u64
*p
, unsigned long old
, unsigned long new)
365 __asm__
__volatile__ (
366 "1: ldarx %0,0,%2 # __cmpxchg_u64_relaxed\n"
372 : "=&r" (prev
), "+m" (*p
)
373 : "r" (p
), "r" (old
), "r" (new)
379 static __always_inline
unsigned long
380 __cmpxchg_u64_acquire(u64
*p
, unsigned long old
, unsigned long new)
384 __asm__
__volatile__ (
385 "1: ldarx %0,0,%2 # __cmpxchg_u64_acquire\n"
393 : "=&r" (prev
), "+m" (*p
)
394 : "r" (p
), "r" (old
), "r" (new)
401 static __always_inline
unsigned long
402 __cmpxchg(volatile void *ptr
, unsigned long old
, unsigned long new,
407 return __cmpxchg_u8(ptr
, old
, new);
409 return __cmpxchg_u16(ptr
, old
, new);
411 return __cmpxchg_u32(ptr
, old
, new);
414 return __cmpxchg_u64(ptr
, old
, new);
417 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
421 static __always_inline
unsigned long
422 __cmpxchg_local(void *ptr
, unsigned long old
, unsigned long new,
427 return __cmpxchg_u8_local(ptr
, old
, new);
429 return __cmpxchg_u16_local(ptr
, old
, new);
431 return __cmpxchg_u32_local(ptr
, old
, new);
434 return __cmpxchg_u64_local(ptr
, old
, new);
437 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
441 static __always_inline
unsigned long
442 __cmpxchg_relaxed(void *ptr
, unsigned long old
, unsigned long new,
447 return __cmpxchg_u8_relaxed(ptr
, old
, new);
449 return __cmpxchg_u16_relaxed(ptr
, old
, new);
451 return __cmpxchg_u32_relaxed(ptr
, old
, new);
454 return __cmpxchg_u64_relaxed(ptr
, old
, new);
457 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
461 static __always_inline
unsigned long
462 __cmpxchg_acquire(void *ptr
, unsigned long old
, unsigned long new,
467 return __cmpxchg_u8_acquire(ptr
, old
, new);
469 return __cmpxchg_u16_acquire(ptr
, old
, new);
471 return __cmpxchg_u32_acquire(ptr
, old
, new);
474 return __cmpxchg_u64_acquire(ptr
, old
, new);
477 BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
480 #define cmpxchg(ptr, o, n) \
482 __typeof__(*(ptr)) _o_ = (o); \
483 __typeof__(*(ptr)) _n_ = (n); \
484 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
485 (unsigned long)_n_, sizeof(*(ptr))); \
489 #define cmpxchg_local(ptr, o, n) \
491 __typeof__(*(ptr)) _o_ = (o); \
492 __typeof__(*(ptr)) _n_ = (n); \
493 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
494 (unsigned long)_n_, sizeof(*(ptr))); \
497 #define cmpxchg_relaxed(ptr, o, n) \
499 __typeof__(*(ptr)) _o_ = (o); \
500 __typeof__(*(ptr)) _n_ = (n); \
501 (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
502 (unsigned long)_o_, (unsigned long)_n_, \
506 #define cmpxchg_acquire(ptr, o, n) \
508 __typeof__(*(ptr)) _o_ = (o); \
509 __typeof__(*(ptr)) _n_ = (n); \
510 (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
511 (unsigned long)_o_, (unsigned long)_n_, \
515 #define cmpxchg64(ptr, o, n) \
517 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
518 cmpxchg((ptr), (o), (n)); \
520 #define cmpxchg64_local(ptr, o, n) \
522 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
523 cmpxchg_local((ptr), (o), (n)); \
525 #define cmpxchg64_relaxed(ptr, o, n) \
527 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
528 cmpxchg_relaxed((ptr), (o), (n)); \
530 #define cmpxchg64_acquire(ptr, o, n) \
532 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
533 cmpxchg_acquire((ptr), (o), (n)); \
536 #include <asm-generic/cmpxchg-local.h>
537 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
540 #endif /* __KERNEL__ */
541 #endif /* _ASM_POWERPC_CMPXCHG_H_ */