/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x)							\
({	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

void __xchg_called_with_bad_pointer(void);
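
/*
 * Usage sketch (illustration only, not part of the original header; 'flag'
 * is a hypothetical variable that starts at 0): xchg() atomically stores
 * the new value and returns the previous contents, so
 *
 *	unsigned int was_set = xchg(&flag, 1);
 *
 * returns 0 to exactly one caller when multiple CPUs race to set the flag.
 */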

/*
 * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}
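
/*
 * Worked example (added illustration, not in the original source): when
 * (m & 2) == 0, bit_shift is (0 ^ 2) << 3 = 16 and mask selects the upper
 * halfword of the aligned 32-bit word, which is where big-endian SPARC
 * keeps the halfword at the lower address; when (m & 2) == 2, bit_shift is
 * 0 and the low halfword is selected.  The loop then retries the 32-bit
 * cas until no other CPU has modified the containing word between the
 * plain load and the cas.
 */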

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
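
/*
 * Usage sketch (illustration only, not part of the original header; the
 * 'counter' variable is hypothetical): callers detect success by comparing
 * the returned value with the OLD value they passed in, e.g. a lock-free
 * increment:
 *
 *	int old, prev;
 *
 *	do {
 *		old = *counter;
 *		prev = cmpxchg(counter, old, old + 1);
 *	} while (prev != old);
 */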

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}
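
/*
 * Note on the loop above (added explanation, not in the original source):
 * when the 32-bit cas succeeds it returns old32 and the expected old byte
 * is reported back.  When it fails, the byte of interest is re-extracted
 * from the word the cas observed; if that byte still equals 'old', the
 * failure was caused by a neighbouring byte changing, so the operation is
 * retried with the freshly observed word.  Otherwise the differing byte is
 * returned, which the caller treats as an ordinary cmpxchg failure.
 */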

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
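
/*
 * Added note (not in the original source): there is no case 2 above, so a
 * 2-byte cmpxchg() resolves to the never-defined
 * __cmpxchg_called_with_bad_pointer() and fails at link time.
 */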

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
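
/*
 * Illustration (hypothetical example, not in the original header): the
 * BUILD_BUG_ON() in cmpxchg64_local() rejects mis-sized operands at
 * compile time, e.g.
 *
 *	u32 small;
 *	cmpxchg64_local(&small, 0, 1);
 *
 * would fail to build because sizeof(*(ptr)) is 4, whereas a u64 operand
 * compiles and simply expands to cmpxchg_local().
 */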

#endif /* __ARCH_SPARC64_CMPXCHG__ */