arch/arc/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

/*
 * atomic_cmpxchg is the same as cmpxchg
 *   LLSC: only differs in data-type, semantics are exactly the same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
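
/*
 * Illustrative sketch, not part of the original header: the canonical
 * compare-and-swap retry loop that callers build on top of cmpxchg().
 * The helper name and "add" semantics are hypothetical, shown only to
 * make the re-read/compute/retry pattern concrete.
 */
static inline int example_cmpxchg_add(int *p, int delta)
{
	int expected, old = *p;

	do {
		expected = old;
		/* cmpxchg() returns the value actually found at *p */
		old = cmpxchg(p, expected, expected + delta);
	} while (old != expected);

	return expected + delta;	/* the value that was stored */
}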

#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to the ARC EX instruction which guarantees atomicity.
 * However in the !LLSC config, it also needs to use the @atomic_ops_lock
 * spinlock due to a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a
 *    lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *    llist.h), hence xchg() needs to follow the same locking rules.
 *    (An illustrative sketch of that usage pattern follows the xchg()
 *    definitions below.)
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
 * be disabled, thus can't possibly be interrupted/preempted/clobbered by xchg()
 * The other way around, xchg is one instruction anyway, so can't be
 * interrupted as such
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ({				\
	(typeof(*(ptr)))__xchg((unsigned long)(with),	\
			       (ptr),			\
			       sizeof(*(ptr)));		\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */
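
/*
 * Illustrative sketch, not part of the original header, of the usage
 * pattern referenced above: code such as llist.h mixes cmpxchg() and
 * xchg() on the very same word, which is why both must obey the same
 * locking rules in the !LLSC config. The type and helper names below
 * are hypothetical stand-ins for that pattern.
 */
struct example_llnode {
	struct example_llnode *next;
};

/* producer side: push one node with a cmpxchg() retry loop (cf. llist_add) */
static inline void example_llpush(struct example_llnode **head,
				  struct example_llnode *node)
{
	struct example_llnode *first;

	do {
		first = *head;
		node->next = first;
	} while (cmpxchg(head, first, node) != first);
}

/* consumer side: grab the whole list atomically with xchg() (cf. llist_del_all) */
static inline struct example_llnode *example_lldel_all(struct example_llnode **head)
{
	return xchg(head, NULL);
}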
210 * "atomic" variant of xchg()
211 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
212 * Since xchg() doesn't always do that, it would seem that following defintion
213 * is incorrect. But here's the rationale:
214 * SMP : Even xchg() takes the atomic_ops_lock, so OK.
215 * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
216 * is natively "SMP safe", no serialization required).
217 * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
218 * could clobber them. atomic_xchg() itself would be 1 insn, so it
219 * can't be clobbered by others. Thus no serialization required when
220 * atomic_xchg is involved.
222 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
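
/*
 * Illustrative sketch, not part of the original header: atomic_xchg() used
 * to atomically claim a pending value, leaving zero behind so that exactly
 * one caller observes any given non-zero value. The helper name is
 * hypothetical.
 */
static inline int example_claim_pending(atomic_t *pending)
{
	return atomic_xchg(pending, 0);
}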

#endif