arch/csky/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <asm/barrier.h>
#ifdef CONFIG_QUEUED_RWLOCKS

/*
 * Ticket-based spin-locking.
 */
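/*
 * The 32-bit lock word packs two 16-bit ticket fields, tickets.next
 * and tickets.owner.  A locker atomically draws a ticket by adding
 * 1 << TICKET_NEXT to the word, then spins until ->owner catches up
 * with the ticket it drew.  Roughly (illustrative sketch only, not
 * the ldex/stex implementation used below):
 *
 *	my_ticket = fetch_and_add(&lock->tickets.next, 1);
 *	while (READ_ONCE(lock->tickets.owner) != my_ticket)
 *		;
 */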
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%2) \n"
		"	mov		%1, %0	 \n"
		"	add		%0, %3	 \n"
		"	stex.w		%0, (%2) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp), "=&r" (lockval)
		: "r"(p), "r"(ticket_next)
		: "cc");

	while (lockval.tickets.next != lockval.tickets.owner)
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

	smp_mb();
}
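/*
 * Trylock note: rotating the lock word left by 16 bits swaps the two
 * ticket halves, so the "rotli"/"cmpne" pair below is a compact check
 * for ->next == ->owner, i.e. "lock currently free".  Only in that
 * case is a ticket drawn and the word stored back; otherwise the
 * attempt fails with "contended" set.
 */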
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 tmp, contended, res;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;

	do {
		asm volatile (
		"	ldex.w		%0, (%3)   \n"
		"	movi		%2, 1	   \n"
		"	rotli		%1, %0, 16 \n"
		"	cmpne		%1, %0	   \n"
		"	bt		1f	   \n"
		"	movi		%2, 0	   \n"
		"	add		%0, %0, %4 \n"
		"	stex.w		%0, (%3)   \n"
		"1:				   \n"
		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
		: "r"(p), "r"(ticket_next)
		: "cc");
	} while (!res);

	if (!contended)
		smp_mb();

	return !contended;
}
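/*
 * Release only has to publish the next owner: a plain store of
 * ->owner + 1, ordered after the critical section by the smp_mb()
 * in front of it.
 */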
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);

	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
#else /* CONFIG_QUEUED_RWLOCKS */

/*
 * Test-and-set spin-locking.
 */
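/*
 * The lock word is 0 when free and 1 when held.  The acquire path
 * spins until it reads 0, then writes 1 with an ldex/stex
 * (load-exclusive/store-exclusive) pair, retrying whenever the
 * store-exclusive fails.  Roughly (illustrative sketch only):
 *
 *	while (cmpxchg(&lock->lock, 0, 1) != 0)
 *		;
 */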
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}
#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

/*
 * read lock/unlock/trylock
 */
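/*
 * The rwlock word is treated as a signed counter: 0 means unlocked, a
 * positive value is the number of active readers, and a negative value
 * means a writer holds the lock.  Readers spin while the word is
 * negative ("blz") and otherwise bump it with an ldex/stex pair.
 */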
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 1b   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}
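/*
 * Read unlock decrements the counter.  It still goes through an
 * ldex/stex retry loop, because other readers (or a trylock) may be
 * updating the word concurrently.
 */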
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	smp_mb();
	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 2f   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}
/*
 * write lock/unlock/trylock
 */
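/*
 * A writer can only take the lock when the counter is exactly 0 (no
 * readers, no writer).  It then stores -1 ("subi" from zero), which
 * keeps readers and other writers spinning until arch_write_unlock()
 * resets the word to 0.
 */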
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}
#endif /* CONFIG_QUEUED_RWLOCKS */

#endif /* __ASM_CSKY_SPINLOCK_H */