/*
 * Source: [linux/fpc-iii.git] / arch / arc / include / asm / spinlock.h
 * blob daa914da796886de6a3ae3744e3428d30f3804c7
 * (extraction header also carried unrelated subject line:
 *  "staging: erofs: fix warning Comparison to bool")
 */
1 /*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
9 #ifndef __ASM_SPINLOCK_H
10 #define __ASM_SPINLOCK_H
12 #include <asm/spinlock_types.h>
13 #include <asm/processor.h>
14 #include <asm/barrier.h>
/* Lock is held iff slock holds anything other than the UNLOCKED value (plain read, no barrier) */
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
18 #ifdef CONFIG_ARC_HAS_LLSC
/* Take the spinlock: LLOCK/SCOND retry loop, spinning while held */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	/*
	 * LLOCK the lock word; if it already reads LOCKED, keep spinning.
	 * Otherwise SCOND stores LOCKED; SCOND fails (Z clear -> bnz) when
	 * another CPU raced between LLOCK and SCOND, so retry from 1:.
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
/*
 * Single-shot lock attempt, no spinning.
 *
 * Returns:
 * 1 - lock taken successfully
 * 0 - lock already held, caller must back off
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"	/* SCOND lost a race: retry */
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/* ACQUIRE barrier, pairs with the RELEASE in arch_spin_unlock() */
	smp_mb();

	return got_it;
}
/* Release the spinlock: RELEASE barrier, then plain store of UNLOCKED */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/*
	 * RELEASE barrier: accesses inside the critical section must not
	 * "bleed-down" past the store that drops the lock.
	 */
	smp_mb();

	/* under LLSC a simple store suffices to release */
	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND lost a race: retry */
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	/* ACQUIRE barrier, see comment in arch_spin_lock() */
	smp_mb();
}
/* 1 - lock taken successfully, 0 - writer holds it, caller backs off */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	/* ACQUIRE barrier (needed only on success, cheap to do always) */
	smp_mb();

	return got_it;
}
/* Take the write lock: spin until counter reads fully UNLOCKED, then zero it */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND lost a race: retry */
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	/* ACQUIRE barrier, see comment in arch_spin_lock() */
	smp_mb();
}
/* 1 - lock taken successfully, 0 - readers/writer hold it, caller backs off */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	/* ACQUIRE barrier (needed only on success, cheap to do always) */
	smp_mb();

	return got_it;
}
/* Drop one reader: RELEASE barrier, then atomic counter increment */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* RELEASE barrier ahead of giving up the reader reference */
	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND lost a race: retry */
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");
}
/* Release write lock: RELEASE barrier, then plain store of UNLOCKED */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* RELEASE barrier, pairs with the ACQUIRE in the lock paths */
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
223 #else /* !CONFIG_ARC_HAS_LLSC */
/* Take the spinlock via atomic EX (exchange): spin until old value was UNLOCKED */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
	 * for ACQ and REL semantics respectively. However EX based spinlocks
	 * need the extra smp_mb to workaround a hardware quirk.
	 */
	smp_mb();

	/* swap LOCKED in; if the old word also read LOCKED, keep retrying */
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	/* ACQUIRE barrier, see lkmm note above */
	smp_mb();
}
/* 1 - lock taken successfully, 0 - already held */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/* see hardware-quirk note in arch_spin_lock() */
	smp_mb();

	/* single unconditional EX: val receives the previous lock word */
	__asm__ __volatile__(
	"1:	ex  %0, [%1]	\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	/* lock was ours iff the old value read back UNLOCKED */
	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
/* Release the spinlock by EX-swapping UNLOCKED back in */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]	\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * see pairing version/comment in arch_spin_lock above
	 */
	smp_mb();
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
306 /* 1 - lock taken successfully */
307 static inline int arch_read_trylock(arch_rwlock_t *rw)
309 int ret = 0;
310 unsigned long flags;
312 local_irq_save(flags);
313 arch_spin_lock(&(rw->lock_mutex));
316 * zero means writer holds the lock exclusively, deny Reader.
317 * Otherwise grant lock to first/subseq reader
319 if (rw->counter > 0) {
320 rw->counter--;
321 ret = 1;
324 arch_spin_unlock(&(rw->lock_mutex));
325 local_irq_restore(flags);
327 return ret;
330 /* 1 - lock taken successfully */
331 static inline int arch_write_trylock(arch_rwlock_t *rw)
333 int ret = 0;
334 unsigned long flags;
336 local_irq_save(flags);
337 arch_spin_lock(&(rw->lock_mutex));
340 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
341 * deny writer. Otherwise if unlocked grant to writer
342 * Hence the claim that Linux rwlocks are unfair to writers.
343 * (can be starved for an indefinite time by readers).
345 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
346 rw->counter = 0;
347 ret = 1;
349 arch_spin_unlock(&(rw->lock_mutex));
350 local_irq_restore(flags);
352 return ret;
355 static inline void arch_read_lock(arch_rwlock_t *rw)
357 while (!arch_read_trylock(rw))
358 cpu_relax();
361 static inline void arch_write_lock(arch_rwlock_t *rw)
363 while (!arch_write_trylock(rw))
364 cpu_relax();
367 static inline void arch_read_unlock(arch_rwlock_t *rw)
369 unsigned long flags;
371 local_irq_save(flags);
372 arch_spin_lock(&(rw->lock_mutex));
373 rw->counter++;
374 arch_spin_unlock(&(rw->lock_mutex));
375 local_irq_restore(flags);
378 static inline void arch_write_unlock(arch_rwlock_t *rw)
380 unsigned long flags;
382 local_irq_save(flags);
383 arch_spin_lock(&(rw->lock_mutex));
384 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
385 arch_spin_unlock(&(rw->lock_mutex));
386 local_irq_restore(flags);
389 #endif
391 #endif /* __ASM_SPINLOCK_H */