/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
9 #ifndef __ASM_SPINLOCK_H
10 #define __ASM_SPINLOCK_H
12 #include <asm/spinlock_types.h>
13 #include <asm/processor.h>
14 #include <asm/barrier.h>
16 #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
18 #ifdef CONFIG_ARC_HAS_LLSC
20 static inline void arch_spin_lock(arch_spinlock_t
*lock
)
25 "1: llock %[val], [%[slock]] \n"
26 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
27 " scond %[LOCKED], [%[slock]] \n" /* acquire */
31 : [slock
] "r" (&(lock
->slock
)),
32 [LOCKED
] "r" (__ARCH_SPIN_LOCK_LOCKED__
)
36 * ACQUIRE barrier to ensure load/store after taking the lock
37 * don't "bleed-up" out of the critical section (leak-in is allowed)
38 * http://www.spinics.net/lists/kernel/msg2010409.html
40 * ARCv2 only has load-load, store-store and all-all barrier
41 * thus need the full all-all barrier
46 /* 1 - lock taken successfully */
47 static inline int arch_spin_trylock(arch_spinlock_t
*lock
)
49 unsigned int val
, got_it
= 0;
52 "1: llock %[val], [%[slock]] \n"
53 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
54 " scond %[LOCKED], [%[slock]] \n" /* acquire */
56 " mov %[got_it], 1 \n"
60 [got_it
] "+&r" (got_it
)
61 : [slock
] "r" (&(lock
->slock
)),
62 [LOCKED
] "r" (__ARCH_SPIN_LOCK_LOCKED__
)
70 static inline void arch_spin_unlock(arch_spinlock_t
*lock
)
74 WRITE_ONCE(lock
->slock
, __ARCH_SPIN_LOCK_UNLOCKED__
);
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
82 static inline void arch_read_lock(arch_rwlock_t
*rw
)
87 * zero means writer holds the lock exclusively, deny Reader.
88 * Otherwise grant lock to first/subseq reader
90 * if (rw->counter > 0) {
97 "1: llock %[val], [%[rwlock]] \n"
98 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
99 " sub %[val], %[val], 1 \n" /* reader lock */
100 " scond %[val], [%[rwlock]] \n"
104 : [rwlock
] "r" (&(rw
->counter
)),
111 /* 1 - lock taken successfully */
112 static inline int arch_read_trylock(arch_rwlock_t
*rw
)
114 unsigned int val
, got_it
= 0;
116 __asm__
__volatile__(
117 "1: llock %[val], [%[rwlock]] \n"
118 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
119 " sub %[val], %[val], 1 \n" /* counter-- */
120 " scond %[val], [%[rwlock]] \n"
121 " bnz 1b \n" /* retry if collided with someone */
122 " mov %[got_it], 1 \n"
124 "4: ; --- done --- \n"
127 [got_it
] "+&r" (got_it
)
128 : [rwlock
] "r" (&(rw
->counter
)),
137 static inline void arch_write_lock(arch_rwlock_t
*rw
)
142 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
143 * deny writer. Otherwise if unlocked grant to writer
144 * Hence the claim that Linux rwlocks are unfair to writers.
145 * (can be starved for an indefinite time by readers).
147 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
153 __asm__
__volatile__(
154 "1: llock %[val], [%[rwlock]] \n"
155 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
156 " mov %[val], %[WR_LOCKED] \n"
157 " scond %[val], [%[rwlock]] \n"
161 : [rwlock
] "r" (&(rw
->counter
)),
162 [UNLOCKED
] "ir" (__ARCH_RW_LOCK_UNLOCKED__
),
169 /* 1 - lock taken successfully */
170 static inline int arch_write_trylock(arch_rwlock_t
*rw
)
172 unsigned int val
, got_it
= 0;
174 __asm__
__volatile__(
175 "1: llock %[val], [%[rwlock]] \n"
176 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
177 " mov %[val], %[WR_LOCKED] \n"
178 " scond %[val], [%[rwlock]] \n"
179 " bnz 1b \n" /* retry if collided with someone */
180 " mov %[got_it], 1 \n"
182 "4: ; --- done --- \n"
185 [got_it
] "+&r" (got_it
)
186 : [rwlock
] "r" (&(rw
->counter
)),
187 [UNLOCKED
] "ir" (__ARCH_RW_LOCK_UNLOCKED__
),
196 static inline void arch_read_unlock(arch_rwlock_t
*rw
)
205 __asm__
__volatile__(
206 "1: llock %[val], [%[rwlock]] \n"
207 " add %[val], %[val], 1 \n"
208 " scond %[val], [%[rwlock]] \n"
212 : [rwlock
] "r" (&(rw
->counter
))
216 static inline void arch_write_unlock(arch_rwlock_t
*rw
)
220 WRITE_ONCE(rw
->counter
, __ARCH_RW_LOCK_UNLOCKED__
);
223 #else /* !CONFIG_ARC_HAS_LLSC */
225 static inline void arch_spin_lock(arch_spinlock_t
*lock
)
227 unsigned int val
= __ARCH_SPIN_LOCK_LOCKED__
;
230 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
231 * for ACQ and REL semantics respectively. However EX based spinlocks
232 * need the extra smp_mb to workaround a hardware quirk.
236 __asm__
__volatile__(
238 #ifdef CONFIG_EZNPS_MTM_EXT
241 " breq %0, %2, 1b \n"
243 : "r"(&(lock
->slock
)), "ir"(__ARCH_SPIN_LOCK_LOCKED__
)
244 #ifdef CONFIG_EZNPS_MTM_EXT
245 , "i"(CTOP_INST_SCHD_RW
)
252 /* 1 - lock taken successfully */
253 static inline int arch_spin_trylock(arch_spinlock_t
*lock
)
255 unsigned int val
= __ARCH_SPIN_LOCK_LOCKED__
;
259 __asm__
__volatile__(
262 : "r"(&(lock
->slock
))
267 return (val
== __ARCH_SPIN_LOCK_UNLOCKED__
);
270 static inline void arch_spin_unlock(arch_spinlock_t
*lock
)
272 unsigned int val
= __ARCH_SPIN_LOCK_UNLOCKED__
;
275 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
281 * EX is not really required here, a simple STore of 0 suffices.
282 * However this causes tasklist livelocks in SystemC based SMP virtual
283 * platforms where the systemc core scheduler uses EX as a cue for
284 * moving to next core. Do a git log of this file for details
286 __asm__
__volatile__(
289 : "r"(&(lock
->slock
))
293 * see pairing version/comment in arch_spin_lock above
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
306 /* 1 - lock taken successfully */
307 static inline int arch_read_trylock(arch_rwlock_t
*rw
)
312 local_irq_save(flags
);
313 arch_spin_lock(&(rw
->lock_mutex
));
316 * zero means writer holds the lock exclusively, deny Reader.
317 * Otherwise grant lock to first/subseq reader
319 if (rw
->counter
> 0) {
324 arch_spin_unlock(&(rw
->lock_mutex
));
325 local_irq_restore(flags
);
330 /* 1 - lock taken successfully */
331 static inline int arch_write_trylock(arch_rwlock_t
*rw
)
336 local_irq_save(flags
);
337 arch_spin_lock(&(rw
->lock_mutex
));
340 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
341 * deny writer. Otherwise if unlocked grant to writer
342 * Hence the claim that Linux rwlocks are unfair to writers.
343 * (can be starved for an indefinite time by readers).
345 if (rw
->counter
== __ARCH_RW_LOCK_UNLOCKED__
) {
349 arch_spin_unlock(&(rw
->lock_mutex
));
350 local_irq_restore(flags
);
355 static inline void arch_read_lock(arch_rwlock_t
*rw
)
357 while (!arch_read_trylock(rw
))
361 static inline void arch_write_lock(arch_rwlock_t
*rw
)
363 while (!arch_write_trylock(rw
))
367 static inline void arch_read_unlock(arch_rwlock_t
*rw
)
371 local_irq_save(flags
);
372 arch_spin_lock(&(rw
->lock_mutex
));
374 arch_spin_unlock(&(rw
->lock_mutex
));
375 local_irq_restore(flags
);
378 static inline void arch_write_unlock(arch_rwlock_t
*rw
)
382 local_irq_save(flags
);
383 arch_spin_lock(&(rw
->lock_mutex
));
384 rw
->counter
= __ARCH_RW_LOCK_UNLOCKED__
;
385 arch_spin_unlock(&(rw
->lock_mutex
));
386 local_irq_restore(flags
);
391 #endif /* __ASM_SPINLOCK_H */