/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#ifdef CONFIG_ARC_HAS_LLSC
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
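/*
 * A minimal sketch (assuming atomic updates, which the LLOCK/SCOND sequences
 * below actually provide) of the counter protocol used by these rwlock
 * routines: __ARCH_RW_LOCK_UNLOCKED__ doubles as the maximum reader count,
 * each reader decrements it, and a writer drains it all the way to 0.
 *
 *	read_lock:	if (rw->counter > 0) rw->counter--;
 *	write_lock:	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) rw->counter = 0;
 *	read_unlock:	rw->counter++;
 *	write_unlock:	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
 */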
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}
#else	/* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
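/*
 * A sketch of the fallback scheme every rwlock op below follows, assuming
 * only the EX-based arch_spin_lock() above is available (i.e. no atomic
 * read-modify-write on @counter itself):
 *
 *	local_irq_save(flags);
 *	arch_spin_lock(&rw->lock_mutex);
 *	... inspect/update rw->counter ...
 *	arch_spin_unlock(&rw->lock_mutex);
 *	local_irq_restore(flags);
 */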
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */
#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
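/*
 * Usage sketch (my_lock/my_rwlock are hypothetical example locks): kernel
 * code does not call these arch_* hooks directly, it goes through the
 * generic wrappers declared in <linux/spinlock.h> and <linux/rwlock.h>:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	spin_lock(&my_lock);		eventually calls arch_spin_lock()
 *	spin_unlock(&my_lock);		... arch_spin_unlock()
 *	read_lock(&my_rwlock);		... arch_read_lock()
 *	write_lock(&my_rwlock);		... arch_write_lock()
 */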
#endif /* __ASM_SPINLOCK_H */