/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

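/*
 * Illustrative note (added commentary, not in the original source):
 * smp_cond_load_acquire(ptr, cond_expr) spins loading *ptr until the
 * condition holds for the loaded value VAL, then provides ACQUIRE
 * ordering; for this call site, roughly:
 *
 *	while ((VAL = READ_ONCE(lock->slock)))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 */
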
#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

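/*
 * Illustrative sketch (added commentary, not in the original): the
 * llock/scond sequence above is roughly the following, where the
 * hypothetical store_cond() stands in for scond and fails if another
 * CPU broke the llock reservation:
 *
 *	do {
 *		val = lock->slock;			// llock
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__ ||	// breq: spin
 *		 !store_cond(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 *							// bnz: retry
 */
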
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

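/*
 * Added note (not in the original): with LLSC the unlock itself is a
 * plain store; the smp_mb() ahead of it supplies the RELEASE ordering,
 * so critical-section accesses cannot leak past the store.
 */
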
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

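/*
 * Added commentary (not in the original), the @counter encoding used below:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	counter == 0				held by a writer
 *	0 < counter < UNLOCKED			held by readers; each
 *						reader decrements it by 1
 */
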
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}

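/*
 * Illustrative sketch (added commentary, not in the original): EX
 * atomically swaps a register with memory, so the loop above is
 * approximately:
 *
 *	do {
 *		val = xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);	// was already held
 */
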
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

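/*
 * Added note (not in the original): without LLSC, @counter cannot be
 * updated atomically by itself, so every rwlock op below takes
 * @lock_mutex with IRQs disabled, making the read-modify-write of
 * @counter effectively atomic.
 */
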
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */