/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}
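
/*
 * Illustrative sketch only (not part of the file proper): at the C level the
 * LLOCK/SCOND loop above behaves roughly as below, with try_store_exclusive()
 * a purely hypothetical stand-in for the conditional store:
 *
 *      do {
 *              val = lock->slock;                              // llock
 *      } while (val == __ARCH_SPIN_LOCK_LOCKED__ ||            // spin while LOCKED
 *               !try_store_exclusive(&lock->slock,
 *                                    __ARCH_SPIN_LOCK_LOCKED__));  // scond
 */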

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}
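
/*
 * Illustrative sketch of the trylock above (not real code): bail out if the
 * lock is observed held; only an SCOND collision is retried:
 *
 *      if (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *              return 0;                               // already held, bail
 *      lock->slock = __ARCH_SPIN_LOCK_LOCKED__;        // atomically, via llock/scond
 *      return 1;
 */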

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}
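
/*
 * Hypothetical usage sketch: callers normally reach these through the generic
 * spinlock wrappers rather than the arch_* primitives directly:
 *
 *      arch_spinlock_t lk = { .slock = __ARCH_SPIN_LOCK_UNLOCKED__ };
 *
 *      arch_spin_lock(&lk);
 *      ... critical section, ordered by the surrounding smp_mb() calls ...
 *      arch_spin_unlock(&lk);
 */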

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
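
/*
 * Sketch of the counter encoding relied upon below (for illustration only;
 * the real definitions live in asm/spinlock_types.h):
 *
 *      counter == __ARCH_RW_LOCK_UNLOCKED__            lock is free
 *      0 < counter < __ARCH_RW_LOCK_UNLOCKED__         held by reader(s)
 *      counter == 0                                    held by a single writer
 *
 * Readers decrement the counter; a writer swings it straight to 0.
 */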

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

        smp_mb();
}

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * This smp_mb() is technically superfluous, we only need the one
         * after the lock for providing the ACQUIRE semantics.
         * However doing the "right" thing was regressing hackbench
         * so keeping this, pending further investigation
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
#ifdef CONFIG_EZNPS_MTM_EXT
        "       .word %3                \n"
#endif
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
        , "i"(CTOP_INST_SCHD_RW)
#endif
        : "memory");

        /*
         * ACQUIRE barrier to ensure load/store after taking the lock
         * don't "bleed-up" out of the critical section (leak-in is allowed)
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barrier
         * thus need the full all-all barrier
         */
        smp_mb();
}
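
/*
 * Illustrative C-level sketch of the EX based acquire above, with exchange()
 * a purely hypothetical stand-in for the atomic EX instruction:
 *
 *      do {
 *              val = exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *      } while (val == __ARCH_SPIN_LOCK_LOCKED__);     // keep swapping until the
 *                                                      // old value was UNLOCKED
 */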

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
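
/*
 * Trylock sketch (illustrative only, exchange() being the same hypothetical
 * swap as above): a single EX, success iff the value displaced was UNLOCKED:
 *
 *      val = exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *      return val == __ARCH_SPIN_LOCK_UNLOCKED__;
 */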

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
         * is the only option
         */
        smp_mb();

        /*
         * EX is not really required here, a simple STore of 0 suffices.
         * However this causes tasklist livelocks in SystemC based SMP virtual
         * platforms where the systemc core scheduler uses EX as a cue for
         * moving to next core. Do a git log of this file for details
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * superfluous, but keeping for now - see pairing version in
         * arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
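
/*
 * For illustration, the trylock/unlock ops below all follow the same pattern
 * (this is just a summary of the code that follows):
 *
 *      local_irq_save(flags);
 *      arch_spin_lock(&rw->lock_mutex);
 *      ... inspect / update rw->counter ...
 *      arch_spin_unlock(&rw->lock_mutex);
 *      local_irq_restore(flags);
 */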

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)   ((x)->counter > 0)
#define arch_write_can_lock(x)  ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif  /* __ASM_SPINLOCK_H */