/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>

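/*
 * spin_retry bounds the busy-wait loops below before a waiter yields;
 * it can be overridden at boot with the spin_retry= parameter.
 */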
int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

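	/*
	 * The lock word is 0 when free; otherwise it holds the one's
	 * complement of the owning CPU address (SPINLOCK_LOCKVAL), which
	 * is why ~owner is passed to the smp_* helpers below.
	 */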
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
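	/*
	 * Spin with interrupts enabled in the caller's saved state; they
	 * are disabled again only around the actual lock attempt below.
	 */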
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

void arch_spin_relax(arch_spinlock_t *lp)
{
	unsigned int cpu = lp->lock;

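	/*
	 * Hand the time slice to the lock owner if we are virtualized
	 * (z/VM, KVM) or if the owning vcpu is not scheduled on a real
	 * CPU; spinning would only waste cycles the owner could use.
	 */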
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

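/*
 * rwlock word layout used by the waiters below: 0x80000000 is set while
 * a writer holds the lock, otherwise the word counts the readers.
 * Readers enter by compare-and-swapping old -> old + 1, writers by
 * swapping 0 -> 0x80000000.
 */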
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

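	/*
	 * Readers have no single lock owner to target, so the waiter uses
	 * an undirected smp_yield() once the retry budget is exhausted.
	 */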
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);