/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

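/*
 * Number of polling iterations on a contended lock before considering
 * a yield.  The default of -1 means "pick automatically at boot":
 * spin_retry_init() below chooses a small count when the compare-and-delay
 * facility is available and a large busy-wait count otherwise.
 */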
static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

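/*
 * Example: booting with "spin_retry=2000" sets the retry count from the
 * kernel command line; the spin_retry < 0 check in spin_retry_init()
 * then leaves that value untouched.
 */
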
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
        asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

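/*
 * The .insn opcode above is "compare and delay" (CAD): while the lock
 * word still contains the expected old value, the CPU pauses briefly
 * instead of re-reading the contended cache line at full speed.
 *
 * Lock word convention used below: an arch_spinlock_t holds 0 when free
 * and SPINLOCK_LOCKVAL of the owner (the bitwise complement of the
 * owning CPU's address) when taken, which is why ~owner is passed to
 * smp_vcpu_scheduled() and smp_yield_cpu().
 */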
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&lp->lock, owner);
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

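/*
 * Variant of arch_spin_lock_wait() for the irqsave case: interrupts are
 * re-enabled (local_irq_restore) while waiting and disabled again right
 * before each acquisition attempt.
 */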
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        local_irq_restore(flags);
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&lp->lock, owner);
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

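/*
 * Bounded trylock back end: poll the lock word at most spin_retry times,
 * using compare-and-delay between attempts when available, and give up
 * instead of yielding to the hypervisor.
 */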
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        for (count = spin_retry; count > 0; count--) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return 1;
                } else if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&lp->lock, owner);
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

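/*
 * rwlock word layout used below: bit 0x80000000 marks a writer and the
 * lower 31 bits count readers, so a negative (int) value means
 * write-locked.  rw->owner holds the lock value of the current writer,
 * if any, which allows directed yields to the owning virtual CPU.
 */
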
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old;
        int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        /* Revert the read-count increment done by the interlocked fast path. */
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old < 0) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

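/*
 * With the z196 interlocked-access facility the writer bit can be set
 * with a single interlocked load-and-or (__RAW_LOCK/__RAW_OP_OR); the
 * pre-z196 variant further below must use compare-and-swap instead.
 */
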
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
        unsigned int owner, old;
        int count = spin_retry;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_rmb();
                if ((int) old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
                if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&rw->lock, old);
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old, prev;
        int count = spin_retry;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old >= 0 &&
                    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_rmb();
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
                if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&rw->lock, old);
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

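/*
 * Bounded write-trylock back end: only a completely free lock word (no
 * readers and no writer) can be claimed, by swapping in the writer bit.
 */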
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

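/*
 * Relax hook for the lock wait loops: cpu is the lock value of the
 * current owner, or zero if there is no known owner.  Yield the CPU
 * unless we run in an LPAR and the owner's virtual CPU is scheduled.
 */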
void arch_lock_relax(unsigned int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);