/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
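
/*
 * Note on the lock word: a free arch_spinlock_t holds zero, a held lock
 * holds the lock value of the owning CPU, i.e. the bitwise complement of
 * its CPU number (see arch_spin_lockval() in asm/spinlock.h). That is why
 * the wait loops below pass ~owner to arch_vcpu_is_preempted() and
 * smp_yield_cpu() to identify and yield to the lock holder.
 */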

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
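
/*
 * rwlock slow paths. The rwlock word uses the sign bit (0x80000000) as the
 * write-lock bit and the lower 31 bits as the reader count: readers bump
 * the word by one while it is non-negative, a writer sets the sign bit once
 * the reader count has drained to zero. rw->owner carries the lock value of
 * the write-lock holder, so ~owner again yields a CPU number that can be
 * used for a directed yield.
 */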

void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* Revert the reader count increment done by the fast path. */
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
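
/*
 * Lock relax helper: callers pass the current lock value, where zero means
 * the lock is free and anything else is the lock value of the owning CPU.
 * Yield to that CPU unless we run on LPAR and the owner is known to be
 * running.
 */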
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);