/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Number of lock retries, tunable via the spin_retry= parameter. */
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
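
/*
 * A virtual CPU is considered preempted by the hypervisor only if it
 * is neither in enabled wait (i.e. idle) nor currently scheduled on a
 * real CPU. Yielding to such a CPU helps it give up the lock sooner.
 */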
static inline int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;
	if (smp_vcpu_scheduled(cpu))
		return 0;
	return 1;
}
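
/*
 * The spinlock word contains SPINLOCK_LOCKVAL of the owner, i.e. the
 * bitwise complement of the owning CPU number. Zero therefore means
 * unlocked, and ~owner recovers the CPU number for the yield hints.
 */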
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
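
/*
 * Same algorithm as arch_spin_lock_wait(), but the caller's interrupt
 * state is restored while waiting; interrupts are only disabled again
 * around the compare-and-swap that actually takes the lock.
 */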
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
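
/*
 * rwlock word layout:
 *	bit 0 (0x80000000)	- writer bit
 *	bits 1-31		- number of readers
 * rw->owner holds the lock value (~cpu) of the current writer so that
 * waiting CPUs can direct their yield hints at it.
 */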
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
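
/*
 * With the z196 interlocked-access facility the writer bit is set
 * with a single load-and-or (__RAW_LOCK/__RAW_OP_OR); @prev is the
 * pre-or lock value from the inline fastpath, so the loop can tell
 * whether its own or-operation was the one that set the writer bit.
 */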
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
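
/*
 * Fallback for older machines: the writer bit is set with a plain
 * compare-and-swap loop. @prev starts as 0x80000000 so the exit test
 * can only succeed after our own cas observed a lock word with the
 * writer bit clear.
 */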
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
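
/*
 * Called with the raw lock value of a contended lock: yield to the
 * owning virtual CPU unless we run directly in an LPAR and the owner
 * is known to be running.
 */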
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);