// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);
/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
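/*
 * The two helpers below wrap a plain load and a compare-and-swap with the
 * zEC12 "next instruction access intent" (NIAI) hint. The hint is emitted
 * as a raw .long opcode, presumably so the file still assembles with
 * toolchains that do not know the mnemonic; without
 * CONFIG_HAVE_MARCH_ZEC12_FEATURES only the plain instruction is used.
 */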
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}
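/*
 * The lock word holds the lock value of the owning CPU, assumed to be the
 * bitwise complement of the CPU number (arch_spin_lockval() in
 * asm/spinlock.h), or 0 if the lock is free. Hence ~owner below recovers
 * the CPU number that is passed to arch_vcpu_is_preempted() and
 * smp_yield_cpu().
 */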
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
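/*
 * Same slow path as arch_spin_lock_wait() for callers that arrive with
 * interrupts disabled and the saved flags at hand: interrupts are
 * re-enabled via local_irq_restore() while spinning and disabled again
 * right before the lock is actually taken.
 */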
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	local_irq_restore(flags);

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
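/*
 * Bounded trylock slow path: retry the compare-and-swap up to spin_retry
 * times without yielding the CPU. A sketch of the assumed caller in
 * asm/spinlock.h:
 *
 *	static inline bool arch_spin_trylock(arch_spinlock_t *lp)
 *	{
 *		if (!arch_spin_trylock_once(lp))
 *			return arch_spin_trylock_retry(lp);
 *		return true;
 *	}
 */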
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
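/*
 * rwlock layout as used below: bit 31 (0x80000000) marks a writer, the
 * lower 31 bits count readers, and rw->owner tracks the lock value of the
 * write-lock holder so waiters can yield to it. A negative lock word
 * therefore means "write locked"; readers enter by bumping the count from
 * old to old + 1.
 */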
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
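/*
 * With CONFIG_HAVE_MARCH_Z196_FEATURES the __RAW_LOCK()/__RAW_OP_* helpers
 * presumably map to the z196 interlocked-access facility (load-and-add,
 * load-and-or), so a writer can set the write bit with a single atomic
 * operation; the #else branch further down keeps a compare-and-swap loop
 * for older machines.
 */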
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
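/*
 * Presumed backend for the arch_*_relax() hooks: @cpu is the lock value of
 * the current holder (~CPU number) or 0 if the lock is free. Yield to the
 * holder unless we run on LPAR and the holder is known to be running.
 */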
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);