arch/s390/lib/spinlock.c
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
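/*
 * Number of iterations to spin on a lock word before involving the
 * hypervisor; tunable at boot via the spin_retry= parameter below.
 */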
int spin_retry = 1000;
/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
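/*
 * Slow path of arch_spin_lock(): the lock word contains 0 when free,
 * otherwise the SPINLOCK_LOCKVAL of the owning CPU. The ~owner
 * conversions below turn that value back into a CPU number for the
 * smp_vcpu_scheduled()/smp_yield_cpu() hypervisor hints.
 */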
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
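/*
 * Same algorithm as arch_spin_lock_wait(), but for callers that spin
 * with saved interrupt flags: interrupts stay enabled while waiting
 * and are disabled again only around the compare-and-swap attempt.
 */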
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
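/*
 * Yield hint while waiting for a lock: if the lock is held and we run
 * under a hypervisor (z/VM or KVM) or the owner's virtual CPU is not
 * scheduled, donate the time slice to the owning CPU.
 */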
void arch_spin_relax(arch_spinlock_t *lp)
{
	unsigned int cpu = lp->lock;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);
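/*
 * Bounded trylock: retry the inline fast path up to spin_retry times.
 */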
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
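/*
 * rwlock word layout: 0 means unlocked, a positive value counts the
 * active readers, and 0x80000000 marks a writer. A writer therefore
 * shows up as a negative value in the signed (int) casts below.
 */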
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
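/*
 * As _raw_read_lock_wait(), but re-enables interrupts from the saved
 * flags while spinning and disables them around the update.
 */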
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
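/*
 * Bounded reader trylock: give up after spin_retry iterations if a
 * writer keeps the lock word negative the whole time.
 */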
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
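/*
 * A writer must wait until the word is 0 (no readers, no writer) and
 * then install the writer bit 0x80000000 in a single compare-and-swap.
 */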
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
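/*
 * As _raw_write_lock_wait(), but with interrupts enabled from the
 * saved flags while spinning and disabled around the update.
 */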
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
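/*
 * Bounded writer trylock: attempt the 0 -> 0x80000000 transition for
 * at most spin_retry iterations.
 */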
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);