/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
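
/*
 * Default number of loop iterations a contended lock path spins before
 * giving up its time slice; tunable at boot via the "spin_retry="
 * kernel parameter handled below.
 */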
int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
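
/*
 * Diagnose 0x44 asks the hypervisor to end this virtual CPU's current
 * time slice, giving other virtual CPUs (e.g. the lock holder) a
 * chance to run.
 */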
static inline void _raw_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}
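
/*
 * Diagnose 0x9c is a directed yield: the remainder of the time slice
 * is donated with a hint naming the CPU that should run instead. Where
 * 0x9c is unavailable, fall back to the undirected diag 0x44 yield.
 */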
static inline void _raw_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (cpu_logical_map(cpu)));
	else
		_raw_yield();
}
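
/*
 * The lock word stores the bitwise complement of the owner's CPU
 * number (~cpu), so that CPU 0 still yields a non-zero value and 0 can
 * mean "unlocked". The wait loop spins up to spin_retry times while
 * the owner's virtual CPU appears to be scheduled, and otherwise
 * yields to the owner.
 */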
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
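
/*
 * Variant used on the spin_lock_irqsave() path: the caller's saved
 * flags are restored (re-enabling interrupts) while we spin, and
 * interrupts are disabled again around each attempt to take the lock,
 * so we return with interrupts off.
 */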
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
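
/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times
 * without yielding. Returns 1 if the lock was taken, 0 otherwise.
 */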
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
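
/*
 * Called while spinning on a held lock: if the owner's virtual CPU is
 * not currently scheduled by the hypervisor, donate our time slice to
 * it rather than burning cycles.
 */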
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			_raw_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);
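
/*
 * The rwlock word uses bit 0x80000000 as the writer bit and the lower
 * 31 bits as the reader count. A reader enters by incrementing the
 * count with compare-and-swap; the swap fails while a writer holds the
 * lock.
 */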
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
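
/*
 * Reader slow path for read_lock_irqsave(): spin with interrupts
 * enabled and disable them again before the compare-and-swap that
 * takes the lock.
 */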
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
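
/*
 * Bounded reader trylock: up to spin_retry compare-and-swap attempts,
 * no yielding. Returns 1 on success, 0 otherwise.
 */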
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
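
/*
 * A writer takes the lock by swapping in the writer bit, which only
 * succeeds while the lock word is zero, i.e. no writer and no readers.
 */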
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
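
/*
 * Writer slow path for write_lock_irqsave(): interrupts are enabled
 * while spinning and disabled again around each lock attempt.
 */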
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
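
/*
 * Bounded writer trylock: up to spin_retry attempts to set the writer
 * bit. Returns 1 on success, 0 otherwise.
 */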
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);