/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
int spin_retry = 1000;
/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
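
/*
 * The lock word holds the bitwise complement of the owning CPU number,
 * so that CPU 0 still yields a non-zero "locked" value; 0 means free.
 * While waiting, the code spins only as long as the owning virtual CPU
 * appears to be scheduled on a real CPU; otherwise it yields the time
 * slice towards the owner instead of burning cycles.
 */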
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
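
/*
 * Variant of arch_spin_lock_wait() used by spin_lock_irqsave(): the
 * caller's saved flags are restored so interrupts can be serviced while
 * busy waiting, and interrupts are disabled again around every
 * compare-and-swap attempt so the lock is acquired with interrupts off.
 */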
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
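
/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times.
 * Returns 1 if the lock was acquired, 0 if it stayed contended.
 */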
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
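
/*
 * Lock relax path: if the lock is held and we either run under a
 * hypervisor (z/VM or KVM) or the owner's virtual CPU is not currently
 * scheduled, donate the time slice to the owner instead of spinning.
 */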
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);
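
/*
 * rwlock layout: bit 31 of rw->lock is the writer bit, the lower 31
 * bits count the readers. A reader may enter while the writer bit is
 * clear and does so by incrementing the reader count with a
 * compare-and-swap on the full lock word.
 */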
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
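
/*
 * Reader slow path for read_lock_irqsave(): interrupts are re-enabled
 * from the saved flags while polling and disabled again before the
 * compare-and-swap that takes the lock.
 */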
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
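
/*
 * Bounded reader trylock: up to spin_retry attempts to bump the reader
 * count; returns 1 on success, 0 otherwise.
 */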
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
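
/*
 * A writer needs the whole lock word to be zero (no readers, no other
 * writer) and claims the lock by setting bit 31 (0x80000000). After
 * spin_retry unsuccessful polls the CPU yields its time slice before
 * trying again.
 */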
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
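
/*
 * Writer slow path for write_lock_irqsave(): like the reader variant,
 * interrupts stay enabled while polling and are disabled again just
 * before the compare-and-swap.
 */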
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
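
/*
 * Bounded writer trylock: up to spin_retry attempts to set the writer
 * bit; returns 1 on success, 0 otherwise.
 */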
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);