/*
 * Copyright IBM Corp. 1999
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
extern int spin_retry;
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory" );
	return old;
}
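/*
 * A minimal C sketch of the CS semantics above, assuming the whole
 * sequence executes as one atomic step; cs_semantics() is a hypothetical
 * name used only for illustration and is not part of this header:
 *
 *	static unsigned int cs_semantics(volatile unsigned int *lock,
 *					 unsigned int old, unsigned int new)
 *	{
 *		unsigned int prev = *lock;
 *		if (prev == old)
 *			*lock = new;	// swap only if the compare matched
 *		return prev;		// equals 'old' iff the swap happened
 *	}
 */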
/*
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)
extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner_cpu == 0;
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	/*
	 * Store the one's complement of the cpu number so that cpu 0
	 * does not leave the lock word looking unlocked (zero).
	 */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
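/*
 * Usage sketch, not part of this header: kernel code normally reaches
 * these primitives through the generic spin_lock()/spin_unlock()
 * wrappers. Assuming a counter protected by an arch_spinlock_t
 * initialized with the unlocked initializer from asm/spinlock_types.h:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *	unsigned long counter;
 *
 *	arch_spin_lock(&lock);		// CAS 0 -> ~cpu, else spin in
 *					// arch_spin_lock_wait()
 *	counter++;			// critical section
 *	arch_spin_unlock(&lock);	// CAS the owner value back to 0
 */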
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
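/*
 * Lock word layout, inferred from the constants used below rather than
 * stated anywhere in this header: bit 31 of ->lock marks an active
 * writer, the low 31 bits count active readers.
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers, no writer
 *	0x80000000	one writer, no readers
 */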
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	/* Mask off the writer bit; the swap fails if a writer is active. */
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}
static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		/* Retry the decrement until no other cpu changed ->lock. */
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}
static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
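/*
 * Usage sketch for the "mix" described in the comment above (illustrative
 * only; real code uses the generic read_lock()/write_lock_irqsave()
 * wrappers): a writer in process context disables interrupts, while
 * readers that may run in interrupt context take the plain read lock:
 *
 *	unsigned long flags;
 *
 *	// writer (process context): must be irq-safe
 *	local_irq_save(flags);
 *	arch_write_lock(&rw);
 *	// ...update the shared data...
 *	arch_write_unlock(&rw);
 *	local_irq_restore(flags);
 *
 *	// reader (may run in an interrupt handler)
 *	arch_read_lock(&rw);
 *	// ...read the shared data...
 *	arch_read_unlock(&rw);
 */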
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */