/*
 *  include/asm-s390/spinlock.h
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

extern int spin_retry;

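/*
 * _raw_compare_and_swap() wraps the COMPARE AND SWAP (CS) instruction:
 * "old" is compared with the lock word, and only if they match is "new"
 * stored into it.  The lock word value that was actually observed is
 * returned either way, so callers check for success by comparing the
 * return value with the "old" value they passed in.
 */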
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

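/*
 * For orientation, the types are roughly (see asm/spinlock_types.h for
 * the authoritative definitions):
 *
 *	typedef struct {
 *		volatile unsigned int owner_cpu;
 *	} arch_spinlock_t;
 *
 *	typedef struct {
 *		volatile unsigned int lock;
 *	} arch_rwlock_t;
 */
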
#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

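/*
 * The fast path stores the bitwise complement of the holder's CPU
 * number in owner_cpu.  ~smp_processor_id() is non-zero even for
 * CPU 0, so 0 always means "unlocked" and arch_spin_is_locked() only
 * has to test for a non-zero word.
 */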
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

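/*
 * arch_spin_unlock() below passes the currently stored owner value as
 * the compare value, so the CS always succeeds on the first attempt;
 * CS is a serializing instruction and thus also provides the ordering
 * required when releasing the lock.
 */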
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

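/*
 * The rwlock word encodes both states in a single unsigned int:
 * bit 31 (0x80000000) is set while a writer holds the lock, and the
 * lower 31 bits count the readers.  Readers CS the masked count up by
 * one; a writer CSes the whole word from 0 to 0x80000000.
 */
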
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

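/*
 * The *_lock_wait*() and *_trylock_retry() slow paths are implemented
 * out of line (arch/s390/lib/spinlock.c in the kernel tree); spin_retry,
 * declared above, bounds the number of busy retries they make before
 * yielding the (virtual) CPU.
 */
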
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

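/*
 * arch_read_unlock() has no out-of-line fallback: it keeps retrying
 * the CS until the reader count is decremented without interference
 * from other readers or an arriving writer.
 */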
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */