#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
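
/*
 * Background note (added for clarity): PA-RISC's ldcw (load and clear word)
 * instruction atomically reads the lock word and zeroes it, and requires the
 * word to be 16-byte aligned (hence __ldcw_align()).  A lock is therefore
 * free when its aligned word is non-zero and held when it is zero.
 */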

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;
	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();
	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * The spinlock is held by the writer, preventing any readers or other
 * writers from grabbing the rwlock. Readers use the lock to serialise their
 * access to the counter (which records how many readers currently hold the
 * lock). Linux rwlocks are unfair to writers; they can be starved for
 * an indefinite time by readers. They can also be taken in interrupt context,
 * so we have to disable interrupts when acquiring the spin lock to be sure
 * that an interrupting reader doesn't get an inconsistent view of the lock.
 */
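
/*
 * Illustrative sketch (not part of the original header): assuming the
 * generic <linux/spinlock.h> layer maps read_lock()/write_lock() onto
 * the __raw_* primitives below, a caller would use the rwlock roughly
 * as shown.  The names example_lock, example_data, example_reader and
 * example_writer are hypothetical; DEFINE_RWLOCK() and the read_/write_
 * wrappers are the generic-layer spellings, not defined in this file.
 *
 *	static DEFINE_RWLOCK(example_lock);
 *	static int example_data;
 *
 *	int example_reader(void)
 *	{
 *		int val;
 *
 *		read_lock(&example_lock);	// rw->counter++ under rw->lock
 *		val = example_data;
 *		read_unlock(&example_lock);	// rw->counter-- under rw->lock
 *		return val;
 *	}
 *
 *	void example_writer(int val)
 *	{
 *		write_lock(&example_lock);	// waits for counter == 0, sets it to -1
 *		example_data = val;
 *		write_unlock(&example_lock);	// counter back to 0, rw->lock released
 *	}
 */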

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (__raw_spin_trylock(&rw->lock)) {
		rw->counter++;
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */

	local_irq_restore(flags);
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (__raw_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			__raw_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
	return !rw->counter;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */