1 #ifndef __ASM_ARCH_SPINLOCK_H
2 #define __ASM_ARCH_SPINLOCK_H
4 #include <linux/spinlock_types.h>
6 #define RW_LOCK_BIAS 0x01000000
/*
 * Low-level lock primitives — presumably implemented in CRIS
 * assembly elsewhere in the arch tree (definitions are not in this
 * header; TODO confirm location).  They operate directly on the
 * ->slock byte of a raw spinlock.
 */
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
12 static inline int __raw_spin_is_locked(raw_spinlock_t
*x
)
14 return *(volatile signed char *)(&(x
)->slock
) <= 0;
17 static inline void __raw_spin_unlock(raw_spinlock_t
*lock
)
19 __asm__
volatile ("move.d %1,%0" \
20 : "=m" (lock
->slock
) \
25 static inline void __raw_spin_unlock_wait(raw_spinlock_t
*lock
)
27 while (__raw_spin_is_locked(lock
))
31 static inline int __raw_spin_trylock(raw_spinlock_t
*lock
)
33 return cris_spin_trylock((void *)&lock
->slock
);
36 static inline void __raw_spin_lock(raw_spinlock_t
*lock
)
38 cris_spin_lock((void *)&lock
->slock
);
42 __raw_spin_lock_flags(raw_spinlock_t
*lock
, unsigned long flags
)
44 __raw_spin_lock(lock
);
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
59 static inline int __raw_read_can_lock(raw_rwlock_t
*x
)
61 return (int)(x
)->lock
> 0;
64 static inline int __raw_write_can_lock(raw_rwlock_t
*x
)
66 return (x
)->lock
== RW_LOCK_BIAS
;
69 static inline void __raw_read_lock(raw_rwlock_t
*rw
)
71 __raw_spin_lock(&rw
->slock
);
72 while (rw
->lock
== 0);
74 __raw_spin_unlock(&rw
->slock
);
77 static inline void __raw_write_lock(raw_rwlock_t
*rw
)
79 __raw_spin_lock(&rw
->slock
);
80 while (rw
->lock
!= RW_LOCK_BIAS
);
82 __raw_spin_unlock(&rw
->slock
);
85 static inline void __raw_read_unlock(raw_rwlock_t
*rw
)
87 __raw_spin_lock(&rw
->slock
);
89 __raw_spin_unlock(&rw
->slock
);
92 static inline void __raw_write_unlock(raw_rwlock_t
*rw
)
94 __raw_spin_lock(&rw
->slock
);
95 while (rw
->lock
!= RW_LOCK_BIAS
);
96 rw
->lock
== RW_LOCK_BIAS
;
97 __raw_spin_unlock(&rw
->slock
);
100 static inline int __raw_read_trylock(raw_rwlock_t
*rw
)
103 __raw_spin_lock(&rw
->slock
);
108 __raw_spin_unlock(&rw
->slock
);
112 static inline int __raw_write_trylock(raw_rwlock_t
*rw
)
115 __raw_spin_lock(&rw
->slock
);
116 if (rw
->lock
== RW_LOCK_BIAS
) {
120 __raw_spin_unlock(&rw
->slock
);
/* No arch-specific backoff on CRIS: each relax hook just yields the
 * pipeline via cpu_relax() while a contended lock is polled. */
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
129 #endif /* __ASM_ARCH_SPINLOCK_H */