/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub [%0], %%g2\n\t"		/* atomically read the byte, set it to 0xff */
	"orcc %%g2, 0x0, %%g0\n\t"	/* was it already held? */
	"bne,a 2f\n\t"
	" ldub [%0], %%g2\n\t"
	".subsection 2\n"
	"2:\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"	/* spin with plain loads until it clears */
	"bne,a 2b\n\t"
	" ldub [%0], %%g2\n\t"
	"b,a 1b\n\t"			/* then retry the ldstub */
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

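/* Illustration only, not part of this header's API: the ldstub loop
 * above behaves roughly like the portable C sketch below, where the
 * GCC builtin __atomic_test_and_set() stands in for ldstub's atomic
 * "load the byte and set it to 0xff":
 *
 *	void spin_lock_sketch(unsigned char *byte)
 *	{
 *		while (__atomic_test_and_set(byte, __ATOMIC_ACQUIRE))
 *			while (__atomic_load_n(byte, __ATOMIC_RELAXED))
 *				cpu_relax();
 *	}
 *
 * i.e. spin with plain loads while the lock is held and only retry
 * the expensive atomic operation once the byte reads back as zero.
 */
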
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);	/* got it iff the byte was previously clear */
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                        8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. A writer who successfully acquires the wlock
 * but finds the counter non-zero has to release the lock and wait
 * until both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */

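/* Illustration only: the writer hand-off described above as a C
 * sketch. test_and_set_byte() and clear_byte() are hypothetical
 * stand-ins for the ldstub on byte 3 of the word (the wlock byte,
 * which is the least significant byte on big-endian Sparc):
 *
 *	void write_lock_sketch(volatile unsigned int *lock)
 *	{
 *		for (;;) {
 *			while (test_and_set_byte(lock))
 *				cpu_relax();
 *			if (!(*lock & ~0xff))
 *				break;
 *			clear_byte(lock);
 *		}
 *		*lock = ~0U;
 *	}
 *
 * i.e. grab the wlock byte, and if any reader still holds the 24-bit
 * counter, drop the byte again and retry until both halves are zero.
 */
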
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Try to grab the wlock byte. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			/* Readers still hold the counter; back off. */
			((volatile u8 *)&rw->lock)[3] = 0;
		else
			/* No readers; mark the whole word writer-held. */
			*(volatile u32 *)&rw->lock = ~0U;
	}

	return (val == 0);
}

static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_try\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

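/* Note that the low byte of ->lock is the wlock byte: a reader may
 * take the lock whenever no writer holds or wants it (low byte clear),
 * while a writer needs the whole word clear, i.e. no writer and a
 * zero reader count.
 */
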
#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */