/* Source: Linux 4.8-rc8, linux/fpc-iii.git
 * Path:   arch/sparc/include/asm/spinlock_32.h
 * Blob:   d9c5876c61215494df0238992da09c03f5d82211
 */
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC_SPINLOCK_H
7 #define __SPARC_SPINLOCK_H
9 #ifndef __ASSEMBLY__
11 #include <asm/psr.h>
12 #include <asm/barrier.h>
13 #include <asm/processor.h> /* for cpu_relax */
15 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
17 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
19 smp_cond_load_acquire(&lock->lock, !VAL);
22 static inline void arch_spin_lock(arch_spinlock_t *lock)
24 __asm__ __volatile__(
25 "\n1:\n\t"
26 "ldstub [%0], %%g2\n\t"
27 "orcc %%g2, 0x0, %%g0\n\t"
28 "bne,a 2f\n\t"
29 " ldub [%0], %%g2\n\t"
30 ".subsection 2\n"
31 "2:\n\t"
32 "orcc %%g2, 0x0, %%g0\n\t"
33 "bne,a 2b\n\t"
34 " ldub [%0], %%g2\n\t"
35 "b,a 1b\n\t"
36 ".previous\n"
37 : /* no outputs */
38 : "r" (lock)
39 : "g2", "memory", "cc");
42 static inline int arch_spin_trylock(arch_spinlock_t *lock)
44 unsigned int result;
45 __asm__ __volatile__("ldstub [%1], %0"
46 : "=r" (result)
47 : "r" (lock)
48 : "memory");
49 return (result == 0);
52 static inline void arch_spin_unlock(arch_spinlock_t *lock)
54 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
83 static inline void __arch_read_lock(arch_rwlock_t *rw)
85 register arch_rwlock_t *lp asm("g1");
86 lp = rw;
87 __asm__ __volatile__(
88 "mov %%o7, %%g4\n\t"
89 "call ___rw_read_enter\n\t"
90 " ldstub [%%g1 + 3], %%g2\n"
91 : /* no outputs */
92 : "r" (lp)
93 : "g2", "g4", "memory", "cc");
/* Public read-lock entry point: runs __arch_read_lock() with local IRQs
 * disabled.  Per the rwlock comment above, readers may run in interrupt
 * context; masking IRQs here keeps an interrupt-context reader from
 * hitting the transiently-held wlock byte on this CPU and deadlocking. */
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)
103 static inline void __arch_read_unlock(arch_rwlock_t *rw)
105 register arch_rwlock_t *lp asm("g1");
106 lp = rw;
107 __asm__ __volatile__(
108 "mov %%o7, %%g4\n\t"
109 "call ___rw_read_exit\n\t"
110 " ldstub [%%g1 + 3], %%g2\n"
111 : /* no outputs */
112 : "r" (lp)
113 : "g2", "g4", "memory", "cc");
/* Public read-unlock entry point: runs __arch_read_unlock() with local
 * IRQs disabled, matching the IRQ discipline of arch_read_lock(). */
#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
123 static inline void arch_write_lock(arch_rwlock_t *rw)
125 register arch_rwlock_t *lp asm("g1");
126 lp = rw;
127 __asm__ __volatile__(
128 "mov %%o7, %%g4\n\t"
129 "call ___rw_write_enter\n\t"
130 " ldstub [%%g1 + 3], %%g2\n"
131 : /* no outputs */
132 : "r" (lp)
133 : "g2", "g4", "memory", "cc");
134 *(volatile __u32 *)&lp->lock = ~0U;
137 static void inline arch_write_unlock(arch_rwlock_t *lock)
139 __asm__ __volatile__(
140 " st %%g0, [%0]"
141 : /* no outputs */
142 : "r" (lock)
143 : "memory");
146 static inline int arch_write_trylock(arch_rwlock_t *rw)
148 unsigned int val;
150 __asm__ __volatile__("ldstub [%1 + 3], %0"
151 : "=r" (val)
152 : "r" (&rw->lock)
153 : "memory");
155 if (val == 0) {
156 val = rw->lock & ~0xff;
157 if (val)
158 ((volatile u8*)&rw->lock)[3] = 0;
159 else
160 *(volatile u32*)&rw->lock = ~0U;
163 return (val == 0);
166 static inline int __arch_read_trylock(arch_rwlock_t *rw)
168 register arch_rwlock_t *lp asm("g1");
169 register int res asm("o0");
170 lp = rw;
171 __asm__ __volatile__(
172 "mov %%o7, %%g4\n\t"
173 "call ___rw_read_try\n\t"
174 " ldstub [%%g1 + 3], %%g2\n"
175 : "=r" (res)
176 : "r" (lp)
177 : "g2", "g4", "memory", "cc");
178 return res;
/* Public read-trylock entry point: a GNU statement expression so the
 * macro yields __arch_read_trylock()'s result, evaluated with local IRQs
 * disabled like the other read-side operations.  (Restores the closing
 * "})" that terminates the statement expression.) */
#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})
/* sparc32 takes no advantage of the caller's saved flags state; the
 * _flags variants just forward to the plain lock operations. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

/* No special backoff while spinning — just a plain cpu_relax(). */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* Advisory (racy) availability checks against the layout documented
 * above: reads are blocked only by the wlock byte (low 8 bits); writes
 * are blocked by any non-zero word (readers or writer). */
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)
201 #endif /* !(__ASSEMBLY__) */
203 #endif /* __SPARC_SPINLOCK_H */