/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>
/*
 * spinlock
 *
 * There is at most one owner of a spinlock. There are no different
 * types of spinlock owners like there are for rwlocks (see below).
 *
 * When trying to obtain a spinlock, the function "spins" forever, or
 * busy-waits, until the lock is obtained. When spinning, presumably some
 * other owner will soon give up the spinlock, making it available to
 * others. Use the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *    0   nobody owns the spinlock
 *    1   somebody owns the spinlock
 */
#define arch_spin_is_locked(x) ((x)->slock != 0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* s32c1i stores %0 iff the lock word equals SCOMPARE1, and always
         * returns the old value in %0, so a non-zero result means the lock
         * was held and we must retry. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* Same compare-and-store as arch_spin_lock(), but with a single
         * attempt instead of a retry loop. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");

        return tmp == 0 ? 1 : 0;        /* tmp holds the previous lock value */
}
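
/*
 * Typical trylock usage, as an illustration only (my_lock and
 * do_something_else() are hypothetical):
 *
 *      if (arch_spin_trylock(&my_lock)) {
 *              ... critical section ...
 *              arch_spin_unlock(&my_lock);
 *      } else {
 *              do_something_else();    (avoid busy-waiting)
 *      }
 */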
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* s32ri is a 32-bit store with release semantics: all prior memory
         * accesses complete before the lock word is cleared. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}
/*
 * rwlock
 *
 * Read-write locks are really a more flexible spinlock. They allow
 * multiple readers but only one writer. Write ownership is exclusive
 * (i.e., all other readers and writers are blocked from ownership while
 * there is a write owner). These rwlocks are unfair to writers. Writers
 * can be starved for an indefinite time by readers.
 *
 * possible values:
 *
 *   0           nobody owns the rwlock
 *  >0           one or more readers own the rwlock
 *                 (the positive value is the actual number of readers)
 *  0x80000000   one writer owns the rwlock, no other writers, no readers
 */
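
/*
 * Note that 0x80000000 is the sign bit of the 32-bit lock word, so "a
 * writer owns the lock" is equivalent to "(int)lock < 0". The read paths
 * below rely on this, detecting a writer (or a reader count about to
 * overflow into the writer bit) with a single bltz instruction.
 */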
#define arch_write_can_lock(x)  ((x)->lock == 0)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* Atomically replace 0 with 0x80000000 (1 << 31, built by the
         * slli); any other old value means a reader or writer is active,
         * so keep retrying. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* One attempt at the same 0 -> 0x80000000 transition as
         * arch_write_lock(). */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return tmp == 0 ? 1 : 0;
}
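
/*
 * Illustration only, not part of the original file: the write trylock
 * boils down to one compare-and-swap, as in this sketch using GCC's
 * __sync_bool_compare_and_swap builtin (the function name is made up).
 */
static inline int arch_write_trylock_sketch(arch_rwlock_t *rw)
{
        /* Succeeds only when no reader or writer holds the lock. */
        return __sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000);
}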
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* The writer is the sole owner, so a plain release store of 0 is
         * enough; no compare-and-store is needed. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;
        unsigned long result;

        /* Spin while a writer holds the lock (negative value), then try to
         * increment the reader count; retry if the word changed under us. */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       bltz    %1, 1b\n"
                        "       wsr     %1, scompare1\n"
                        "       addi    %0, %1, 1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long result;
        unsigned long tmp;

        /* Bail out early (result negative, hence non-zero) if a writer is
         * active or the reader count would overflow into the writer bit.
         * Otherwise s32c1i leaves the old lock value in result, so
         * result - tmp is zero exactly when the compare-and-store hit. */
        __asm__ __volatile__(
                        "       l32i    %1, %2, 0\n"
                        "       addi    %0, %1, 1\n"
                        "       bltz    %0, 1f\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       sub     %0, %0, %1\n"
                        "1:\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return result == 0;
}
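
/*
 * Worked example for the success test above: with two readers holding the
 * lock, l32i reads 2 into tmp and addi sets result = 3. A successful
 * s32c1i swaps 2 -> 3 and returns the old value 2 in result, so
 * result - tmp == 0 and the function returns 1. Had another CPU changed
 * the lock word in between, s32c1i would return that other value, the
 * difference would be non-zero, and the function would return 0.
 */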
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp1, tmp2;

        /* Decrement the reader count, retrying if another CPU modified the
         * lock word between the load and the compare-and-store. */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       addi    %0, %1, -1\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (tmp1), "=&a" (tmp2)
                        : "a" (&rw->lock)
                        : "memory");
}
#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#endif  /* _XTENSA_SPINLOCK_H */