/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <asm/barrier.h>

#ifdef CONFIG_QUEUED_RWLOCKS

/*
 * Ticket-based spin-locking.
 */
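
/*
 * Layout note (per asm/spinlock_types.h): the lock word is a union of
 * a u32 and two u16 tickets, owner in the low half and next in the
 * high half, with TICKET_NEXT == 16.  Adding (1 << TICKET_NEXT) to the
 * whole word therefore increments tickets.next without disturbing
 * tickets.owner.  Example: owner == 2, next == 5 means ticket 2 holds
 * the lock and tickets 3 and 4 are waiting.
 */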

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%2) \n"
		"	mov		%1, %0	 \n"
		"	add		%0, %3	 \n"
		"	stex.w		%0, (%2) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp), "=&r" (lockval)
		: "r"(p), "r"(ticket_next)
		: "cc");

	while (lockval.tickets.next != lockval.tickets.owner)
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

	smp_mb();
}
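
/*
 * Reading the fast path above (summary, not normative): ldex.w/stex.w
 * atomically snapshot the lock word into lockval and bump tickets.next;
 * stex.w leaves a success flag in %0 (the bez retry implies 0 means
 * the exclusive reservation was lost).  The C loop then spins until
 * owner reaches our ticket, and the trailing smp_mb() orders the
 * critical section after the acquisition.
 */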

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 tmp, contended, res;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;

	do {
		asm volatile (
		"	ldex.w		%0, (%3)   \n"
		"	movi		%2, 1	   \n"
		"	rotli		%1, %0, 16 \n"
		"	cmpne		%1, %0     \n"
		"	bt		1f	   \n"
		"	movi		%2, 0	   \n"
		"	add		%0, %0, %4 \n"
		"	stex.w		%0, (%3)   \n"
		"1:				   \n"
		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
		: "r"(p), "r"(ticket_next)
		: "cc");
	} while (!res);

	if (!contended)
		smp_mb();

	return !contended;
}
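
/*
 * How the trylock sequence decides: rotli swaps the 16-bit halves of
 * the loaded word, so cmpne is true exactly when next != owner, i.e.
 * the lock is taken.  In that case contended stays 1 and the store is
 * skipped; otherwise next is bumped and stex.w is attempted.  The
 * do/while only retries a lost exclusive reservation, and smp_mb() is
 * issued on success alone, matching arch_spin_lock().
 */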

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}
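
/*
 * Unlock can be a plain store: only the lock holder ever advances
 * tickets.owner, so WRITE_ONCE() is sufficient, and the preceding
 * smp_mb() keeps the critical section from leaking past the release.
 */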

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);

	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
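
/*
 * Worked example for the helpers above: with owner == 7 and next == 9
 * the lock is held (value_unlocked is false) and one CPU is queued
 * behind the holder, so is_contended is true ((9 - 7) > 1).  Because
 * owner and next are u16, the subtraction also stays correct across
 * wraparound.
 */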

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#else /* CONFIG_QUEUED_RWLOCKS */

/*
 * Test-and-set spin-locking.
 */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}
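
/*
 * Sketch of the test-and-set path: spin (bnez) while the word is
 * nonzero, then try to store 1 with stex.w; a zero result means the
 * exclusive reservation was lost and the sequence restarts.  Unlike
 * the ticket lock in the other branch, acquisition order here is not
 * FIFO.
 */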

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

/*
 * read lock/unlock/trylock
 */
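
/*
 * Lock-word encoding used by the rwlock paths below: 0 means unlocked,
 * a positive value counts active readers, and a writer holds the lock
 * by driving the value negative (subi from 0).  Readers therefore wait
 * only while the word is negative (blz), while a writer waits for it
 * to reach exactly 0 (bnez).
 */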

static inline void arch_read_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 1b   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	smp_mb();
	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
}
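
/*
 * Read unlock decrements unconditionally: the caller must hold a read
 * lock, so the count is known to be positive here.  smp_mb() comes
 * first so the critical section completes before the count drops.
 */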

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 2f   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

/*
 * write lock/unlock/trylock
 */

static inline void arch_write_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}
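
/*
 * All three trylock variants in this branch share a convention: tmp
 * ends up 0 on success (the explicit movi after a successful stex.w)
 * and nonzero when the lock could not be taken, so they return !tmp
 * and issue smp_mb() only when the acquisition actually succeeded.
 */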

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_H */