/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
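/*
 * Background note: PA-RISC spinlocks are built on the ldcw
 * (load-and-clear-word) instruction. __ldcw() atomically reads the
 * lock word and writes it to zero, so the word is nonzero while the
 * lock is free and zero while some CPU holds it. That is why the
 * tests below are inverted relative to most other architectures.
 */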
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
}
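/*
 * Note the test-and-test-and-set shape above: after a failed ldcw,
 * the inner loop spins on plain reads (with cpu_relax()) until the
 * lock word turns nonzero, and only then retries the atomic ldcw.
 * This keeps the cache line in a shared state while waiting instead
 * of bouncing it between CPUs with repeated atomic operations.
 */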
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;
	unsigned long flags_dis;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0) {
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		while (*a == 0)
			cpu_relax();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags
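/*
 * The flags dance above exists so that a CPU spinning on a contended
 * lock does not sit with interrupts disabled: while waiting it
 * restores the caller's pre-irqsave flags (possibly re-enabling
 * interrupts), then disables them again before retrying the ldcw.
 */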
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Order prior accesses before the releasing store of 1 (free). */
	mb();
	*a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;

	return ret;
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
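/*
 * Counter encoding, sketched under the assumption that
 * __ARCH_RW_LOCK_UNLOCKED__ in spinlock_types.h is a large positive
 * constant (e.g. 0x01000000):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__	held by readers; each
 *						reader has decremented it
 *	counter == 0				held by a single writer
 */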
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}
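/*
 * Worked example (hypothetical values, assuming the counter encoding
 * sketched above): starting from counter == 0x01000000, two successful
 * arch_read_trylock() calls leave counter == 0x00fffffe, so a writer
 * sees counter != __ARCH_RW_LOCK_UNLOCKED__ and is refused until both
 * readers have called arch_read_unlock().
 */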
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}
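/*
 * A successful writer sets counter to 0, the exclusive-hold value, so
 * concurrent arch_read_trylock() callers fail the counter > 0 test.
 * The slow paths below simply retry their trylock until it succeeds.
 */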
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}
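/*
 * Usage sketch (hypothetical caller, not part of this header; normal
 * kernel code should go through the generic rwlock wrappers rather
 * than calling these arch_ primitives directly):
 *
 *	static arch_rwlock_t stats_lock = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&stats_lock);	// many readers may enter
 *	...read shared data...
 *	arch_read_unlock(&stats_lock);
 *
 *	arch_write_lock(&stats_lock);	// single writer, excludes readers
 *	...modify shared data...
 *	arch_write_unlock(&stats_lock);
 */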
#endif /* __ASM_SPINLOCK_H */