/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
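
/*
 * PA-RISC locking is built on the ldcw (load and clear word)
 * instruction: it atomically loads the lock word and writes zero to
 * it, and needs a suitably aligned operand (16 bytes on older CPUs),
 * which __ldcw_align() provides.  By convention the word holds 1 when
 * the lock is free and 0 when it is held.
 */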

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	return READ_ONCE(*a) == 0;
}
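
/*
 * Acquire: retry ldcw until it returns non-zero.  While the lock is
 * contended, spin on plain reads of the lock word so the atomic ldcw
 * is only reissued once the lock looks free again.
 */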

static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
}
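
/*
 * Same as arch_spin_lock(), but if the caller had interrupts enabled
 * (PSW_SM_I set in the saved flags), re-enable them while spinning so
 * a contended lock does not add to interrupt latency.
 */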

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags
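
/*
 * Unlock only needs a store of 1 back to the lock word: only the
 * owner writes it, so no atomic read-modify-write is required, just
 * release ordering for the preceding critical section.
 */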

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

/* Returns 1 if the lock was free and we now own it, 0 otherwise. */
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
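
/*
 * counter == __ARCH_RW_LOCK_UNLOCKED__ means the lock is free; each
 * reader that gets in decrements it, and a writer claims the whole
 * lock by setting it to 0.
 */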

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}
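
/*
 * The unlock paths take lock_mutex with interrupts disabled as well,
 * so an interrupt handler on this CPU can never spin on a lock_mutex
 * held by the context it interrupted.
 */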

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */