/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_SPINLOCK_H
#define _ALPHA_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define arch_spin_is_locked(x)	((x)->lock != 0)

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

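/*
 * The lock word is 0 when free and nonzero when held.  The acquire
 * paths below are built from Alpha's load-locked (ldl_l) and
 * store-conditional (stl_c) instructions.  As an illustrative sketch
 * only (not the real implementation, which must keep the ldl_l/stl_c
 * pair together in assembly), the acquire loop behaves roughly like:
 *
 *	do {
 *		while (READ_ONCE(lock->lock) != 0)
 *			;	(spin until the lock looks free)
 *	} while (cmpxchg(&lock->lock, 0, 1) != 0);
 *	mb();		(order the critical section after the acquire)
 */
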
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}

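/*
 * test_and_set_bit() returns the old value of the bit, so the trylock
 * above succeeds (returns 1) only when the bit was previously clear.
 * Being a value-returning atomic RMW, it also provides the ordering
 * that a lock acquisition needs.
 */
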
/***********************************************************/

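/*
 * rwlock word encoding: 0 means free, a writer sets the low bit, and
 * each reader subtracts 2 (so the reader count lives in the upper bits
 * and never disturbs the writer bit).  arch_read_lock() therefore
 * spins on blbs (branch if low bit set) until no writer holds the
 * lock, while arch_write_lock() below spins until the whole word is 0,
 * i.e. until there are neither readers nor a writer.
 */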
static inline void arch_read_lock(arch_rwlock_t * lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t * lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}

#endif /* _ALPHA_SPINLOCK_H */