Linux 5.6.13
arch/riscv/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Store 0 with release semantics so critical-section accesses
	 * cannot sink past the unlock. */
	smp_store_release(&lock->lock, 0);
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	/* Atomically swap 1 into the lock word; busy receives the old
	 * value, so zero means we took the lock. */
	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}
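/*
 * A minimal user-space analogue of the pair above, as a sketch only:
 * it assumes C11 <stdatomic.h>, and the demo_* names are illustrative,
 * not part of the kernel. atomic_exchange plays the role of amoswap.w,
 * and the acquire/release orderings stand in for RISCV_ACQUIRE_BARRIER
 * and smp_store_release().
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int lock; } demo_spinlock_t;

static inline bool demo_spin_trylock(demo_spinlock_t *l)
{
	/* An old value of 0 means the swap acquired the lock. */
	return atomic_exchange_explicit(&l->lock, 1,
					memory_order_acquire) == 0;
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}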
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		/* Spin on a plain read first to avoid hammering the
		 * cache line with atomics, then race for the lock. */
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}
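/*
 * The FIXME above asks for a ticket lock. A hypothetical C11 sketch of
 * that idea follows; none of these names exist in the kernel. Each CPU
 * takes a ticket and waits for its number to be served, which makes
 * lock handoff FIFO-fair, unlike the test-and-swap loop above.
 */
typedef struct {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
} demo_ticketlock_t;

static inline void demo_ticket_lock(demo_ticketlock_t *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* spin until our ticket is called */
}

static inline void demo_ticket_unlock(demo_ticketlock_t *l)
{
	/* Only the owner advances the counter, so a relaxed load of our
	 * own ticket followed by a release store is enough. */
	unsigned int next = atomic_load_explicit(&l->owner,
						 memory_order_relaxed) + 1;
	atomic_store_explicit(&l->owner, next, memory_order_release);
}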
/***********************************************************/
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	/* LR/SC loop: a negative count means a writer holds the lock,
	 * so spin; otherwise increment the reader count, retrying if
	 * the store-conditional fails. */
	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	/* Wait for the count to reach zero (no readers, no writer),
	 * then claim exclusive ownership by storing -1. */
	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
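/*
 * User-space analogue of the two LR/SC loops above, again only a
 * sketch assuming C11 <stdatomic.h>; a compare-exchange loop stands in
 * for lr.w/sc.w, and all demo_* names are illustrative. The lock word
 * counts readers when positive and holds -1 for a writer, matching the
 * kernel encoding.
 */
typedef struct { atomic_int lock; } demo_rwlock_t;

static inline void demo_read_lock(demo_rwlock_t *l)
{
	int old = atomic_load_explicit(&l->lock, memory_order_relaxed);
	do {
		while (old < 0)		/* writer active: spin */
			old = atomic_load_explicit(&l->lock,
						   memory_order_relaxed);
		/* A failed CAS refreshes old and retries, much like a
		 * failed sc.w branching back to 1b. */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &old,
				old + 1,
				memory_order_acquire, memory_order_relaxed));
}

static inline void demo_write_lock(demo_rwlock_t *l)
{
	int expected = 0;
	/* A writer needs the count to be exactly zero before it can
	 * install -1. */
	while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected,
				-1,
				memory_order_acquire, memory_order_relaxed))
		expected = 0;
}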
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	/* Like arch_read_lock(), but a writer (negative count) makes us
	 * bail out through the forward branch to 1f instead of spinning. */
	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	/* Like arch_write_lock(), but any nonzero count aborts through
	 * the forward branch to 1f instead of spinning. */
	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
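/*
 * Sketches of the trylock pair in the same hypothetical C11 model
 * (demo_rwlock_t from above; names illustrative). The read side may
 * retry a failed CAS while no writer is visible, mirroring the
 * sc.w/bnez retry; the write side gets exactly one shot.
 */
static inline int demo_read_trylock(demo_rwlock_t *l)
{
	int old = atomic_load_explicit(&l->lock, memory_order_relaxed);
	while (old >= 0) {
		if (atomic_compare_exchange_weak_explicit(&l->lock, &old,
					old + 1,
					memory_order_acquire,
					memory_order_relaxed))
			return 1;
		/* old has been refreshed; give up if a writer appeared. */
	}
	return 0;
}

static inline int demo_write_trylock(demo_rwlock_t *l)
{
	int expected = 0;
	return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
				-1,
				memory_order_acquire, memory_order_relaxed);
}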
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	/* Atomically decrement the reader count; the old value is
	 * discarded into x0. */
	__asm__ __volatile__(
		RISCV_RELEASE_BARRIER
		"	amoadd.w x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}
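/*
 * The matching C11 sketch: a release-ordered fetch-and-add mirrors
 * RISCV_RELEASE_BARRIER followed by amoadd.w. Again assumes the
 * hypothetical demo_rwlock_t above.
 */
static inline void demo_read_unlock(demo_rwlock_t *l)
{
	atomic_fetch_add_explicit(&l->lock, -1, memory_order_release);
}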
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */