/*
 * Source: arch/riscv/include/asm/spinlock.h (kernel git mirror)
 * blob 2fd27e8ef1fd686d8cf234143174a538223acc79
 */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */
/* Non-zero lock word means held; READ_ONCE prevents the compiler from
 * caching or tearing the load while a waiter spins on this test. */
#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
29 static inline void arch_spin_unlock(arch_spinlock_t *lock)
31 __asm__ __volatile__ (
32 "amoswap.w.rl x0, x0, %0"
33 : "=A" (lock->lock)
34 :: "memory");
37 static inline int arch_spin_trylock(arch_spinlock_t *lock)
39 int tmp = 1, busy;
41 __asm__ __volatile__ (
42 "amoswap.w.aq %0, %2, %1"
43 : "=r" (busy), "+A" (lock->lock)
44 : "r" (tmp)
45 : "memory");
47 return !busy;
50 static inline void arch_spin_lock(arch_spinlock_t *lock)
52 while (1) {
53 if (arch_spin_is_locked(lock))
54 continue;
56 if (arch_spin_trylock(lock))
57 break;
/***********************************************************/
63 static inline void arch_read_lock(arch_rwlock_t *lock)
65 int tmp;
67 __asm__ __volatile__(
68 "1: lr.w %1, %0\n"
69 " bltz %1, 1b\n"
70 " addi %1, %1, 1\n"
71 " sc.w.aq %1, %1, %0\n"
72 " bnez %1, 1b\n"
73 : "+A" (lock->lock), "=&r" (tmp)
74 :: "memory");
77 static inline void arch_write_lock(arch_rwlock_t *lock)
79 int tmp;
81 __asm__ __volatile__(
82 "1: lr.w %1, %0\n"
83 " bnez %1, 1b\n"
84 " li %1, -1\n"
85 " sc.w.aq %1, %1, %0\n"
86 " bnez %1, 1b\n"
87 : "+A" (lock->lock), "=&r" (tmp)
88 :: "memory");
91 static inline int arch_read_trylock(arch_rwlock_t *lock)
93 int busy;
95 __asm__ __volatile__(
96 "1: lr.w %1, %0\n"
97 " bltz %1, 1f\n"
98 " addi %1, %1, 1\n"
99 " sc.w.aq %1, %1, %0\n"
100 " bnez %1, 1b\n"
101 "1:\n"
102 : "+A" (lock->lock), "=&r" (busy)
103 :: "memory");
105 return !busy;
108 static inline int arch_write_trylock(arch_rwlock_t *lock)
110 int busy;
112 __asm__ __volatile__(
113 "1: lr.w %1, %0\n"
114 " bnez %1, 1f\n"
115 " li %1, -1\n"
116 " sc.w.aq %1, %1, %0\n"
117 " bnez %1, 1b\n"
118 "1:\n"
119 : "+A" (lock->lock), "=&r" (busy)
120 :: "memory");
122 return !busy;
125 static inline void arch_read_unlock(arch_rwlock_t *lock)
127 __asm__ __volatile__(
128 "amoadd.w.rl x0, %1, %0"
129 : "+A" (lock->lock)
130 : "r" (-1)
131 : "memory");
134 static inline void arch_write_unlock(arch_rwlock_t *lock)
136 __asm__ __volatile__ (
137 "amoswap.w.rl x0, x0, %0"
138 : "=A" (lock->lock)
139 :: "memory");
#endif /* _ASM_RISCV_SPINLOCK_H */