/* arch/riscv/include/asm/spinlock.h */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H
#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>
/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */
/* Nonzero lock word means the lock is currently held by some CPU. */
#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)
30 static inline void arch_spin_unlock(arch_spinlock_t *lock)
32 smp_store_release(&lock->lock, 0);
35 static inline int arch_spin_trylock(arch_spinlock_t *lock)
37 int tmp = 1, busy;
39 __asm__ __volatile__ (
40 " amoswap.w %0, %2, %1\n"
41 RISCV_ACQUIRE_BARRIER
42 : "=r" (busy), "+A" (lock->lock)
43 : "r" (tmp)
44 : "memory");
46 return !busy;
49 static inline void arch_spin_lock(arch_spinlock_t *lock)
51 while (1) {
52 if (arch_spin_is_locked(lock))
53 continue;
55 if (arch_spin_trylock(lock))
56 break;
/***********************************************************/
62 static inline void arch_read_lock(arch_rwlock_t *lock)
64 int tmp;
66 __asm__ __volatile__(
67 "1: lr.w %1, %0\n"
68 " bltz %1, 1b\n"
69 " addi %1, %1, 1\n"
70 " sc.w %1, %1, %0\n"
71 " bnez %1, 1b\n"
72 RISCV_ACQUIRE_BARRIER
73 : "+A" (lock->lock), "=&r" (tmp)
74 :: "memory");
77 static inline void arch_write_lock(arch_rwlock_t *lock)
79 int tmp;
81 __asm__ __volatile__(
82 "1: lr.w %1, %0\n"
83 " bnez %1, 1b\n"
84 " li %1, -1\n"
85 " sc.w %1, %1, %0\n"
86 " bnez %1, 1b\n"
87 RISCV_ACQUIRE_BARRIER
88 : "+A" (lock->lock), "=&r" (tmp)
89 :: "memory");
92 static inline int arch_read_trylock(arch_rwlock_t *lock)
94 int busy;
96 __asm__ __volatile__(
97 "1: lr.w %1, %0\n"
98 " bltz %1, 1f\n"
99 " addi %1, %1, 1\n"
100 " sc.w %1, %1, %0\n"
101 " bnez %1, 1b\n"
102 RISCV_ACQUIRE_BARRIER
103 "1:\n"
104 : "+A" (lock->lock), "=&r" (busy)
105 :: "memory");
107 return !busy;
110 static inline int arch_write_trylock(arch_rwlock_t *lock)
112 int busy;
114 __asm__ __volatile__(
115 "1: lr.w %1, %0\n"
116 " bnez %1, 1f\n"
117 " li %1, -1\n"
118 " sc.w %1, %1, %0\n"
119 " bnez %1, 1b\n"
120 RISCV_ACQUIRE_BARRIER
121 "1:\n"
122 : "+A" (lock->lock), "=&r" (busy)
123 :: "memory");
125 return !busy;
128 static inline void arch_read_unlock(arch_rwlock_t *lock)
130 __asm__ __volatile__(
131 RISCV_RELEASE_BARRIER
132 " amoadd.w x0, %1, %0\n"
133 : "+A" (lock->lock)
134 : "r" (-1)
135 : "memory");
138 static inline void arch_write_unlock(arch_rwlock_t *lock)
140 smp_store_release(&lock->lock, 0);
#endif /* _ASM_RISCV_SPINLOCK_H */