arch/arm64/include/asm/spinlock.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
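
/*
 * This is a ticket lock: arch_spinlock_t packs a 16-bit "owner" (the ticket
 * currently being served) and a 16-bit "next" (the next ticket to hand out),
 * with TICKET_SHIFT selecting the "next" half of the lock word (see
 * asm/spinlock_types.h for the exact layout). A locker atomically takes a
 * ticket from "next" and spins until "owner" reaches it.
 */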

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;
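
	/*
	 * ARM64_LSE_ATOMIC_INSN() (asm/lse.h) emits the LL/SC sequence by
	 * default and lets the kernel patch in the LSE alternative on CPUs
	 * that implement the Large System Extensions atomics.
	 */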
	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
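	/*
	 * %w0 still holds the pre-increment lock word; XOR-ing it with itself
	 * rotated by 16 bits compares the owner and next halves, so a zero
	 * result means our ticket is already being served.
	 */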
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
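	/*
	 * %4 is lock->owner, so ldaxrh re-reads just the owner half; the
	 * lsr #16 extracts our ticket (the old "next" value) for comparison.
	 */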
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	sub	%w1, %w1, %3\n"
	"	eor	%w1, %w1, %w0\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");
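
	/*
	 * Both paths leave tmp clear only on a successful acquisition: the
	 * LL/SC path ends with either the stxr status or the non-zero
	 * owner/next difference seen while the lock was held, and the LSE
	 * path compares the casa result against the value it expected to
	 * replace.
	 */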
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;
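
	/*
	 * Releasing the lock only needs a store-release increment of the
	 * owner halfword: the next waiter observes its ticket number and
	 * enters its critical section, with the release ordering publishing
	 * ours.
	 */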
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb(); /* ^^^ */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
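
	/*
	 * next - owner counts the holder plus any waiters, so a value
	 * greater than one means at least one other CPU is queued.
	 */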
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif	/* __ASM_SPINLOCK_H */