/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>
/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP.
 */
/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative, it is write-locked; go back
 *   and try again.
 * - if the store fails, go back and try again.  Loser.
 * - a successful store of the new (positive) lock value -> lock acquired.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1); }\n"
		"	{ if !P3 jump 1b; }\n"	/* write-locked; spin */
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"	/* store failed; retry */
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"	/* drop our reader count */
		"	memw_locked(%0,P3) = R6;\n"
		"	if !P3 jump 1b;\n"	/* store failed; retry */
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
/*  Returns 0 on failure, nonzero on success.  */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1); }\n"
		"	{ if !P3 jump 1f; }\n"	/* write-locked; give up */
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"	/* predicate -> return value */
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
/*
 * Write locks stuff a -1 into the lock value: 0 means free, -1 means
 * write-locked, and a positive value counts the readers holding it.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1; }\n"
		"	{ if !P3 jump 1b; }\n"	/* lock held; spin */
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"	/* store failed; retry */
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1; }\n"
		"	{ if !P3 jump 1f; }\n"	/* lock held; give up */
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"	/* predicate -> return value */
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}
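/*
 * The unlock routines are plain stores of 0 rather than LL/SC sequences:
 * a memw_locked store-conditional racing with one simply fails its
 * predicate and retries.  The smp_mb() keeps the critical section's
 * accesses ordered before the releasing store.
 */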
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1b; R6 = #1; }\n"	/* lock held; spin */
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"	/* store failed; retry */
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"	/* held; give up */
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"	/* predicate -> return value */
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
/*
 * SMP spinlocks are intended to allow only a single CPU at the lock.
 */
#define arch_spin_is_locked(x) ((x)->lock != 0)

#endif	/* _ASM_SPINLOCK_H */