/*	$NetBSD: lock.h,v 1.32 2015/02/25 13:52:42 joerg Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */

#ifndef _ARM_LOCK_H_
#define	_ARM_LOCK_H_

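/*
 * Illustrative sketch only (not part of the original header): how MI code
 * typically drives the hooks defined below, assuming a lock word named
 * "slock" declared by the caller.
 *
 *	__cpu_simple_lock_t slock;
 *
 *	__cpu_simple_lock_init(&slock);
 *	__cpu_simple_lock(&slock);
 *	... critical section ...
 *	__cpu_simple_unlock(&slock);
 *
 *	if (__cpu_simple_lock_try(&slock)) {
 *		... acquired without spinning ...
 *		__cpu_simple_unlock(&slock);
 *	}
 */
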
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

#ifdef _KERNEL
#include <arm/cpufunc.h>

#define	mb_read		drain_writebuf		/* in cpufunc.h */
#define	mb_write	drain_writebuf		/* in cpufunc.h */
#define	mb_memory	drain_writebuf		/* in cpufunc.h */
#endif

#ifdef _ARM_ARCH_6
static __inline unsigned int
__arm_load_exclusive(__cpu_simple_lock_t *__alp)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	} else {
		__asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	}
	return __rv;
}

/* returns 0 on success and 1 on failure */
static __inline unsigned int
__arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("strexb\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	} else {
		__asm __volatile("strex\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	}
	return __rv;
}

#elif defined(_KERNEL)
static __inline unsigned char
__swp(unsigned char __val, __cpu_simple_lock_t *__ptr)
{
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
}
#else
/*
 * On MP Cortex, SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the cpu generates an undefined exception we can replace
 * the SWP/MOV instructions with the right LDREX/STREX instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the result.
 */
static __inline int
__swp(int __val, __cpu_simple_lock_t *__ptr)
{
	int __rv, __tmp;
	__asm volatile(
#if 1
		"1:\t"	"swp	%[__rv], %[__val], [%[__ptr]]"
		"\n\t"	"mov	%[__tmp],#0"
#else
		"1:\t"	"ldrex	%[__rv],[%[__ptr]]"
		"\n\t"	"strex	%[__tmp],%[__val],[%[__ptr]]"
#endif
		"\n\t"	"cmp	%[__tmp],#0"
		"\n\t"	"bne	1b"
	    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* !_ARM_ARCH_6 */

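/*
 * For illustration only (assumed register names, not emitted verbatim by
 * this header): the padded SWP template and the LDREX/STREX sequence the
 * kernel patches in occupy the same four instruction slots, so the fixup
 * can be done in place.
 *
 *	1:	swp	r0, r1, [r2]		1:	ldrex	r0, [r2]
 *		mov	r3, #0		==>		strex	r3, r1, [r2]
 *		cmp	r3, #0				cmp	r3, #0
 *		bne	1b				bne	1b
 */
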
static __inline void
__arm_membar_producer(void)
{
#ifdef _ARM_ARCH_7
	__asm __volatile("dsb" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,4" :: "r"(0) : "memory");
#endif
}

static __inline void
__arm_membar_consumer(void)
{
#ifdef _ARM_ARCH_7
	__asm __volatile("dmb" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory");
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{

	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
}

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	__arm_membar_consumer();
	do {
		/* spin */
	} while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED
		 || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
#else
	while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED)
		continue;
#endif
}
#else
void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	__arm_membar_consumer();
	do {
		if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) {
			return 0;
		}
	} while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
	return 1;
#else
	return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED);
#endif
}
#else
int __cpu_simple_lock_try(__cpu_simple_lock_t *);
#endif

static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{

#ifdef _ARM_ARCH_8
	if (sizeof(*__alp) == 1) {
		__asm __volatile("stlb\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	} else {
		__asm __volatile("stl\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	}
#else
	__arm_membar_consumer();
	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
#endif
}

#endif /* _ARM_LOCK_H_ */