/*	$NetBSD: lock.h,v 1.30 2007/10/17 19:57:13 garbled Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _MACHINE_LOCK_H
#define _MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif
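
/*
 * Background on the check above: the SPARC ldstub instruction used
 * below atomically reads a byte and sets it to 0xff, so a held lock
 * contains 0xff and is released by storing zero back into the lock
 * byte, which is presumably why this implementation insists that
 * __SIMPLELOCK_UNLOCKED be 0.
 */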
/* XXX So we can expose this to userland. */
#ifdef __lint__
#define __ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	/* Atomically fetch the lock byte and set it to 0xff (locked). */
	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}
static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock use simple reads until it
	 * is released, then retry the atomic operation. This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */
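
/*
 * The loop above is the classic test-and-test-and-set pattern: the
 * atomic ldstub is retried only after a plain load has seen the lock
 * become free.  A minimal usage sketch (the lock variable and the
 * critical section here are hypothetical, not part of this header):
 *
 *	static __cpu_simple_lock_t l = __SIMPLELOCK_UNLOCKED;
 *
 *	__cpu_simple_lock(&l);
 *	... critical section ...
 *	__cpu_simple_unlock(&l);
 */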
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}
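
/*
 * A non-blocking caller can use the try variant instead; a sketch
 * only, with a hypothetical lock variable `l' and do_work() function:
 *
 *	if (__cpu_simple_lock_try(&l)) {
 *		do_work();
 *		__cpu_simple_unlock(&l);
 *	}
 */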
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}
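
/*
 * Without that barrier the compiler could sink stores from the
 * critical section past the store that releases the lock.  Note that
 * __insn_barrier() constrains the compiler only; no membar is issued
 * here, presumably because SPARC's TSO memory model already keeps
 * stores ordered with respect to each other.
 */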
#if defined(__sparc_v9__)
static __inline void
mb_read(void)
{
	__asm __volatile("membar #LoadLoad" : : : "memory");
}

static __inline void
mb_write(void)
{
	__asm __volatile("" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("membar #MemIssue" : : : "memory");
}
#else	/* __sparc_v9__ */
static __inline void
mb_read(void)
{
	static volatile int junk;

	/* Force ordering with a dummy store; pre-V9 CPUs lack membar. */
	__asm volatile("st %%g0,[%0]"
	    :
	    : "r" (&junk)
	    : "memory");
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* __sparc_v9__ */
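
/*
 * Summary of the helpers above: mb_read() orders loads (membar
 * #LoadLoad on V9, the dummy-store trick on older CPUs), mb_write()
 * is a pure compiler barrier in both variants, and mb_memory() is the
 * full barrier (membar #MemIssue on V9, the dummy store otherwise).
 */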
#endif /* _MACHINE_LOCK_H */