netbsd-mini2440.git: sys/arch/sparc/include/lock.h
/*	$NetBSD: lock.h,v 1.30 2007/10/17 19:57:13 garbled Exp $	*/
/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _MACHINE_LOCK_H
#define	_MACHINE_LOCK_H

/*
 * Machine dependent spin lock operations.
 */

#if __SIMPLELOCK_UNLOCKED != 0
#error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
#endif
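
/*
 * Background on the check above: the SPARC ldstub instruction atomically
 * sets the addressed byte to all-ones (0xff) and returns its previous
 * contents.  An acquisition succeeds only when the old value reads back
 * as 0, so the unlocked state must be 0 for ldstub-based locking to work.
 */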
/* XXX So we can expose this to userland. */
#ifdef __lint__
#define __ldstub(__addr)	(__addr)
#else /* !__lint__ */
static __inline int __ldstub(__cpu_simple_lock_t *addr);
static __inline int __ldstub(__cpu_simple_lock_t *addr)
{
	int v;

	__asm volatile("ldstub [%1],%0"
	    : "=&r" (v)
	    : "r" (addr)
	    : "memory");

	return v;
}
#endif /* __lint__ */
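
/*
 * A minimal C11 sketch of __ldstub's semantics, for illustration only
 * (not part of the original header): atomically exchange the lock byte
 * with 0xff and hand back the previous contents.
 */
#if 0	/* example only */
#include <stdatomic.h>

static unsigned char
ldstub_c11_sketch(volatile _Atomic unsigned char *p)
{
	/* atomic_exchange stores 0xff and yields the old byte */
	return atomic_exchange(p, 0xff);
}
#endif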
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{

	/*
	 * If someone else holds the lock use simple reads until it
	 * is released, then retry the atomic operation.  This reduces
	 * memory bus contention because the cache-coherency logic
	 * does not have to broadcast invalidates on the lock while
	 * we spin on it.
	 */
	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
		while (*alp != __SIMPLELOCK_UNLOCKED)
			/* spin */ ;
	}
}
#endif /* __CPU_SIMPLE_LOCK_NOINLINE */
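
/*
 * Usage sketch for the simple-lock primitives above (illustrative, not
 * part of the original header): a statically allocated lock protecting
 * a counter.  Assumes __cpu_simple_lock_t and __SIMPLELOCK_UNLOCKED are
 * in scope, as they are for any consumer of this header.
 */
#if 0	/* example only */
static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
static int example_count;

static void
example_increment(void)
{
	__cpu_simple_lock(&example_lock);	/* spins until acquired */
	example_count++;			/* critical section */
	__cpu_simple_unlock(&example_lock);	/* barrier + release */
}
#endif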
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
}
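
/*
 * Illustrative trylock pattern (not part of the original header):
 * attempt the lock once without spinning and let the caller fall back
 * to other work when it is contended.
 */
#if 0	/* example only */
static int
example_try_update(__cpu_simple_lock_t *lp, int *valp)
{
	if (!__cpu_simple_lock_try(lp))
		return 0;		/* lock held elsewhere; retry later */
	(*valp)++;
	__cpu_simple_unlock(lp);
	return 1;
}
#endif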
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	/*
	 * Insert compiler barrier to prevent instruction re-ordering
	 * around the lock release.
	 */
	__insn_barrier();
	*alp = __SIMPLELOCK_UNLOCKED;
}
#if defined(__sparc_v9__)
static __inline void
mb_read(void)
{
	__asm __volatile("membar #LoadLoad" : : : "memory");
}

static __inline void
mb_write(void)
{
	__asm __volatile("" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("membar #MemIssue" : : : "memory");
}
#else	/* __sparc_v9__ */
static __inline void
mb_read(void)
{
	static volatile int junk;
	__asm volatile("st %%g0,[%0]"
	    :
	    : "r" (&junk)
	    : "memory");
}

static __inline void
mb_write(void)
{
	__insn_barrier();
}

static __inline void
mb_memory(void)
{
	mb_read();
}
#endif	/* __sparc_v9__ */
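
/*
 * Illustrative use of the barrier primitives (not part of the original
 * header): a one-way flag handoff between two CPUs.  The producer orders
 * its data store before the flag store with mb_write(); the consumer
 * orders the flag load before the data load with mb_read().
 */
#if 0	/* example only */
static volatile int example_data;
static volatile int example_ready;

static void
example_produce(int v)
{
	example_data = v;
	mb_write();		/* data store visible before flag store */
	example_ready = 1;
}

static int
example_consume(void)
{
	while (example_ready == 0)
		/* spin */ ;
	mb_read();		/* flag load ordered before data load */
	return example_data;
}
#endif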
#endif /* _MACHINE_LOCK_H */