/*	$NetBSD: lock.h,v 1.26 2012/10/11 11:12:21 apb Exp $	*/

/*-
 * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */
#ifndef _X86_LOCK_H_
#define	_X86_LOCK_H_

#include <sys/param.h>
/* Predicates and raw (unlocked) set/clear helpers for lock values. */

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}
#ifdef _HARDKERNEL

#include <machine/cpufunc.h>

void	__cpu_simple_lock_init(__cpu_simple_lock_t *);
void	__cpu_simple_lock(__cpu_simple_lock_t *);
int	__cpu_simple_lock_try(__cpu_simple_lock_t *);
void	__cpu_simple_unlock(__cpu_simple_lock_t *);

#define	SPINLOCK_SPIN_HOOK	/* nothing */

#ifdef SPINLOCK_BACKOFF_HOOK
#undef SPINLOCK_BACKOFF_HOOK
#endif
/* x86_pause() issues the PAUSE spin-wait hint to the CPU. */
#define	SPINLOCK_BACKOFF_HOOK	x86_pause()
#else

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
    __unused;
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
    __unused;
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
    __unused;
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
    __unused;
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	uint8_t val;

	/*
	 * XCHG with a memory operand is implicitly locked on x86, so
	 * this is an atomic test-and-set: no explicit LOCK prefix is
	 * needed.
	 */
	val = __SIMPLELOCK_LOCKED;
	__asm volatile ("xchgb %0,(%2)" :
	    "=qQ" (val)
	    :"0" (val), "r" (lockp));
	__insn_barrier();
	return val == __SIMPLELOCK_UNLOCKED;
}
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	while (!__cpu_simple_lock_try(lockp))
		/* nothing */;
	__insn_barrier();
}
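/*
 * Illustrative sketch only (not part of the original header): a spin loop
 * that backs off with the PAUSE hint between failed test-and-set attempts,
 * in the spirit of the SPINLOCK_BACKOFF_HOOK defined in the _HARDKERNEL
 * branch above.  The function name is invented for this example, and the
 * hook is only available in that other branch, so the sketch is not
 * compiled here.
 */
#if 0	/* example only */
static __inline void
example_lock_with_backoff(__cpu_simple_lock_t *lockp)
{

	/* Retry the atomic test-and-set, pausing between attempts. */
	while (!__cpu_simple_lock_try(lockp))
		SPINLOCK_BACKOFF_HOOK;	/* PAUSE: be polite to the
					 * other hardware thread */
}
#endif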
/*
 * Note on x86 memory ordering
 *
 * When releasing a lock we must ensure that no stores or loads from within
 * the critical section are re-ordered by the CPU to occur outside of it:
 * they must have completed and be visible to other processors once the lock
 * has been released.
 *
 * NetBSD usually runs with the kernel mapped (via MTRR) in a WB (write
 * back) memory region.  In that case, memory ordering on x86 platforms
 * looks like this:
 *
 * i386		All loads/stores occur in instruction sequence.
 *
 * i486		All loads/stores occur in instruction sequence.  In
 * Pentium	exceptional circumstances, loads can be re-ordered around
 *		stores, but for the purposes of releasing a lock it does
 *		not matter.  Stores may not be immediately visible to other
 *		processors as they can be buffered.  However, since the
 *		stores are buffered in order the lock release will always be
 *		the last operation in the critical section that becomes
 *		visible to other CPUs.
 *
 * Pentium Pro	The "Intel 64 and IA-32 Architectures Software Developer's
 * onwards	Manual" volume 3A (order number 248966) says that (1) "Reads
 *		can be carried out speculatively and in any order" and (2)
 *		"Reads can pass buffered stores, but the processor is
 *		self-consistent.".  This would be a problem for the below,
 *		and would mandate a locked instruction cycle or load fence
 *		before releasing the simple lock.
 *
 *		The "Intel Pentium 4 Processor Optimization" guide (order
 *		number 253668-022US) says: "Loads can be moved before stores
 *		that occurred earlier in the program if they are not
 *		predicted to load from the same linear address.".  This is
 *		not a problem since the only loads that can be re-ordered
 *		take place once the lock has been released via a store.
 *
 *		The above two documents seem to contradict each other,
 *		however with the exception of early steppings of the Pentium
 *		Pro, the second document is closer to the truth: a store
 *		will always act as a load fence for all loads that precede
 *		the store in instruction order.
 *
 *		Again, note that stores can be buffered and will not always
 *		become immediately visible to other CPUs: they are however
 *		buffered in order.
 *
 * AMD64	Stores occur in order and are buffered.  Loads can be
 *		reordered, however stores act as load fences, meaning that
 *		loads can not be reordered around stores.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}
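/*
 * For comparison only (not part of the original header): in portable C11
 * atomics the unlock above corresponds to a release store, which on x86
 * compiles down to a compiler barrier plus a plain MOV -- the same
 * __insn_barrier()/store pair used by __cpu_simple_unlock().  The names
 * below are hypothetical and exist only for this sketch.
 */
#if 0	/* sketch, not compiled */
#include <stdatomic.h>

static void
example_unlock(atomic_uchar *lock)
{
	/* Release: accesses in the critical section may not sink below. */
	atomic_store_explicit(lock, 0 /* unlocked */, memory_order_release);
}
#endif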
#endif	/* _HARDKERNEL */

#endif /* _X86_LOCK_H_ */
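/*
 * Usage sketch (an illustration, not part of the original header): how a
 * caller might protect a critical section with this API.  The variable and
 * function names are invented for the example; real kernel code would
 * normally go through the higher-level mutex(9) interface rather than raw
 * __cpu_simple_lock operations.
 */
#if 0	/* example only */
static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
static int example_counter;

static void
example_increment(void)
{

	__cpu_simple_lock(&example_lock);	/* spin until acquired */
	example_counter++;			/* critical section */
	__cpu_simple_unlock(&example_lock);	/* release: plain store */
}
#endif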