No empty .Rs/.Re
[netbsd-mini2440.git] / sys / arch / vax / include / mutex.h
blob58b521291010094dafa971a8ec74b2df08c80b01
1 /* $NetBSD: mutex.h,v 1.11 2008/02/22 03:16:01 matt Exp $ */
3 /*-
4 * Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #ifndef _VAX_MUTEX_H_
33 #define _VAX_MUTEX_H_
/*
 * The VAX mutex implementation is troublesome, because VAX lacks a
 * compare-and-swap operation.
 *
 * So we emulate compare-and-swap by raising IPL to lock out interrupts
 * and (if MP) using BBSSI/BBCCI to lock out the other processors.
 */
43 #ifndef __MUTEX_PRIVATE
/*
 * Public (non-__MUTEX_PRIVATE) view of the mutex: callers see only an
 * opaque pad word, sized so the struct matches the private layout.
 */
struct kmutex {
	uintptr_t	mtx_pad1;	/* opaque storage; real fields are private */
};
49 #else /* __MUTEX_PRIVATE */
51 struct kmutex {
52 union {
53 /* Adaptive mutex */
54 volatile uintptr_t mtxa_owner; /* 0-3 */
56 /* Spin mutex */
57 struct {
58 volatile uint8_t mtxs_dummy;
59 ipl_cookie_t mtxs_ipl;
60 __cpu_simple_lock_t mtxs_lock;
61 volatile uint8_t mtxs_unused;
62 } s;
63 } u;
/* Short-hand accessors for the union members above. */
#define	mtx_owner	u.mtxa_owner
#define	mtx_ipl		u.s.mtxs_ipl
#define	mtx_lock	u.s.mtxs_lock

/* Feature flags consumed by the MI mutex code (kern_mutex.c). */
#define	__HAVE_MUTEX_STUBS		1
#define	__HAVE_SPIN_MUTEX_STUBS		1
#define	__HAVE_SIMPLE_MUTEXES		1
/*
 * MUTEX_RECEIVE: no memory barrier required; we're synchronizing against
 * interrupts, not multiple processors.
 */
#define	MUTEX_RECEIVE(mtx)		/* nothing */

/*
 * MUTEX_GIVE: no memory barrier required; same reason.
 */
#define	MUTEX_GIVE(mtx)			/* nothing */

/* Emulated compare-and-swap; succeeds when the old value matched. */
#define	MUTEX_CAS(p, o, n)		(atomic_cas_ulong((p), (o), (n)) == (o))
87 #endif /* __MUTEX_PRIVATE */
89 #endif /* _VAX_MUTEX_H_ */