1 /* $NetBSD: mutex.h,v 1.3 2008/04/28 20:23:25 martin Exp $ */
4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
32 #ifndef _IA64_MUTEX_H_
33 #define _IA64_MUTEX_H_
35 #ifndef __MUTEX_PRIVATE
45 volatile uintptr_t mtx_owner
;
47 __cpu_simple_lock_t mtx_lock
;
/* XXX when we implement mutex_enter()/mutex_exit(), uncomment this
#define	__HAVE_MUTEX_STUBS		1
*/
/* XXX when we implement mutex_spin_enter()/mutex_spin_exit(), uncomment this
#define	__HAVE_SPIN_MUTEX_STUBS		1
*/
57 #define __HAVE_SIMPLE_MUTEXES 1
/*
 * MUTEX_RECEIVE: no memory barrier required, atomic_cas implies a load fence.
 */
#define MUTEX_RECEIVE(mtx) /* nothing */
/*
 * MUTEX_GIVE: no memory barrier required, as _lock_cas() will take care of it.
 */
#define MUTEX_GIVE(mtx) /* nothing */
/*
 * MUTEX_CAS: atomically compare-and-swap the mutex word at `ptr` from
 * `old` to `new`; evaluates to true when the swap took effect (i.e. the
 * value read back by atomic_cas_ulong equals the expected `old`).
 */
#define MUTEX_CAS(ptr, old, new) \
(atomic_cas_ulong((volatile unsigned long *)(ptr), (old), (new)) == (old))
72 #endif /* __MUTEX_PRIVATE */
74 #endif /* _IA64_MUTEX_H_ */