/*	$NetBSD: lock.h,v 1.3 2008/04/28 20:23:25 martin Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _IA64_LOCK_H_
#define	_IA64_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_UNLOCKED;
}

#ifdef _KERNEL

#define	SPINLOCK_SPIN_HOOK		/* nothing */
#define	SPINLOCK_BACKOFF_HOOK		/* XXX(kochi): hint@pause */

#endif
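
/*
 * Editorial sketch (not part of the original header): the XXX note above
 * refers to the ia64 "hint @pause" instruction, which tells the processor
 * that the current thread is busy-waiting.  Under that assumption, the
 * backoff hook could plausibly be defined along these lines; whether a
 * given assembler accepts the mnemonic is not verified here, so this is
 * left as an illustration only:
 *
 *	#define	SPINLOCK_BACKOFF_HOOK	\
 *		__asm volatile("hint @pause" ::: "memory")
 */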

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__unused;
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__unused;
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__unused;
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__unused;

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	uint8_t val;

	/*
	 * xchg1 atomically swaps the byte at *lockp with the LOCKED value
	 * and yields the previous contents; the lock was acquired iff the
	 * old value was UNLOCKED.
	 */
	val = __SIMPLELOCK_LOCKED;
	__asm volatile ("xchg1 %0=[%1],%2" :
		"=r" (val)
		:"r" (lockp), "r" (val)
		:"memory");
	return val == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	while (!__cpu_simple_lock_try(lockp))
		/* nothing */;
	__insn_barrier();
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}

#endif /* _IA64_LOCK_H_ */
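
/*
 * Example usage (editorial illustration, not part of the original header):
 * a minimal sketch of how the primitives above are typically driven.  The
 * lock variable and the example_* function names are hypothetical; only the
 * __cpu_simple_lock_* calls and __SIMPLELOCK_UNLOCKED come from this header
 * and its machine-independent counterpart.
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	void
 *	example_setup(void)
 *	{
 *
 *		__cpu_simple_lock_init(&example_lock);
 *	}
 *
 *	void
 *	example_critical_section(void)
 *	{
 *
 *		__cpu_simple_lock(&example_lock);
 *		... code that must not run concurrently ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 *
 * __cpu_simple_lock_try() can be used instead of __cpu_simple_lock() when
 * the caller prefers to back off rather than spin; it returns non-zero if
 * the lock was acquired.
 */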