/*	$NetBSD: lock.h,v 1.15 2007/11/13 11:37:06 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_
#include <sys/stdint.h>

#define	HPPA_LDCW_ALIGN		16
#define	__SIMPLELOCK_ALIGN(p)	\
    (volatile unsigned long *)(((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1) & \
    ~(HPPA_LDCW_ALIGN - 1))
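
/*
 * The ldcw instruction requires its word operand to be 16-byte
 * aligned, so the usable lock word is picked out of a padded
 * region at run time.  A worked example (the address is purely
 * illustrative): with p == 0x1004, (0x1004 + 15) & ~15 == 0x1010,
 * the next 16-byte boundary.
 */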
#define	__SIMPLELOCK_RAW_LOCKED		0
#define	__SIMPLELOCK_RAW_UNLOCKED	1
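
/*
 * The raw values are inverted relative to most ports because of the
 * ldcw (load-and-clear-word) semantics: the instruction always leaves
 * 0 behind, so 0 must mean "locked" and a free lock holds 1.
 */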
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}
static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}
/*
 * Atomically load the lock word and clear it to zero
 * (PA-RISC load-and-clear-word).
 */
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
	int __val;

	__asm volatile("ldcw 0(%1), %0"
	    : "=r" (__val) : "r" (__ptr)
	    : "memory");

	return __val;
}
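
/*
 * A worked example of the semantics above (values illustrative):
 * if *__ptr is 1 (unlocked), __ldcw() returns 1 and leaves 0 in
 * memory, so the caller has atomically acquired the lock.  If the
 * lock is already held, *__ptr is 0; __ldcw() returns 0 and stores
 * 0 again, changing nothing.
 */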
/*
 * Memory barrier: drain outstanding accesses before proceeding.
 */
static __inline void
__sync(void)
{

	__asm volatile("sync\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	alp->csl_lock[0] = alp->csl_lock[1] =
	alp->csl_lock[2] = alp->csl_lock[3] =
	    __SIMPLELOCK_RAW_UNLOCKED;
}
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-clear-word, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */
	while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
			;
}
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
}
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	__sync();
	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_LOCKED;
}
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
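
/*
 * An illustrative usage sketch (the lock name is hypothetical, not
 * part of this header): machine-independent callers would typically
 * use the operations above as
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	if (!__cpu_simple_lock_try(&example_lock))
 *		__cpu_simple_lock(&example_lock);	// spin until held
 *	// ... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 */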
#endif /* _HPPA_LOCK_H_ */