1 /* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
14 You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
19 #ifndef _LOWLEVELLOCK_H
20 #define _LOWLEVELLOCK_H 1
23 #include <sys/param.h>
24 #include <bits/pthreadtypes.h>
/* Syscall number for futex(2) on this architecture.  */
#define __NR_futex		394

/* Futex operation codes -- part of the kernel ABI, see <linux/futex.h>.
   FUTEX_WAIT and FUTEX_WAKE are used by the wait/wake macros below.  */
#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5
/* Encoded FUTEX_WAKE_OP operation: op = FUTEX_OP_ANDN (4), oparg = 1,
   cmp = FUTEX_OP_CMP_EQ (0), cmparg = 0 -- i.e. clear the lowest bit and
   wake the second futex if its old value was > 1.  */
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)

/* Initializer for compatibility lock.  */
#define LLL_MUTEX_LOCK_INITIALIZER	(0)
/* Block until *FUTEXP != VAL (or a spurious wakeup).  Evaluates to 0 on
   success or a negative errno value on failure.  */
#define lll_futex_wait(futexp, val) \
  ({									      \
    INTERNAL_SYSCALL_DECL (__err);					      \
    long int __ret;							      \
    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
			      (futexp), FUTEX_WAIT, (val), 0);		      \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
  })
/* As lll_futex_wait, but give up after the relative TIMESPEC has elapsed.
   Evaluates to 0 on success or a negative errno value on failure.  */
#define lll_futex_timed_wait(futexp, val, timespec) \
  ({									      \
    INTERNAL_SYSCALL_DECL (__err);					      \
    long int __ret;							      \
    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
			      (futexp), FUTEX_WAIT, (val), (timespec));	      \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
  })
/* Wake up to NR threads waiting on FUTEXP.  Evaluates to the number of
   woken waiters on success or a negative errno value on failure.  */
#define lll_futex_wake(futexp, nr) \
  ({									      \
    INTERNAL_SYSCALL_DECL (__err);					      \
    long int __ret;							      \
    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
			      (futexp), FUTEX_WAKE, (nr), 0);		      \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
  })
/* A robust mutex's owner died: mark the lock word with FUTEX_OWNER_DIED
   and wake one waiter so it can observe the flag.
   NOTE(review): FUTEX_OWNER_DIED is not defined in this file -- it is
   assumed to come from another header; confirm.  */
#define lll_robust_mutex_dead(futexv) \
  do									      \
    {									      \
      int *__futexp = &(futexv);					      \
      atomic_or (__futexp, FUTEX_OWNER_DIED);				      \
      lll_futex_wake (__futexp, 1);					      \
    }									      \
  while (0)
/* Wake NR_WAKE waiters on FUTEXP and requeue up to NR_MOVE more onto MUTEX,
   provided *FUTEXP still equals VAL (FUTEX_CMP_REQUEUE).
   Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
  ({									      \
    INTERNAL_SYSCALL_DECL (__err);					      \
    long int __ret;							      \
    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \
			      (nr_move), (mutex), (val));		      \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
  })
/* FUTEX_WAKE_OP: wake NR_WAKE waiters on FUTEXP, apply
   FUTEX_OP_CLEAR_WAKE_IF_GT_ONE to *FUTEXP2 and conditionally wake
   NR_WAKE2 waiters there.  Returns non-zero if error happened, zero if
   success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
  ({									      \
    INTERNAL_SYSCALL_DECL (__err);					      \
    long int __ret;							      \
    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
			      (futexp), FUTEX_WAKE_OP, (nr_wake),	      \
			      (nr_wake2), (futexp2),			      \
			      FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);		      \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
  })
/* Try to acquire the lock (0 -> 1).  Returns 0 if the lock was taken,
   non-zero if it was already held.  */
static inline int
__attribute__ ((always_inline))
__lll_mutex_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_mutex_trylock(lock)	__lll_mutex_trylock (&(lock))
/* Try to acquire the lock in the "possible waiters" state (0 -> 2).
   Returns 0 if the lock was taken, non-zero if it was already held.  */
static inline int
__attribute__ ((always_inline))
__lll_mutex_cond_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_mutex_cond_trylock(lock)	__lll_mutex_cond_trylock (&(lock))
/* Try to acquire a robust lock by storing the owner's thread ID
   (0 -> ID).  Returns 0 if the lock was taken, non-zero otherwise.  */
static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_trylock (int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_mutex_trylock(lock, id) \
  __lll_robust_mutex_trylock (&(lock), id)
126 extern void __lll_lock_wait (int *futex
) attribute_hidden
;
127 extern int __lll_robust_lock_wait (int *futex
) attribute_hidden
;
/* Acquire the lock: fast path is a CAS 0 -> 1; on contention fall back
   to the __lll_lock_wait slow path.  */
static inline void
__attribute__ ((always_inline))
__lll_mutex_lock (int *futex)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    __lll_lock_wait (futex);
}
#define lll_mutex_lock(futex)	__lll_mutex_lock (&(futex))
/* Acquire a robust lock: fast path is a CAS 0 -> ID (the owner's thread
   ID); on contention fall back to the robust slow path.  Returns 0 on
   success or the slow path's error code.  */
static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_lock (int *futex, int id)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_lock_wait (futex);
  return result;
}
#define lll_robust_mutex_lock(futex, id) \
  __lll_robust_mutex_lock (&(futex), id)
/* Acquire the lock, immediately entering the "possible waiters" state
   (CAS 0 -> 2); on contention fall back to __lll_lock_wait.  */
static inline void
__attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex);
}
#define lll_mutex_cond_lock(futex)	__lll_mutex_cond_lock (&(futex))
/* As lll_robust_mutex_lock, but with FUTEX_WAITERS set in the stored ID so
   the unlocker knows to wake waiters.  NOTE(review): FUTEX_WAITERS is not
   defined in this file -- assumed to come from another header; confirm.  */
#define lll_robust_mutex_cond_lock(futex, id) \
  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
163 extern int __lll_timedlock_wait (int *futex
, const struct timespec
*)
165 extern int __lll_robust_timedlock_wait (int *futex
, const struct timespec
*)
/* Acquire the lock, giving up when ABSTIME passes.  Returns 0 on success
   or the slow path's error code (e.g. a timeout indication).  */
static inline int
__attribute__ ((always_inline))
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime);
  return result;
}
#define lll_mutex_timedlock(futex, abstime) \
  __lll_mutex_timedlock (&(futex), abstime)
/* Acquire a robust lock (CAS 0 -> ID), giving up when ABSTIME passes.
   Returns 0 on success or the slow path's error code.  */
static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
			      int id)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime);
  return result;
}
#define lll_robust_mutex_timedlock(futex, abstime, id) \
  __lll_robust_mutex_timedlock (&(futex), abstime, id)
/* Release the lock with a release-ordered exchange; a previous value > 1
   means there may be waiters, so wake one of them.  */
static inline void
__attribute__ ((always_inline))
__lll_mutex_unlock (int *futex)
{
  int val = atomic_exchange_rel (futex, 0);
  if (__builtin_expect (val > 1, 0))
    lll_futex_wake (futex, 1);
}
#define lll_mutex_unlock(futex)	__lll_mutex_unlock (&(futex))
/* Release a robust lock; wake a waiter if any of the MASK bits (normally
   FUTEX_WAITERS) were set in the old lock word.  */
static inline void
__attribute__ ((always_inline))
__lll_robust_mutex_unlock (int *futex, int mask)
{
  int val = atomic_exchange_rel (futex, 0);
  if (__builtin_expect (val & mask, 0))
    lll_futex_wake (futex, 1);
}
#define lll_robust_mutex_unlock(futex) \
  __lll_robust_mutex_unlock (&(futex), FUTEX_WAITERS)
/* Unconditionally release the lock and wake one waiter, regardless of the
   previous lock-word value.  */
static inline void
__attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
  (void) atomic_exchange_rel (futex, 0);
  lll_futex_wake (futex, 1);
}
#define lll_mutex_unlock_force(futex)	__lll_mutex_unlock_force (&(futex))
/* Non-zero iff the lock word indicates the lock is held.  */
#define lll_mutex_islocked(futex) \
  ((futex) != 0)
227 /* Our internal lock implementation is identical to the binary-compatible
228 mutex implementation. */
230 /* Type for lock object. */
231 typedef int lll_lock_t
;
233 /* Initializers for lock. */
234 #define LLL_LOCK_INITIALIZER (0)
235 #define LLL_LOCK_INITIALIZER_LOCKED (1)
237 extern int lll_unlock_wake_cb (int *__futex
) attribute_hidden
;
/* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users */

/* Internal locks are plain mutexes; forward the operations.  */
#define lll_trylock(lock)	lll_mutex_trylock (lock)
#define lll_lock(lock)		lll_mutex_lock (lock)
#define lll_unlock(lock)	lll_mutex_unlock (lock)
#define lll_islocked(lock)	lll_mutex_islocked (lock)
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do									      \
    {									      \
      __typeof (tid) __tid;						      \
      while ((__tid = (tid)) != 0)					      \
	lll_futex_wait (&(tid), __tid);					      \
    }									      \
  while (0)
260 extern int __lll_timedwait_tid (int *, const struct timespec
*)
263 #define lll_timedwait_tid(tid, abstime) \
267 __res = __lll_timedwait_tid (&(tid), (abstime)); \
272 /* Conditional variable handling. */
274 extern void __lll_cond_wait (pthread_cond_t
*cond
)
276 extern int __lll_cond_timedwait (pthread_cond_t
*cond
,
277 const struct timespec
*abstime
)
279 extern void __lll_cond_wake (pthread_cond_t
*cond
)
281 extern void __lll_cond_broadcast (pthread_cond_t
*cond
)
/* Convenience wrappers forwarding to the condvar helpers above.  */
#define lll_cond_wait(cond) \
  __lll_cond_wait (cond)
#define lll_cond_timedwait(cond, abstime) \
  __lll_cond_timedwait (cond, abstime)
#define lll_cond_wake(cond) \
  __lll_cond_wake (cond)
#define lll_cond_broadcast(cond) \
  __lll_cond_broadcast (cond)
293 #endif /* lowlevellock.h */