/*	$NetBSD: pthread_mutex.c,v 1.50 2008/05/25 17:05:28 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */

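/*
 * In outline, the lockless waiter list used below works as follows
 * (an illustrative summary of the code in this file, not extra API):
 *
 *	acquire (slow path):
 *		self->pt_mutexwait = 1;
 *		self->pt_mutexnext = ptm->ptm_waiters;
 *		CAS ptm->ptm_waiters from the old head to self, retrying
 *		on failure; then set MUTEX_WAITERS_BIT in ptm->ptm_owner
 *		and sleep in _lwp_park().
 *
 *	release:
 *		swap ptm->ptm_owner to the unlocked value; if waiters were
 *		indicated, atomically detach the whole ptm_waiters list and
 *		_lwp_unpark() the threads on it.
 */
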
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.50 2008/05/25 17:05:28 ad Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <errno.h>
#include <limits.h>

#include "pthread.h"
#include "pthread_int.h"

#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_DEFERRED_BIT		((uintptr_t)0x04)
#define	MUTEX_THREAD			((uintptr_t)-16L)

#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

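/*
 * ptm_owner is a single word that encodes both the owning thread and
 * state flags: the low bits hold MUTEX_WAITERS_BIT, MUTEX_RECURSIVE_BIT
 * and MUTEX_DEFERRED_BIT, while MUTEX_OWNER() masks them off to recover
 * the pthread_t.  This relies on pthread_t pointers being aligned to at
 * least 16 bytes, so the low four bits of a real owner are always zero.
 */
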
#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *);
static int	pthread__mutex_unlock_slow(pthread_mutex_t *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	intptr_t type;

	if (attr == NULL)
		type = PTHREAD_MUTEX_NORMAL;
	else
		type = (intptr_t)attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}

	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;

	return 0;
}

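/*
 * Note that the mutex type is not stored explicitly: an error-checking
 * mutex is marked by setting ptm_errorcheck, and a recursive mutex by
 * keeping MUTEX_RECURSIVE_BIT set in ptm_owner even while unlocked.
 * The lock/unlock paths below recover the type from those two fields.
 */
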
int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

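/*
 * The uncontended case is kept as small as possible: acquiring a free
 * mutex is a single compare-and-swap of ptm_owner from NULL to the
 * calling thread.  Anything else (contention, recursion, error
 * checking) is punted to pthread__mutex_lock_slow().
 */
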
int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.  pt_blocking is set by libpthread in order
 * to cut out system call and kernel spinlock overhead on remote CPUs
 * (could represent many thousands of clock cycles).  pt_blocking also
 * makes this thread yield if the target is calling sched_yield().
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
		    thread->pt_blocking)
			break;
		/* Exponential back-off between polls of the owner, capped. */
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm)
{
	void *waiters, *new, *owner, *next;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	owner = ptm->ptm_owner;
	self = pthread__self();

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	for (;; owner = ptm->ptm_owner) {
		/* Spin while the owner is running. */
		owner = pthread__mutex_spin(ptm, owner);

		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			do {
				new = (void *)
				    ((uintptr_t)self | (uintptr_t)owner);
				next = atomic_cas_ptr(&ptm->ptm_owner, owner,
				    new);
				if (next == owner) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
					membar_enter();
#endif
					return 0;
				}
				owner = next;
			} while (MUTEX_OWNER(owner) == 0);
			/*
			 * We have lost the race to acquire the mutex.
			 * The new owner could be running on another
			 * CPU, in which case we should spin and avoid
			 * the overhead of blocking.
			 */
			continue;
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure mutexwait/mutexnext
		 * are visible before we enter the waiters list.
		 */
		self->pt_mutexwait = 1;
		for (waiters = ptm->ptm_waiters;; waiters = next) {
			self->pt_mutexnext = waiters;
			membar_producer();
			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
			if (next == waiters)
				break;
		}

		/*
		 * Set the waiters bit and block.
		 *
		 * Note that the mutex can become unlocked before we set
		 * the waiters bit.  If that happens it's not safe to sleep
		 * as we may never be awoken: we must remove the current
		 * thread from the waiters list and try again.
		 *
		 * Because we are doing this atomically, we can't remove
		 * one waiter: we must remove all waiters and awaken them,
		 * then sleep in _lwp_park() until we have been awoken.
		 *
		 * Issue a memory barrier to ensure that we are reading
		 * the value of ptm_owner/pt_mutexwait after we have entered
		 * the waiters list (the CAS itself must be atomic).
		 */
		membar_consumer();
		for (owner = ptm->ptm_owner;; owner = next) {
			if (MUTEX_HAS_WAITERS(owner))
				break;
			if (MUTEX_OWNER(owner) == 0) {
				pthread__mutex_wakeup(self, ptm);
				break;
			}
			new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
			next = atomic_cas_ptr(&ptm->ptm_owner, owner, new);
			if (next == owner) {
				/*
				 * pthread_mutex_unlock() can do a
				 * non-interlocked CAS.  We cannot
				 * know if our attempt to set the
				 * waiters bit has succeeded while
				 * the holding thread is running.
				 * There are many assumptions; see
				 * sys/kern/kern_mutex.c for details.
				 * In short, we must spin if we see
				 * that the holder is running again.
				 */
				membar_sync();
				next = pthread__mutex_spin(ptm, owner);
			}
		}

		/*
		 * We may have been awoken by the current thread above,
		 * or will be awoken by the current holder of the mutex.
		 * The key requirement is that we must not proceed until
		 * told that we are no longer waiting (via pt_mutexwait
		 * being set to zero).  Otherwise it is unsafe to re-enter
		 * the thread onto the waiters list.
		 */
		while (self->pt_mutexwait) {
			self->pt_blocking++;
			(void)_lwp_park(NULL, self->pt_unpark,
			    __UNVOLATILE(&ptm->ptm_waiters),
			    __UNVOLATILE(&ptm->ptm_waiters));
			self->pt_unpark = 0;
			self->pt_blocking--;
			membar_sync();
		}
	}
}

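/*
 * pthread_mutex_trylock() below mirrors the fast path of
 * pthread_mutex_lock() but never blocks: beyond the initial CAS it
 * only handles the recursive-mutex cases, and anything else fails
 * immediately with EBUSY.
 */
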
int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *value;

	/*
	 * Note this may be a non-interlocked CAS.  See lock_slow()
	 * above and sys/kern/kern_mutex.c for details.
	 */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	self = pthread__self();
	value = atomic_cas_ptr_ni(&ptm->ptm_owner, self, NULL);
	if (__predict_true(value == self))
		return 0;
	return pthread__mutex_unlock_slow(ptm);
}

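/*
 * The slow unlock path below deals with everything the fast path
 * cannot: error-checking and recursive mutexes, waking any waiters
 * indicated by MUTEX_WAITERS_BIT, and flushing wake-ups that were
 * deferred via MUTEX_DEFERRED_BIT.
 */
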
NOINLINE static int
pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
{
	pthread_t self, owner, new;
	int weown, error, deferred;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	owner = ptm->ptm_owner;
	weown = (MUTEX_OWNER(owner) == (uintptr_t)self);
	deferred = (int)((uintptr_t)owner & MUTEX_DEFERRED_BIT);
	error = 0;

	if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else {
			new = NULL;
		}
	} else if (MUTEX_RECURSIVE(owner)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else if (ptm->ptm_recursed) {
			ptm->ptm_recursed--;
			new = owner;
		} else {
			new = (pthread_t)MUTEX_RECURSIVE_BIT;
		}
	} else {
		pthread__error(EPERM,
		    "Unlocking unlocked mutex", (owner != NULL));
		pthread__error(EPERM,
		    "Unlocking mutex owned by another thread", weown);
		new = NULL;
	}

	/*
	 * Release the mutex.  If there appear to be waiters, then
	 * wake them up.
	 */
	if (new != owner) {
		owner = atomic_swap_ptr(&ptm->ptm_owner, new);
		if (MUTEX_HAS_WAITERS(owner) != 0) {
			pthread__mutex_wakeup(self, ptm);
			return 0;
		}
	}

	/*
	 * There were no waiters, but we may have deferred waking
	 * other threads until mutex unlock - we must wake them now.
	 */
	if (!deferred)
		return error;

	if (self->pt_nwaiters == 1) {
		/*
		 * If the calling thread is about to block, defer
		 * unparking the target until _lwp_park() is called.
		 */
		if (self->pt_willpark && self->pt_unpark == 0) {
			self->pt_unpark = self->pt_waiters[0];
		} else {
			(void)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
		}
	} else {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
	}
	self->pt_nwaiters = 0;

	return error;
}

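/*
 * pthread__mutex_wakeup() batches waiters: it atomically detaches the
 * whole ptm_waiters list, copies the LIDs into self->pt_waiters[], and
 * hands them to the kernel with _lwp_unpark()/_lwp_unpark_all().  Each
 * waiter's pt_mutexwait is cleared only after its LID has been copied,
 * since that is the waiter's signal that it may retry the lock.
 */
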
static void
pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
{
	pthread_t thread, next;
	ssize_t n, rv;

	/*
	 * Take ownership of the current set of waiters.  No
	 * need for a memory barrier following this, all loads
	 * are dependent upon 'thread'.
	 */
	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);

	for (;;) {
		/*
		 * Pull waiters from the queue and add to our list.
		 * Use a memory barrier to ensure that we safely
		 * read the value of pt_mutexnext before 'thread'
		 * sees pt_mutexwait being cleared.
		 */
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			next = thread->pt_mutexnext;
			if (thread != self) {
				self->pt_waiters[n++] = thread->pt_lid;
				membar_sync();
			}
			thread->pt_mutexwait = 0;
			/* No longer safe to touch 'thread' */
		}

		switch (n) {
		case 0:
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EALREADY && errno != EINTR &&
			    errno != ESRCH) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark failed");
			}
			return;
		default:
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EINTR) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark_all failed");
			}
			break;
		}
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = (int)(intptr_t)attr->ptma_private;
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		attr->ptma_private = (void *)(intptr_t)type;
		return 0;
	default:
		return EINVAL;
	}
}

static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}

int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}

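/*
 * pthread__mutex_deferwake() is used by other parts of libpthread that
 * already hold 'ptm' and want to wake threads queued in pt_waiters[]:
 * rather than issuing a syscall while the mutex is still held, it tags
 * ptm_owner with MUTEX_DEFERRED_BIT so that the wake-up happens in
 * pthread__mutex_unlock_slow(), after the mutex has been released.  If
 * the caller does not in fact own the mutex, the waiters are unparked
 * immediately instead.
 */
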
void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
{

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
		self->pt_nwaiters = 0;
	} else {
		atomic_or_ulong((volatile unsigned long *)
		    (uintptr_t)&ptm->ptm_owner,
		    (unsigned long)MUTEX_DEFERRED_BIT);
	}
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}