/*	$NetBSD: pthread.c,v 1.112 2009/07/02 09:59:00 joerg Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.112 2009/07/02 09:59:00 joerg Exp $");

#define	__EXPOSE_STACK	1

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/lwpctl.h>
#include <sys/resource.h>

#include <err.h>
#include <errno.h>
#include <lwp.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <ucontext.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"
pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
RB_HEAD(__pthread__alltree, __pthread_st) pthread__alltree;

static int	pthread__cmp(struct __pthread_st *, struct __pthread_st *);
RB_PROTOTYPE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
static void	pthread__create_tramp(void *);
static void	pthread__initthread(pthread_t);
static void	pthread__scrubthread(pthread_t, char *, int);
static int	pthread__stackid_setup(void *, size_t, pthread_t *);
static int	pthread__stackalloc(pthread_t *);
static void	pthread__initmain(pthread_t *);
static void	pthread__fork_callback(void);
static void	pthread__reap(pthread_t);
static void	pthread__child_callback(void);
static void	pthread__start(void);

void	pthread__init(void);
pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_queue_t pthread__deadqueue;
pthread_queue_t pthread__allqueue;

static pthread_attr_t pthread_default_attr;
static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE };
static pthread_t pthread__first;

enum {
	DIAGASSERT_ABORT	= 1<<0,
	DIAGASSERT_STDERR	= 1<<1,
	DIAGASSERT_SYSLOG	= 1<<2
};

static int pthread__diagassert;

int pthread__concurrency;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;
/*
 * We have to initialize the pthread_stack* variables here because
 * mutexes are used before pthread_init() and thus pthread__initmain()
 * are called.  Since mutexes only save the stack pointer and not a
 * pointer to the thread data, it is safe to change the mapping from
 * stack pointer to thread data afterwards.
 */
#define	_STACKSIZE_LG 18
int	pthread__stacksize_lg = _STACKSIZE_LG;
size_t	pthread__stacksize = 1 << _STACKSIZE_LG;
vaddr_t	pthread__stackmask = (1 << _STACKSIZE_LG) - 1;
vaddr_t pthread__threadmask = (vaddr_t)~((1 << _STACKSIZE_LG) - 1);
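
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): because thread stacks are allocated on power-of-two
 * boundaries of size pthread__stacksize, a thread can recover its own
 * pthread_st from its stack pointer alone by masking, roughly:
 *
 *	pthread_t self = (pthread_t)(pthread__sp() & pthread__threadmask);
 *
 * pthread__sp() is the arch-specific helper, used later in
 * pthread__initmain(), that yields the current stack pointer.
 */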
int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
__strong_alias(__libc_thr_equal,pthread_equal)
__strong_alias(__libc_thr_init,pthread__init)
/*
 * Static library kludge.  Place here a reference to a symbol from each
 * library file which does not already have a reference.
 */
extern int pthread__cancel_stub_binder;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_barrier_init,
};
#define	NHASHLOCK	64

static union hashlock {
	pthread_mutex_t	mutex;
	char		pad[64];
} hashlocks[NHASHLOCK] __aligned(64);
/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void
pthread__init(void)
{
	pthread_t first;
	char *p;
	int i, mib[2];
	size_t len;
	extern int __isthreaded;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(pthread__concurrency);
	if (sysctl(mib, 2, &pthread__concurrency, &len, NULL, 0) == -1)
		err(1, "sysctl(hw.ncpu)");
	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();
	for (i = 0; i < NHASHLOCK; i++) {
		pthread_mutex_init(&hashlocks[i].mutex, NULL);
	}

	/* Fetch parameters. */
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(1, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);
	RB_INIT(&pthread__alltree);
	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first);
	pthread__scrubthread(first, NULL, 0);

	first->pt_lid = _lwp_self();
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
	RB_INSERT(__pthread__alltree, &pthread__alltree, first);

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) {
		err(1, "_lwp_ctl");
	}
	/* Start subsystems */
	PTHREAD_MD_INIT

	for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}
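
	/*
	 * Illustrative note (editorial addition, not in the original):
	 * in the parsing above a lower-case letter enables a report
	 * channel and its upper-case counterpart disables it, e.g.:
	 *
	 *	PTHREAD_DIAGASSERT=ael	abort, report to stderr and syslog
	 *	PTHREAD_DIAGASSERT=E	disable the stderr report only
	 */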
	/* Tell libc that we're here and it should role-play accordingly. */
	pthread__first = first;
	pthread_atfork(NULL, NULL, pthread__fork_callback);
	__isthreaded = 1;
}

static void
pthread__fork_callback(void)
{

	/* lwpctl state is not copied across fork. */
	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &pthread__first->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}
}
static void
pthread__child_callback(void)
{

	/*
	 * Clean up data structures that a forked child process might
	 * trip over.  Note that if threads have been created (causing
	 * this handler to be registered) the standards say that the
	 * child will trigger undefined behavior if it makes any
	 * pthread_* calls (or any other calls that aren't
	 * async-signal-safe), so we don't really have to clean up
	 * much.  Anything that permits some pthread_* calls to work is
	 * merely being polite.
	 */
	pthread__started = 0;
}

static void
pthread__start(void)
{

	/*
	 * Per-process timers are cleared by fork(); despite the
	 * various restrictions on fork() and threads, it's legal to
	 * fork() before creating any threads.
	 */
	pthread_atfork(NULL, NULL, pthread__child_callback);
}
/* General-purpose thread data structure sanitization. */
static void
pthread__initthread(pthread_t t)
{

	t->pt_magic = PT_MAGIC;
	t->pt_sleepobj = NULL;
	t->pt_havespecific = 0;
	t->pt_lwpctl = &pthread__dummy_lwpctl;
	t->pt_droplock = NULL;

	memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
	pthread_mutex_init(&t->pt_lock, NULL);
	PTQ_INIT(&t->pt_cleanup_stack);
	pthread_cond_init(&t->pt_joiners, NULL);
	memset(&t->pt_specific, 0, sizeof(t->pt_specific));
}
static void
pthread__scrubthread(pthread_t t, char *name, int flags)
{

	t->pt_state = PT_STATE_RUNNING;
	t->pt_exitval = NULL;
	t->pt_flags = flags;
	t->pt_cancel = 0;
	t->pt_name = name;
	t->pt_lid = 0;
}
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*startfunc)(void *), void *arg)
{
	pthread_t newthread;
	pthread_attr_t nattr;
	struct pthread_attr_private *p;
	char * volatile name;
	unsigned long flag;
	int ret;

	/*
	 * It's okay to check this without a lock because there can
	 * only be one thread before it becomes true.
	 */
	if (pthread__started == 0) {
		pthread__start();
		pthread__started = 1;
	}

	if (attr == NULL)
		nattr = pthread_default_attr;
	else if (attr->pta_magic == PT_ATTR_MAGIC)
		nattr = *attr;
	else
		return EINVAL;
	/* Fetch misc. attributes from the attr structure. */
	name = NULL;
	if ((p = nattr.pta_private) != NULL)
		if (p->ptap_name[0] != '\0')
			if ((name = strdup(p->ptap_name)) == NULL)
				return ENOMEM;
	newthread = NULL;

	/*
	 * Try to reclaim a dead thread.
	 */
	if (!PTQ_EMPTY(&pthread__deadqueue)) {
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_FOREACH(newthread, &pthread__deadqueue, pt_deadq) {
			/* Still busily exiting, or finished? */
			if (newthread->pt_lwpctl->lc_curcpu ==
			    LWPCTL_CPU_EXITED ||
			    (_lwp_kill(newthread->pt_lid, 0) == -1 &&
			    errno == ESRCH))
				break;
		}
		if (newthread)
			PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
	}
	/*
	 * If necessary set up a stack, allocate space for a pthread_st,
	 * and initialize it.
	 */
	if (newthread == NULL) {
		ret = pthread__stackalloc(&newthread);
		if (ret != 0) {
			if (name)
				free(name);
			return ret;
		}

		/* This is used only when creating the thread. */
		_INITCONTEXT_U(&newthread->pt_uc);
#ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
		pthread__uc_id(&newthread->pt_uc) = newthread;
#endif
		newthread->pt_uc.uc_stack = newthread->pt_stack;
		newthread->pt_uc.uc_link = NULL;

		/* Add to list of all threads. */
		pthread_rwlock_wrlock(&pthread__alltree_lock);
		PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
		RB_INSERT(__pthread__alltree, &pthread__alltree, newthread);
		pthread_rwlock_unlock(&pthread__alltree_lock);

		/* Will be reset by the thread upon exit. */
		pthread__initthread(newthread);
	}
	/*
	 * Create the new LWP.
	 */
	pthread__scrubthread(newthread, name, nattr.pta_flags);
	newthread->pt_func = startfunc;
	newthread->pt_arg = arg;
	_lwp_makecontext(&newthread->pt_uc, pthread__create_tramp,
	    newthread, newthread, newthread->pt_stack.ss_sp,
	    newthread->pt_stack.ss_size);

	flag = LWP_DETACHED;
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0 ||
	    (nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0)
		flag |= LWP_SUSPENDED;
	ret = _lwp_create(&newthread->pt_uc, flag, &newthread->pt_lid);
	if (ret != 0) {
		free(name);
		newthread->pt_state = PT_STATE_DEAD;
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		return ret;
	}

	/* XXX must die */
	if ((nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) {
		(void)pthread_setschedparam(newthread, p->ptap_policy,
		    &p->ptap_sp);
	}
	if ((newthread->pt_flags & PT_FLAG_SUSPENDED) == 0) {
		(void)_lwp_continue(newthread->pt_lid);
	}

	*thread = newthread;

	return 0;
}
static void
pthread__create_tramp(void *cookie)
{
	pthread_t self;
	void *retval;

	self = cookie;

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.
	 *
	 * Note that we may race with the kernel in _lwp_create(),
	 * and so pt_lid can be unset at this point, but we don't
	 * care.
	 */
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	if (self->pt_name != NULL) {
		pthread_mutex_lock(&self->pt_lock);
		if (self->pt_name != NULL)
			(void)_lwp_setname(0, self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(1, "_lwp_ctl");
	}

	retval = (*self->pt_func)(self->pt_arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}
int
pthread_suspend_np(pthread_t thread)
{
	pthread_t self;

	self = pthread__self();
	if (self == thread) {
		return EDEADLK;
	}
	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_suspend(thread->pt_lid) == 0)
		return 0;
	return errno;
}

int
pthread_resume_np(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	if (_lwp_continue(thread->pt_lid) == 0)
		return 0;
	return errno;
}
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/* Call any cancellation cleanup handlers */
	if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		pthread_mutex_unlock(&self->pt_lock);
		while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
			cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
			PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
			(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
		}
		pthread_mutex_lock(&self->pt_lock);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/* Signal our exit. */
	self->pt_exitval = retval;
	if (self->pt_flags & PT_FLAG_DETACHED) {
		self->pt_state = PT_STATE_DEAD;
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_mutex_unlock(&self->pt_lock);
		if (name != NULL)
			free(name);
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_cond_broadcast(&self->pt_joiners);
		pthread_mutex_unlock(&self->pt_lock);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	int error;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	self->pt_droplock = &thread->pt_lock;
	pthread_mutex_lock(&thread->pt_lock);
	for (;;) {
		if (thread->pt_state == PT_STATE_ZOMBIE)
			break;
		if (thread->pt_state == PT_STATE_DEAD) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return ESRCH;
		}
		if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
			pthread_mutex_unlock(&thread->pt_lock);
			self->pt_droplock = NULL;
			return EINVAL;
		}
		error = pthread_cond_wait(&thread->pt_joiners,
		    &thread->pt_lock);
		if (error != 0) {
			pthread__errorfunc(__FILE__, __LINE__,
			    __func__, "unexpected return from cond_wait()");
		}
	}
	pthread__testcancel(self);
	if (valptr != NULL)
		*valptr = thread->pt_exitval;
	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	self->pt_droplock = NULL;

	return 0;
}
static void
pthread__reap(pthread_t thread)
{
	char *name;

	name = thread->pt_name;
	thread->pt_name = NULL;
	thread->pt_state = PT_STATE_DEAD;
	pthread_mutex_unlock(&thread->pt_lock);

	pthread_mutex_lock(&pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
	pthread_mutex_unlock(&pthread__deadqueue_lock);

	if (name != NULL)
		free(name);
}
int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here. */
	return (t1 == t2);
}
int
pthread_detach(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_DETACHED;
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else {
		/*
		 * Not valid for threads to be waiting in
		 * pthread_join() (there are intractable
		 * sync issues from the application
		 * perspective), but give those threads
		 * a chance anyway.
		 */
		pthread_cond_broadcast(&thread->pt_joiners);
		pthread_mutex_unlock(&thread->pt_lock);
	}

	return 0;
}
int
pthread_getname_np(pthread_t thread, char *name, size_t len)
{

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, thread->pt_name, len);
	pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}
int
pthread_setname_np(pthread_t thread, const char *name, void *arg)
{
	char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
	int namelen;

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	namelen = snprintf(newname, sizeof(newname), name, arg);
	if (namelen >= PTHREAD_MAX_NAMELEN_NP)
		return EINVAL;

	cp = strdup(newname);
	if (cp == NULL)
		return ENOMEM;

	pthread_mutex_lock(&thread->pt_lock);
	oldname = thread->pt_name;
	thread->pt_name = cp;
	(void)_lwp_setname(thread->pt_lid, cp);
	pthread_mutex_unlock(&thread->pt_lock);

	if (oldname != NULL)
		free(oldname);

	return 0;
}
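
/*
 * Illustrative usage (editorial addition, not in the original): as the
 * snprintf() call above shows, the name argument is a printf-style
 * format and arg is its single parameter, so a caller can write e.g.:
 *
 *	pthread_setname_np(t, "worker-%s", "rx");
 */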
/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}
int
pthread_cancel(pthread_t thread)
{

	if (pthread__find(thread) != 0)
		return ESRCH;
	pthread_mutex_lock(&thread->pt_lock);
	thread->pt_flags |= PT_FLAG_CS_PENDING;
	if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_mutex_unlock(&thread->pt_lock);
		_lwp_wakeup(thread->pt_lid);
	} else
		pthread_mutex_unlock(&thread->pt_lock);

	return 0;
}
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			pthread_mutex_unlock(&self->pt_lock);
			pthread__cancelled();
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
void
pthread_testcancel(void)
{
	pthread_t self;

	self = pthread__self();
	if (self->pt_cancel)
		pthread__cancelled();
}
/*
 * POSIX requires that certain functions return an error rather than
 * invoking undefined behavior even when handed completely bogus
 * pthread_t values, e.g. stack garbage or (pthread_t)666.  This
 * utility routine searches the list of threads for the pthread_t
 * value without dereferencing it.
 */
int
pthread__find(pthread_t id)
{
	pthread_t target;

	pthread_rwlock_rdlock(&pthread__alltree_lock);
	target = RB_FIND(__pthread__alltree, &pthread__alltree, id);
	pthread_rwlock_unlock(&pthread__alltree_lock);

	if (target == NULL || target->pt_state == PT_STATE_DEAD)
		return ESRCH;

	return 0;
}
void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread__cancelled();
}
void
pthread__cancelled(void)
{
	pthread_mutex_t *droplock;
	pthread_t self;

	self = pthread__self();
	droplock = self->pt_droplock;
	self->pt_droplock = NULL;

	if (droplock != NULL && pthread_mutex_held_np(droplock))
		pthread_mutex_unlock(droplock);

	pthread_exit(PTHREAD_CANCELED);
}
void
pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;
	entry->ptc_cleanup = cleanup;
	entry->ptc_arg = arg;
	PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
}
void
pthread__cleanup_pop(int ex, void *store)
{
	pthread_t self;
	struct pt_clean_t *entry;

	self = pthread__self();
	entry = store;

	PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
	if (ex)
		(*entry->ptc_cleanup)(entry->ptc_arg);
}
int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}

ssize_t	_sys_write(int, const void *, size_t);
void
pthread__assertfunc(const char *file, int line, const char *function,
		const char *expr)
{
	char buf[1024];
	int len;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
	    expr, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	_sys_write(STDERR_FILENO, buf, (size_t)len);
	(void)kill(getpid(), SIGABRT);
	_exit(1);
}
void
pthread__errorfunc(const char *file, int line, const char *function,
		const char *msg)
{
	char buf[1024];
	size_t len;

	if (pthread__diagassert == 0)
		return;

	/*
	 * snprintf should not acquire any locks, or we could
	 * end up deadlocked if the assert caller held locks.
	 */
	len = snprintf(buf, 1024,
	    "%s: Error detected by libpthread: %s.\n"
	    "Detected by file \"%s\", line %d%s%s%s.\n"
	    "See pthread(3) for information.\n",
	    getprogname(), msg, file, line,
	    function ? ", function \"" : "",
	    function ? function : "",
	    function ? "\"" : "");

	if (pthread__diagassert & DIAGASSERT_STDERR)
		_sys_write(STDERR_FILENO, buf, len);

	if (pthread__diagassert & DIAGASSERT_SYSLOG)
		syslog(LOG_DEBUG | LOG_USER, "%s", buf);

	if (pthread__diagassert & DIAGASSERT_ABORT) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
}
/*
 * Thread park/unpark operations.  The kernel operations are
 * modelled after a brief description from "Multithreading in
 * the Solaris Operating Environment":
 *
 * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
 */

#define	OOPS(msg)			\
    pthread__errorfunc(__FILE__, __LINE__, __func__, msg)
int
pthread__park(pthread_t self, pthread_mutex_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/*
	 * For non-interlocked release of mutexes we need a store
	 * barrier before incrementing pt_blocking away from zero.
	 * This is provided by pthread_mutex_unlock().
	 */
	self->pt_willpark = 1;
	pthread_mutex_unlock(lock);
	self->pt_willpark = 0;
	self->pt_blocking++;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of pt_sleepobj without
	 * holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	do {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(abstime, self->pt_unpark, hint, hint);
		self->pt_unpark = 0;
		if (error != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleepobj != NULL)) {
		pthread_mutex_lock(lock);
		if ((obj = self->pt_sleepobj) != NULL) {
			PTQ_REMOVE(queue, self, pt_sleep);
			self->pt_sleepobj = NULL;
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread_mutex_unlock(lock);
	}
	self->pt_early = NULL;
	self->pt_blocking--;

	return rv;
}
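
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a typical caller pairs pthread__park() with the unpark
 * routines below, with "queue" protected by "lock", roughly:
 *
 *	pthread_mutex_lock(lock);
 *	PTQ_INSERT_TAIL(queue, self, pt_sleep);
 *	self->pt_sleepobj = obj;
 *	(void)pthread__park(self, lock, queue, abstime, cancelpt, obj);
 *
 * pthread__park() drops the lock itself; a waker then calls
 * pthread__unpark() or pthread__unpark_all() on the same queue while
 * holding the interlock.
 */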
void
pthread__unpark(pthread_queue_t *queue, pthread_t self,
		pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	target = PTQ_FIRST(queue);
	if (nwaiters == max) {
		/* Overflow. */
		(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
		    __UNVOLATILE(&interlock->ptm_waiters));
		nwaiters = 0;
	}
	target->pt_sleepobj = NULL;
	self->pt_waiters[nwaiters++] = target->pt_lid;
	PTQ_REMOVE(queue, target, pt_sleep);
	self->pt_nwaiters = nwaiters;
	pthread__mutex_deferwake(self, interlock);
}
void
pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
		    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	PTQ_FOREACH(target, queue, pt_sleep) {
		if (nwaiters == max) {
			/* Overflow. */
			(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
			    __UNVOLATILE(&interlock->ptm_waiters));
			nwaiters = 0;
		}
		target->pt_sleepobj = NULL;
		self->pt_waiters[nwaiters++] = target->pt_lid;
	}
	self->pt_nwaiters = nwaiters;
	PTQ_INIT(queue);
	pthread__mutex_deferwake(self, interlock);
}
/*
 * Allocate a stack for a thread, and set it up.  It needs to be aligned, so
 * that a thread can find itself by its stack pointer.
 */
static int
pthread__stackalloc(pthread_t *newt)
{
	void *addr;

	addr = mmap(NULL, pthread__stacksize, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE | MAP_ALIGNED(pthread__stacksize_lg),
	    -1, (off_t)0);

	if (addr == MAP_FAILED)
		return ENOMEM;

	pthread__assert(((intptr_t)addr & pthread__stackmask) == 0);

	return pthread__stackid_setup(addr, pthread__stacksize, newt);
}
/*
 * Set up the slightly special stack for the "initial" thread, which
 * runs on the normal system stack, and thus gets slightly different
 * treatment.
 */
static void
pthread__initmain(pthread_t *newt)
{
	struct rlimit slimit;
	size_t pagesize;
	pthread_t t;
	void *base;
	size_t size;
	int error, ret;
	char *value;

	pagesize = (size_t)sysconf(_SC_PAGESIZE);
	pthread__stacksize = 0;
	ret = getrlimit(RLIMIT_STACK, &slimit);
	if (ret == -1)
		err(1, "Couldn't get stack resource consumption limits");

	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > slimit.rlim_cur)
			pthread__stacksize = (size_t)slimit.rlim_cur;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = (size_t)slimit.rlim_cur;
	if (pthread__stacksize < 4 * pagesize)
		errx(1, "Stacksize limit is too low, minimum %zd kbyte.",
		    4 * pagesize / 1024);

	pthread__stacksize_lg = -1;
	while (pthread__stacksize) {
		pthread__stacksize >>= 1;
		pthread__stacksize_lg++;
	}

	pthread__stacksize = (1 << pthread__stacksize_lg);
	pthread__stackmask = pthread__stacksize - 1;
	pthread__threadmask = ~pthread__stackmask;

	base = (void *)(pthread__sp() & pthread__threadmask);
	size = pthread__stacksize;

	error = pthread__stackid_setup(base, size, &t);
	if (error) {
		/* XXX */
		errx(2, "failed to setup main thread: error=%d", error);
	}

	*newt = t;

	/* Set up identity register. */
	(void)_lwp_setprivate(t);
}
static int
pthread__stackid_setup(void *base, size_t size, pthread_t *tp)
{
	pthread_t t;
	void *redaddr;
	size_t pagesize;
	int ret;

	t = base;
	pagesize = (size_t)sysconf(_SC_PAGESIZE);

	/*
	 * Put a pointer to the pthread in the bottom (but
	 * redzone-protected section) of the stack.
	 */
	redaddr = STACK_SHRINK(STACK_MAX(base, size), pagesize);
	t->pt_stack.ss_size = size - 2 * pagesize;
#ifdef __MACHINE_STACK_GROWS_UP
	t->pt_stack.ss_sp = (char *)(void *)base + pagesize;
#else
	t->pt_stack.ss_sp = (char *)(void *)base + 2 * pagesize;
#endif

	/* Protect the next-to-bottom stack page as a red zone. */
	ret = mprotect(redaddr, pagesize, PROT_NONE);
	if (ret == -1)
		return errno;

	*tp = t;

	return 0;
}
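
/*
 * Illustrative layout (editorial addition, not in the original): for
 * the common stack-grows-down case, one aligned stack region set up by
 * pthread__stackid_setup() looks roughly like:
 *
 *	base			pthread_st (t) lives in the bottom page
 *	base + pagesize		red zone page, mprotect()ed PROT_NONE
 *	base + 2 * pagesize	ss_sp, usable stack
 *				(ss_size = size - 2 * pagesize)
 *	base + size		top of the region
 */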
static int
pthread__cmp(struct __pthread_st *a, struct __pthread_st *b)
{

	if ((uintptr_t)a < (uintptr_t)b)
		return (-1);
	else if (a == b)
		return 0;
	else
		return 1;
}
RB_GENERATE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
	extern char *__findenv(const char *, int *);
	int off;

	return __findenv(name, &off);
}
pthread_mutex_t *
pthread__hashlock(volatile const void *p)
{
	uintptr_t v;

	v = (uintptr_t)p;
	return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex;
}
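
/*
 * Illustrative usage (editorial addition, not in the original): the
 * hash mixes two shifted copies of the pointer so that objects sharing
 * low-order address bits still spread across the lock table.  A caller
 * serializing on an arbitrary address might do:
 *
 *	pthread_mutex_t *lk = pthread__hashlock(obj);
 *	pthread_mutex_lock(lk);
 *	... operate on obj ...
 *	pthread_mutex_unlock(lk);
 */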
int
pthread__checkpri(int pri)
{
	static int havepri;
	static long min, max;

	if (!havepri) {
		min = sysconf(_SC_SCHED_PRI_MIN);
		max = sysconf(_SC_SCHED_PRI_MAX);
		havepri = 1;
	}

	return (pri < min || pri > max) ? EINVAL : 0;
}