2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * This file contains a skeleton pthread implementation for NT.
12 * This is not intended to be a fully compliant pthread implementation
13 * The purpose of this file is to only implement those functions that
14 * are truly needed to support the afs code base.
16 * A secondary goal is to allow a "real" pthread implementation to
17 * replace this file without any modification to code that depends upon
20 * The function signatures and argument types are meant to be the same
21 * as their UNIX prototypes.
22 * Where possible, the POSIX specified return values are used.
23 * For situations where an error can occur, but no corresponding
24 * POSIX error value exists, unique (within a given function) negative
25 * numbers are used for errors to avoid collisions with the errno
29 #include <afs/param.h>
38 #include <sys/timeb.h>
40 #define PTHREAD_EXIT_EXCEPTION 0x1
43 * Posix threads uses static initialization for pthread_once control
44 * objects, and under NT, every sophisticated synchronization primitive
45 * uses procedural initialization. This forces the use of CompareExchange
46 * (aka test and set) and busy waiting for threads that compete to run
47 * a pthread_once'd function. We make these "busy" threads give up their
48 * timeslice - which should cause acceptable behavior on a single processor
49 * machine, but on a multiprocessor machine this could very well result
53 int pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void)) {
56 if ((once_control
!= NULL
) && (init_routine
!= NULL
)) {
57 if (InterlockedExchange((LPLONG
)&once_control
->call_started
,
60 once_control
->call_running
= 0;
62 /* use Sleep() since SwitchToThread() not available on Win95 */
63 while(once_control
->call_running
) Sleep(20);
72 * For now only support PTHREAD_PROCESS_PRIVATE mutexes.
73 * if PTHREAD_PROCESS_SHARED are required later they can be added
76 int pthread_mutex_init(pthread_mutex_t
*mp
, const pthread_mutexattr_t
*attr
) {
79 if ((mp
!= NULL
) && (attr
== NULL
)) {
80 memset(mp
, 0, sizeof(*mp
));
81 InitializeCriticalSection(&mp
->cs
);
91 * Under NT, critical sections can be locked recursively by the owning
92 * thread. This is opposite of the pthread spec, and so we keep track
93 * of the thread that has locked a critical section. If the same thread
94 * tries to lock a critical section more than once we fail.
96 int pthread_mutex_trylock(pthread_mutex_t
*mp
) {
100 /* TryEnterCriticalSection() not available on Win95, so just wait for
101 * the lock. Correct code generally can't depend on how long the
102 * function takes to return, so the only code that will be broken is
103 * that for which 1) the mutex *mp is obtained and never released or
104 * 2) the mutex *mp is intentionally held until trylock() returns.
105 * These cases are unusual and don't appear in normal (non-test) AFS
106 * code; furthermore, we can reduce (but not eliminate!) the problem by
107 * sneaking a look at isLocked even though we don't hold the
108 * CRITICAL_SECTION in mutex *mp and are thus vulnerable to race
109 * conditions. Note that self-deadlock isn't a problem since
110 * CRITICAL_SECTION objects are recursive.
112 * Given the very restricted usage of the pthread library on Windows 95,
113 * we can live with these limitations.
119 rc
= pthread_mutex_lock(mp
);
125 /* TryEnterCriticalSection() provided on other MS platforms of interest */
127 if (TryEnterCriticalSection(&mp
->cs
)) {
129 /* same thread tried to recursively lock, fail */
130 LeaveCriticalSection(&mp
->cs
);
134 mp
->tid
= GetCurrentThreadId();
143 #endif /* AFS_WIN95_ENV */
149 int pthread_mutex_lock(pthread_mutex_t
*mp
) {
153 EnterCriticalSection(&mp
->cs
);
156 mp
->tid
= GetCurrentThreadId();
159 * same thread tried to recursively lock this mutex.
160 * Under real POSIX, this would cause a deadlock, but NT only
161 * supports recursive mutexes so we indicate the situation
162 * by returning EDEADLK.
164 LeaveCriticalSection(&mp
->cs
);
180 int pthread_mutex_unlock(pthread_mutex_t
*mp
) {
184 if (mp
->tid
== GetCurrentThreadId()) {
187 LeaveCriticalSection(&mp
->cs
);
203 int pthread_mutex_destroy(pthread_mutex_t
*mp
) {
207 DeleteCriticalSection(&mp
->cs
);
218 int pthread_rwlock_destroy(pthread_rwlock_t
*rwp
)
223 pthread_mutex_destroy(&rwp
->read_access_completion_mutex
);
224 pthread_mutex_destroy(&rwp
->write_access_mutex
);
225 pthread_cond_destroy(&rwp
->read_access_completion_wait
);
236 int pthread_rwlock_init(pthread_rwlock_t
*rwp
, const pthread_rwlockattr_t
*attr
)
245 rc
= pthread_mutex_init(&rwp
->write_access_mutex
, NULL
);
249 rc
= pthread_mutex_init(&rwp
->read_access_completion_mutex
, NULL
);
253 rc
= pthread_cond_init(&rwp
->read_access_completion_wait
, NULL
);
255 return 0; /* success */
257 pthread_mutex_destroy(&rwp
->read_access_completion_mutex
);
260 pthread_mutex_destroy(&rwp
->write_access_mutex
);
265 int pthread_rwlock_wrlock(pthread_rwlock_t
*rwp
)
272 if ((rc
= pthread_mutex_lock(&rwp
->write_access_mutex
)) != 0)
275 if ((rc
= pthread_mutex_lock(&rwp
->read_access_completion_mutex
)) != 0)
277 pthread_mutex_unlock(&rwp
->write_access_mutex
);
281 while (rc
== 0 && rwp
->readers
> 0) {
282 rc
= pthread_cond_wait( &rwp
->read_access_completion_wait
,
283 &rwp
->read_access_completion_mutex
);
286 pthread_mutex_unlock(&rwp
->read_access_completion_mutex
);
289 pthread_mutex_unlock(&rwp
->write_access_mutex
);
294 int pthread_rwlock_rdlock(pthread_rwlock_t
*rwp
)
301 if ((rc
= pthread_mutex_lock(&rwp
->write_access_mutex
)) != 0)
304 if ((rc
= pthread_mutex_lock(&rwp
->read_access_completion_mutex
)) != 0)
306 pthread_mutex_unlock(&rwp
->write_access_mutex
);
312 pthread_mutex_unlock(&rwp
->read_access_completion_mutex
);
314 pthread_mutex_unlock(&rwp
->write_access_mutex
);
320 int pthread_rwlock_tryrdlock(pthread_rwlock_t
*rwp
)
327 if ((rc
= pthread_mutex_trylock(&rwp
->write_access_mutex
)) != 0)
330 if ((rc
= pthread_mutex_trylock(&rwp
->read_access_completion_mutex
)) != 0) {
331 pthread_mutex_unlock(&rwp
->write_access_mutex
);
337 pthread_mutex_unlock(&rwp
->read_access_completion_mutex
);
339 pthread_mutex_unlock(&rwp
->write_access_mutex
);
344 int pthread_rwlock_trywrlock(pthread_rwlock_t
*rwp
)
351 if ((rc
= pthread_mutex_trylock(&rwp
->write_access_mutex
)) != 0)
354 if ((rc
= pthread_mutex_trylock(&rwp
->read_access_completion_mutex
)) != 0)
356 pthread_mutex_unlock(&rwp
->write_access_mutex
);
360 if (rwp
->readers
> 0)
363 pthread_mutex_unlock(&rwp
->read_access_completion_mutex
);
366 pthread_mutex_unlock(&rwp
->write_access_mutex
);
371 int pthread_rwlock_unlock(pthread_rwlock_t
*rwp
)
378 rc
= pthread_mutex_trylock(&rwp
->write_access_mutex
);
381 /* unlock a read lock */
383 pthread_mutex_unlock(&rwp
->write_access_mutex
);
385 if ((rc
= pthread_mutex_lock(&rwp
->read_access_completion_mutex
)) != 0)
387 pthread_mutex_unlock(&rwp
->write_access_mutex
);
391 if (rwp
->readers
<= 0)
397 if (--rwp
->readers
== 0)
398 pthread_cond_broadcast(&rwp
->read_access_completion_wait
);
401 pthread_mutex_unlock(&rwp
->read_access_completion_mutex
);
405 /* unlock a write lock */
406 rc
= pthread_mutex_unlock(&rwp
->write_access_mutex
);
414 * keys is used to keep track of which keys are currently
415 * in use by the threads library. pthread_tsd_mutex is used
418 * The bookkeeping for keys in use and destructor function/key is
419 * at the library level. Each individual thread only keeps its
420 * per key data value. This implies that the keys array and the
421 * tsd array in the pthread_t structure need to always be exactly
422 * the same size since the same index is used for both arrays.
427 void (*destructor
)(void *);
428 } pthread_tsd_table_t
;
430 static pthread_tsd_table_t keys
[PTHREAD_KEYS_MAX
];
431 static pthread_mutex_t pthread_tsd_mutex
;
432 static pthread_once_t pthread_tsd_once
= PTHREAD_ONCE_INIT
;
435 * In order to support p_self() and p_join() under NT,
436 * we have to keep our own list of active threads and provide a mapping
437 * function that maps the NT thread id to our internal structure.
438 * The main reason that this is necessary is that GetCurrentThread
439 * returns a special constant not an actual handle to the thread.
440 * This makes it impossible to write a p_self() function that works
441 * with only the native NT functions.
444 static struct rx_queue active_Q
;
445 static struct rx_queue cache_Q
;
447 static pthread_mutex_t active_Q_mutex
;
448 static pthread_mutex_t cache_Q_mutex
;
450 static pthread_once_t pthread_cache_once
= PTHREAD_ONCE_INIT
;
451 static int pthread_cache_done
;
453 typedef struct thread
{
454 struct rx_queue thread_queue
;
457 pthread_cond_t wait_terminate
;
465 } thread_t
, *thread_p
;
467 static void create_once(void) {
468 queue_Init(&active_Q
);
469 queue_Init(&cache_Q
);
470 pthread_mutex_init(&active_Q_mutex
, (const pthread_mutexattr_t
*)0);
471 pthread_mutex_init(&cache_Q_mutex
, (const pthread_mutexattr_t
*)0);
472 pthread_cache_done
= 1;
475 static void cleanup_pthread_cache(void) {
476 thread_p cur
= NULL
, next
= NULL
;
478 if (pthread_cache_done
) {
479 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
482 for(queue_Scan(&cache_Q
, cur
, next
, thread
)) {
486 pthread_mutex_destroy(&active_Q_mutex
);
487 pthread_mutex_destroy(&cache_Q_mutex
);
489 pthread_cache_done
= 0;
493 static void put_thread(thread_p old
) {
495 CloseHandle(old
->t_handle
);
496 pthread_mutex_lock(&cache_Q_mutex
);
497 queue_Prepend(&cache_Q
, old
);
498 pthread_mutex_unlock(&cache_Q_mutex
);
501 static thread_p
get_thread() {
504 pthread_mutex_lock(&cache_Q_mutex
);
506 if (queue_IsEmpty(&cache_Q
)) {
507 new = (thread_p
) malloc(sizeof(thread_t
));
510 * One time initialization - we assume threads put back have
511 * unlocked mutexes and condition variables with no waiters
513 * These functions cannot fail currently.
515 pthread_cond_init(&new->wait_terminate
,(const pthread_condattr_t
*)0);
518 new = queue_First(&cache_Q
, thread
);
522 pthread_mutex_unlock(&cache_Q_mutex
);
525 * Initialization done every time we hand out a thread_t
531 new->waiter_count
= 0;
532 new->has_been_joined
= 0;
539 * The thread start function signature is different on NT than the pthread
540 * spec so we create a tiny stub to map from one signature to the next.
541 * This assumes that a void * can be stored within a DWORD.
545 void *(*func
)(void *);
547 char *tsd
[PTHREAD_KEYS_MAX
];
551 static DWORD tsd_index
= 0xffffffff;
552 static DWORD tsd_pthread_index
= 0xffffffff;
553 static pthread_once_t global_tsd_once
= PTHREAD_ONCE_INIT
;
556 static void tsd_once(void) {
557 while(tsd_index
== 0xffffffff) {
558 tsd_index
= TlsAlloc();
560 while(tsd_pthread_index
== 0xffffffff) {
561 tsd_pthread_index
= TlsAlloc();
566 static void tsd_free_all(char *tsd
[PTHREAD_KEYS_MAX
]) {
567 int call_more_destructors
= 0;
571 void (*destructor
)(void *);
572 call_more_destructors
= 0;
573 for(i
=0;i
<PTHREAD_KEYS_MAX
;i
++) {
574 if (tsd
[i
] != NULL
) {
575 destructor
= keys
[i
].destructor
;
576 value
= (void *)tsd
[i
];
578 if (destructor
!= NULL
) {
581 * A side-effect of calling a destructor function is that
582 * more thread specific may be created for this thread.
583 * If we call a destructor, we must recycle through the
584 * entire list again and run any new destructors.
586 call_more_destructors
= 1;
590 } while(call_more_destructors
);
593 static void cleanup_global_tsd(void)
595 thread_p cur
= NULL
, next
= NULL
;
598 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
599 tsd_free_all(cur
->tsd
);
602 TlsFree(tsd_pthread_index
);
603 tsd_pthread_index
= 0xFFFFFFFF;
605 tsd_index
= 0xFFFFFFFF;
610 static DWORD WINAPI
afs_pthread_create_stub(LPVOID param
) {
611 pthread_create_t
*t
= (pthread_create_t
*) param
;
615 * Initialize thread specific storage structures.
618 memset(t
->tsd
, 0, (sizeof(char *) * PTHREAD_KEYS_MAX
));
619 (tsd_done
|| pthread_once(&global_tsd_once
, tsd_once
));
620 TlsSetValue(tsd_index
, (LPVOID
) (t
->tsd
));
621 TlsSetValue(tsd_pthread_index
, (LPVOID
) (t
->me
));
624 * Call the function the user passed to pthread_create and catch the
625 * pthread exit exception if it is raised.
629 rc
= (*(t
->func
))(t
->arg
);
630 } __except(GetExceptionCode() == PTHREAD_EXIT_EXCEPTION
) {
631 rc
= t
->me
->rc
; /* rc is set at pthread_exit */
635 * Cycle through the thread specific data for this thread and
636 * call the destructor function for each non-NULL datum
639 tsd_free_all (t
->tsd
);
643 * If we are joinable, signal any waiters.
646 pthread_mutex_lock(&active_Q_mutex
);
647 if (t
->me
->is_joinable
) {
650 if (t
->me
->waiter_count
) {
651 pthread_cond_broadcast(&t
->me
->wait_terminate
);
657 pthread_mutex_unlock(&active_Q_mutex
);
664 * If a pthread function is called on a thread which was not created by
665 * pthread_create(), that thread will have an entry added to the active_Q
666 * by pthread_self(). When the thread terminates, we need to know
667 * about it, so that we can perform cleanup. A dedicated thread is therefore
668 * maintained, which watches for any thread marked "native_thread==1"
669 * in the active_Q to terminate. The thread spends most of its time sleeping:
670 * it can be signalled by a dedicated event in order to alert it to the
671 * presence of a new thread to watch, or will wake up automatically when
672 * a native thread terminates.
675 static DWORD terminate_thread_id
= 0;
676 static HANDLE terminate_thread_handle
= INVALID_HANDLE_VALUE
;
677 static HANDLE terminate_thread_wakeup_event
= INVALID_HANDLE_VALUE
;
678 static HANDLE
*terminate_thread_wakeup_list
= NULL
;
679 static size_t terminate_thread_wakeup_list_size
= 0;
681 static DWORD WINAPI
terminate_thread_routine(LPVOID param
) {
683 DWORD native_thread_count
;
684 int should_terminate
;
685 int terminate_thread_wakeup_list_index
;
689 * Grab the active_Q_mutex, and while we hold it, scan the active_Q
690 * to see how many native threads we need to watch. If we don't need
691 * to watch any, we can stop this watcher thread entirely (or not);
692 * if we do need to watch some, fill the terminate_thread_wakeup_list
693 * array and go to sleep.
697 native_thread_count
= 0;
698 should_terminate
= FALSE
;
699 pthread_mutex_lock(&active_Q_mutex
);
701 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
702 if (cur
->native_thread
)
703 ++native_thread_count
;
707 * At this point we could decide to terminate this watcher thread
708 * whenever there are no longer any native threads to watch--however,
709 * since thread creation is a time-consuming thing, and since this
710 * thread spends all its time sleeping anyway, there's no real
711 * compelling reason to do so. Thus, the following statement is
714 * if (!native_thread_count) {
715 * should_terminate = TRUE;
718 * Restore the snippet above to cause this watcher thread to only
719 * live whenever there are native threads to watch.
724 * Make sure that our wakeup_list array is large enough to contain
725 * the handles of all the native threads /and/ to contain an
726 * entry for our wakeup_event (in case another native thread comes
729 if (terminate_thread_wakeup_list_size
< (1+native_thread_count
)) {
730 if (terminate_thread_wakeup_list
)
731 free (terminate_thread_wakeup_list
);
732 terminate_thread_wakeup_list
= (HANDLE
*)malloc (sizeof(HANDLE
) *
733 (1+native_thread_count
));
734 if (terminate_thread_wakeup_list
== NULL
) {
735 should_terminate
= TRUE
;
737 terminate_thread_wakeup_list_size
= 1+native_thread_count
;
741 if (should_terminate
) {
743 * Here, we've decided to terminate this watcher thread.
744 * Free our wakeup event and wakeup list, then release the
745 * active_Q_mutex and break this loop.
747 if (terminate_thread_wakeup_list
)
748 free (terminate_thread_wakeup_list
);
749 CloseHandle (terminate_thread_wakeup_event
);
750 terminate_thread_id
= 0;
751 terminate_thread_handle
= INVALID_HANDLE_VALUE
;
752 terminate_thread_wakeup_event
= INVALID_HANDLE_VALUE
;
753 terminate_thread_wakeup_list
= NULL
;
754 terminate_thread_wakeup_list_size
= 0;
755 pthread_mutex_unlock(&active_Q_mutex
);
759 * Here, we've decided to wait for native threads et al.
760 * Fill out the wakeup_list.
762 memset(terminate_thread_wakeup_list
, 0x00, (sizeof(HANDLE
) *
763 (1+native_thread_count
)));
765 terminate_thread_wakeup_list
[0] = terminate_thread_wakeup_event
;
766 terminate_thread_wakeup_list_index
= 1;
770 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
771 if (cur
->native_thread
) {
772 terminate_thread_wakeup_list
[terminate_thread_wakeup_list_index
]
774 ++terminate_thread_wakeup_list_index
;
778 ResetEvent (terminate_thread_wakeup_event
);
781 pthread_mutex_unlock(&active_Q_mutex
);
784 * Time to sleep. We'll wake up if either of the following happen:
785 * 1) Someone sets the terminate_thread_wakeup_event (this will
786 * happen if another native thread gets added to the active_Q)
787 * 2) One or more of the native threads terminate
789 terminate_thread_wakeup_list_index
= WaitForMultipleObjects(
790 1+native_thread_count
,
791 terminate_thread_wakeup_list
,
796 * If we awoke from sleep because an event other than
797 * terminate_thread_wakeup_event was triggered, it means the
798 * specified thread has terminated. (If more than one thread
799 * terminated, we'll handle this first one and loop around--
800 * the event's handle will still be triggered, so we just won't
801 * block at all when we sleep next time around.)
803 if (terminate_thread_wakeup_list_index
> 0) {
804 pthread_mutex_lock(&active_Q_mutex
);
808 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
809 if (cur
->t_handle
== terminate_thread_wakeup_list
[ terminate_thread_wakeup_list_index
])
815 * Cycle through the thread specific data for the specified
816 * thread and call the destructor function for each non-NULL
817 * datum. Then remove the thread_t from active_Q and put it
818 * back on cache_Q for possible later re-use.
820 if(cur
->tsd
!= NULL
) {
821 tsd_free_all(cur
->tsd
);
829 pthread_mutex_unlock(&active_Q_mutex
);
836 static void pthread_sync_terminate_thread(void) {
837 (pthread_cache_done
|| pthread_once(&pthread_cache_once
, create_once
));
839 if (terminate_thread_handle
== INVALID_HANDLE_VALUE
) {
840 CHAR eventName
[MAX_PATH
];
841 static eventCount
= 0;
842 sprintf(eventName
, "terminate_thread_wakeup_event %d::%d", _getpid(), eventCount
++);
843 terminate_thread_wakeup_event
= CreateEvent((LPSECURITY_ATTRIBUTES
) 0,
844 TRUE
, FALSE
, (LPCTSTR
) eventName
);
845 terminate_thread_handle
= CreateThread((LPSECURITY_ATTRIBUTES
) 0, 0,
846 terminate_thread_routine
, (LPVOID
) 0, 0,
847 &terminate_thread_id
);
849 SetEvent (terminate_thread_wakeup_event
);
855 * Only support the detached attribute specifier for pthread_create.
856 * Under NT, thread stacks grow automatically as needed.
859 int pthread_create(pthread_t
*tid
, const pthread_attr_t
*attr
, void *(*func
)(void *), void *arg
) {
861 pthread_create_t
*t
= NULL
;
863 (pthread_cache_done
|| pthread_once(&pthread_cache_once
, create_once
));
865 if ((tid
!= NULL
) && (func
!= NULL
)) {
866 if ((t
= (pthread_create_t
*) malloc(sizeof(pthread_create_t
))) &&
867 (t
->me
= get_thread()) ) {
870 *tid
= (pthread_t
) t
->me
;
872 t
->me
->is_joinable
= attr
->is_joinable
;
874 t
->me
->is_joinable
= PTHREAD_CREATE_JOINABLE
;
876 t
->me
->native_thread
= 0;
879 * At the point (before we actually create the thread)
880 * we need to add our entry to the active queue. This ensures
881 * us that other threads who may run after this thread returns
882 * will find an entry for the create thread regardless of
883 * whether the newly created thread has run or not.
884 * In the event the thread create fails, we will have temporarily
885 * added an entry to the list that was never valid, but we
886 * (i.e. the thread that is calling thread_create) are the
887 * only one who could possibly know about the bogus entry
888 * since we hold the active_Q_mutex.
890 pthread_mutex_lock(&active_Q_mutex
);
891 queue_Prepend(&active_Q
, t
->me
);
892 t
->me
->t_handle
= CreateThread((LPSECURITY_ATTRIBUTES
) 0, 0,
893 afs_pthread_create_stub
, (LPVOID
) t
, 0,
895 if (t
->me
->t_handle
== 0) {
897 * we only free t if the thread wasn't created, otherwise
898 * it's free'd by the new thread.
905 pthread_mutex_unlock(&active_Q_mutex
);
918 int pthread_cond_init(pthread_cond_t
*cond
, const pthread_condattr_t
*attr
) {
922 * Only support default attribute -> must pass a NULL pointer for
925 if ((attr
== NULL
) && (cond
!= NULL
)) {
926 memset(cond
, 0, sizeof(*cond
));
927 InitializeCriticalSection(&cond
->cs
);
928 queue_Init(&cond
->waiting_threads
);
937 * In order to optimize the performance of condition variables,
938 * we maintain a pool of cond_waiter_t's that have been dynamically
939 * allocated. There is no attempt made to garbage collect these -
940 * once they have been created, they stay in the cache for the life
944 static struct rx_queue waiter_cache
;
945 static CRITICAL_SECTION waiter_cache_cs
;
946 static int waiter_cache_init
;
947 static pthread_once_t waiter_cache_once
= PTHREAD_ONCE_INIT
;
949 static void init_waiter_cache(void) {
950 if (waiter_cache_init
)
953 memset(&waiter_cache_cs
, 0, sizeof(waiter_cache_cs
));
954 InitializeCriticalSection(&waiter_cache_cs
);
955 queue_Init(&waiter_cache
);
956 waiter_cache_init
= 1;
959 static void cleanup_waiter_cache(void)
961 cond_waiters_t
* cur
= NULL
, * next
= NULL
;
963 if (waiter_cache_init
) {
964 for(queue_Scan(&waiter_cache
, cur
, next
, cond_waiter
)) {
967 CloseHandle(cur
->event
);
971 DeleteCriticalSection(&waiter_cache_cs
);
972 waiter_cache_init
= 0;
976 static cond_waiters_t
*get_waiter() {
977 cond_waiters_t
*new = NULL
;
979 (waiter_cache_init
|| pthread_once(&waiter_cache_once
, init_waiter_cache
));
981 EnterCriticalSection(&waiter_cache_cs
);
983 if (queue_IsEmpty(&waiter_cache
)) {
984 new = (cond_waiters_t
*) malloc(sizeof(cond_waiters_t
));
986 CHAR eventName
[MAX_PATH
];
987 static eventCount
= 0;
988 sprintf(eventName
, "cond_waiters_t %d::%d", _getpid(), eventCount
++);
989 new->event
= CreateEvent((LPSECURITY_ATTRIBUTES
) 0, FALSE
,
990 FALSE
, (LPCTSTR
) eventName
);
991 if (new->event
== NULL
) {
997 new = queue_First(&waiter_cache
, cond_waiter
);
1001 LeaveCriticalSection(&waiter_cache_cs
);
1006 static void put_waiter(cond_waiters_t
*old
) {
1008 (waiter_cache_init
|| pthread_once(&waiter_cache_once
, init_waiter_cache
));
1010 EnterCriticalSection(&waiter_cache_cs
);
1011 queue_Prepend(&waiter_cache
, old
);
1012 LeaveCriticalSection(&waiter_cache_cs
);
1015 static int cond_wait_internal(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const DWORD time
) {
1017 cond_waiters_t
*my_entry
= get_waiter();
1018 cond_waiters_t
*cur
, *next
;
1019 int hasnt_been_signalled
=0;
1021 if ((cond
!= NULL
) && (mutex
!= NULL
) && (my_entry
!= NULL
)) {
1022 EnterCriticalSection(&cond
->cs
);
1023 queue_Append(&cond
->waiting_threads
, my_entry
);
1024 LeaveCriticalSection(&cond
->cs
);
1026 if (pthread_mutex_unlock(mutex
) == 0) {
1027 switch(WaitForSingleObject(my_entry
->event
, time
)) {
1034 * This is a royal pain. We've timed out waiting
1035 * for the signal, but between the time out and here
1036 * it is possible that we were actually signalled by
1037 * another thread. So we grab the condition lock
1038 * and scan the waiting thread queue to see if we are
1039 * still there. If we are, we just remove ourselves.
1041 * If we are no longer listed in the waiter queue,
1042 * it means that we were signalled after the time
1043 * out occurred and so we have to do another wait
1044 * WHICH HAS TO SUCCEED! In this case, we reset
1045 * rc to indicate that we were signalled.
1047 * We have to wait or otherwise, the event
1048 * would be cached in the signalled state, which
1049 * is wrong. It might be more efficient to just
1050 * close and reopen the event.
1052 EnterCriticalSection(&cond
->cs
);
1053 for(queue_Scan(&cond
->waiting_threads
, cur
,
1054 next
, cond_waiter
)) {
1055 if (cur
== my_entry
) {
1056 hasnt_been_signalled
= 1;
1060 if (hasnt_been_signalled
) {
1064 if (!ResetEvent(my_entry
->event
)) {
1068 LeaveCriticalSection(&cond
->cs
);
1070 case WAIT_ABANDONED
:
1080 if (pthread_mutex_lock(mutex
) != 0) {
1090 if (my_entry
!= NULL
) {
1091 put_waiter(my_entry
);
1097 int pthread_cond_wait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
) {
1100 rc
= cond_wait_internal(cond
, mutex
, INFINITE
);
1104 int pthread_cond_timedwait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*abstime
) {
1106 struct _timeb now
, then
;
1107 afs_uint32 n_milli
, t_milli
;
1109 if (abstime
->tv_nsec
< 1000000000) {
1112 * pthread timedwait uses an absolute time, NT uses relative so
1113 * we convert here. The millitm field in the timeb struct is
1114 * unsigned, but we need to do subtraction preserving the sign,
1115 * so we copy the fields into temporary variables.
1118 * In NT 4.0 SP3, WaitForSingleObject can occasionally timeout
1119 * earlier than requested. Therefore, our pthread_cond_timedwait
1120 * can also return early.
1124 n_milli
= now
.millitm
;
1125 then
.time
= abstime
->tv_sec
;
1126 t_milli
= abstime
->tv_nsec
/1000000;
1128 if((then
.time
> now
.time
||
1129 (then
.time
== now
.time
&& t_milli
> n_milli
))) {
1130 if((t_milli
-= n_milli
) < 0) {
1134 then
.time
-= now
.time
;
1136 if ((then
.time
+ (clock() / CLOCKS_PER_SEC
)) <= 50000000) {
1138 * Under NT, we can only wait for milliseconds, so we
1139 * round up the wait time here.
1141 rc
= cond_wait_internal(cond
, mutex
,
1142 (DWORD
)((then
.time
* 1000) + (t_milli
)));
1156 int pthread_cond_signal(pthread_cond_t
*cond
) {
1158 cond_waiters_t
*release_thread
;
1161 EnterCriticalSection(&cond
->cs
);
1164 * remove the first waiting thread from the queue
1165 * and resume his execution
1167 if (queue_IsNotEmpty(&cond
->waiting_threads
)) {
1168 release_thread
= queue_First(&cond
->waiting_threads
,
1170 queue_Remove(release_thread
);
1171 if (!SetEvent(release_thread
->event
)) {
1176 LeaveCriticalSection(&cond
->cs
);
1184 int pthread_cond_broadcast(pthread_cond_t
*cond
) {
1186 cond_waiters_t
*release_thread
, *next_thread
;
1189 EnterCriticalSection(&cond
->cs
);
1192 * Empty the waiting_threads queue.
1194 if (queue_IsNotEmpty(&cond
->waiting_threads
)) {
1195 for(queue_Scan(&cond
->waiting_threads
, release_thread
,
1196 next_thread
, cond_waiter
)) {
1197 queue_Remove(release_thread
);
1198 if (!SetEvent(release_thread
->event
)) {
1204 LeaveCriticalSection(&cond
->cs
);
1212 int pthread_cond_destroy(pthread_cond_t
*cond
) {
1216 DeleteCriticalSection(&cond
->cs
);
1222 * A previous version of this file had code to check the waiter
1223 * queue and empty it here. This has been removed in the hopes
1224 * that it will aid in debugging.
1230 int pthread_join(pthread_t target_thread
, void **status
) {
1232 thread_p me
, target
;
1235 target
= (thread_p
) target_thread
;
1236 me
= (thread_p
) pthread_self();
1240 * Check to see that the target thread is joinable and hasn't
1241 * already been joined.
1244 pthread_mutex_lock(&active_Q_mutex
);
1246 for(queue_Scan(&active_Q
, cur
, next
, thread
)) {
1247 if (target
== cur
) break;
1250 if (target
== cur
) {
1251 if ((!target
->is_joinable
) || (target
->has_been_joined
)) {
1259 pthread_mutex_unlock(&active_Q_mutex
);
1263 target
->waiter_count
++;
1264 while(target
->running
) {
1265 pthread_cond_wait(&target
->wait_terminate
, &active_Q_mutex
);
1269 * Only one waiter gets the status and is allowed to join, all the
1270 * others get an error.
1273 if (target
->has_been_joined
) {
1276 target
->has_been_joined
= 1;
1278 *status
= target
->rc
;
1283 * If we're the last waiter it is our responsibility to remove
1284 * this entry from the terminated list and put it back in the
1288 target
->waiter_count
--;
1289 if (target
->waiter_count
== 0) {
1290 queue_Remove(target
);
1291 pthread_mutex_unlock(&active_Q_mutex
);
1294 pthread_mutex_unlock(&active_Q_mutex
);
1304 * Note that we can't return an error from pthread_getspecific so
1305 * we return a NULL pointer instead.
1308 void *pthread_getspecific(pthread_key_t key
) {
1310 char **tsd
= TlsGetValue(tsd_index
);
1315 if ((key
> -1) && (key
< PTHREAD_KEYS_MAX
)) {
1316 rc
= (void *) *(tsd
+ key
);
1322 static int p_tsd_done
;
1324 static void pthread_tsd_init(void) {
1325 pthread_mutex_init(&pthread_tsd_mutex
, (const pthread_mutexattr_t
*)0);
1329 int pthread_key_create(pthread_key_t
*keyp
, void (*destructor
)(void *value
)) {
1333 if (p_tsd_done
|| (!pthread_once(&pthread_tsd_once
, pthread_tsd_init
))) {
1334 if (!pthread_mutex_lock(&pthread_tsd_mutex
)) {
1335 for(i
=0;i
<PTHREAD_KEYS_MAX
;i
++) {
1336 if (!keys
[i
].inuse
) break;
1339 if (!keys
[i
].inuse
) {
1341 keys
[i
].destructor
= destructor
;
1346 pthread_mutex_unlock(&pthread_tsd_mutex
);
1357 int pthread_key_delete(pthread_key_t key
) {
1360 if (p_tsd_done
|| (!pthread_once(&pthread_tsd_once
, pthread_tsd_init
))) {
1361 if ((key
> -1) && (key
< PTHREAD_KEYS_MAX
)) {
1362 if (!pthread_mutex_lock(&pthread_tsd_mutex
)) {
1363 keys
[key
].inuse
= 0;
1364 keys
[key
].destructor
= NULL
;
1365 pthread_mutex_unlock(&pthread_tsd_mutex
);
1379 int pthread_setspecific(pthread_key_t key
, const void *value
) {
1383 /* make sure all thread-local storage has been allocated */
1386 if (p_tsd_done
|| (!pthread_once(&pthread_tsd_once
, pthread_tsd_init
))) {
1387 if ((key
> -1) && (key
< PTHREAD_KEYS_MAX
)) {
1388 if (!pthread_mutex_lock(&pthread_tsd_mutex
)) {
1389 if (keys
[key
].inuse
) {
1390 tsd
= TlsGetValue(tsd_index
);
1391 *(tsd
+ key
) = (char *) value
;
1395 pthread_mutex_unlock(&pthread_tsd_mutex
);
1409 pthread_t
pthread_self(void) {
1411 DWORD my_id
= GetCurrentThreadId();
1413 (pthread_cache_done
|| pthread_once(&pthread_cache_once
, create_once
));
1414 (tsd_done
|| pthread_once(&global_tsd_once
, tsd_once
));
1416 pthread_mutex_lock(&active_Q_mutex
);
1418 cur
= TlsGetValue (tsd_pthread_index
);
1422 * This thread's ID was not found in our list of pthread-API client
1423 * threads (e.g., those threads created via pthread_create). Create
1426 if ((cur
= get_thread()) != NULL
) {
1427 cur
->is_joinable
= 0;
1429 cur
->native_thread
= 1;
1430 DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
1431 GetCurrentProcess(), &cur
->t_handle
, 0,
1432 TRUE
, DUPLICATE_SAME_ACCESS
);
1435 * We'll also need a place to store key data for this thread
1437 if ((cur
->tsd
= malloc(sizeof(char*) * PTHREAD_KEYS_MAX
)) != NULL
) {
1438 memset(cur
->tsd
, 0, (sizeof(char*) * PTHREAD_KEYS_MAX
));
1440 TlsSetValue(tsd_index
, (LPVOID
)cur
->tsd
);
1441 TlsSetValue(tsd_pthread_index
, (LPVOID
)cur
);
1444 * The thread_t structure is complete; add it to the active_Q
1446 queue_Prepend(&active_Q
, cur
);
1449 * We were able to successfully insert a new entry into the
1450 * active_Q; however, when this thread terminates, we will need
1451 * to know about it. The pthread_sync_terminate_thread() routine
1452 * will make sure there is a dedicated thread waiting for any
1453 * native-thread entries in the active_Q to terminate.
1455 pthread_sync_terminate_thread();
1459 pthread_mutex_unlock(&active_Q_mutex
);
1461 return (void *) cur
;
1464 int pthread_equal(pthread_t t1
, pthread_t t2
) {
1468 int pthread_attr_destroy(pthread_attr_t
*attr
) {
1474 int pthread_attr_init(pthread_attr_t
*attr
) {
1478 attr
->is_joinable
= PTHREAD_CREATE_JOINABLE
;
1486 int pthread_attr_getdetachstate(pthread_attr_t
*attr
, int *detachstate
) {
1489 if ((attr
!= NULL
) && (detachstate
!= NULL
)) {
1490 *detachstate
= attr
->is_joinable
;
1497 int pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
) {
1500 if ((attr
!= NULL
) && ((detachstate
== PTHREAD_CREATE_JOINABLE
) ||
1501 (detachstate
== PTHREAD_CREATE_DETACHED
))) {
1502 attr
->is_joinable
= detachstate
;
1509 void pthread_exit(void *status
) {
1510 thread_p me
= (thread_p
) pthread_self();
1513 * Support pthread_exit for thread's created by calling pthread_create
1514 * only. Do this by using an exception that will transfer control
1515 * back to afs_pthread_create_stub. Store away our status before
1518 * If this turns out to be a native thread, the exception will be
1519 * unhandled and the process will terminate.
1523 RaiseException(PTHREAD_EXIT_EXCEPTION
, 0, 0, NULL
);
1528 * DllMain() -- Entry-point function called by the DllMainCRTStartup()
1529 * function in the MSVC runtime DLL (msvcrt.dll).
1531 * Note: the system serializes calls to this function.
1534 DllMain(HINSTANCE dllInstHandle
,/* instance handle for this DLL module */
1535 DWORD reason
, /* reason function is being called */
1537 { /* reserved for future use */
1539 case DLL_PROCESS_ATTACH
:
1540 /* library is being attached to a process */
1541 /* disable thread attach/detach notifications */
1542 (void)DisableThreadLibraryCalls(dllInstHandle
);
1544 pthread_once(&pthread_cache_once
, create_once
);
1545 pthread_once(&global_tsd_once
, tsd_once
);
1546 pthread_once(&waiter_cache_once
, init_waiter_cache
);
1549 case DLL_PROCESS_DETACH
:
1550 cleanup_waiter_cache();
1551 cleanup_global_tsd();
1552 cleanup_pthread_cache();