Update NEWS for 1.6.22
[pkg-k5-afs_openafs.git] / src / WINNT / pthread / pthread.c
blob36cdc4d7baea15919b1fe2acd638253cf53849c1
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
11 * This file contains a skeleton pthread implementation for NT.
12 * This is not intended to be a fully compliant pthread implementation
13 * The purpose of this file is to only implement those functions that
14 * are truly needed to support the afs code base.
16 * A secondary goal is to allow a "real" pthread implementation to
17 * replace this file without any modification to code that depends upon
18 * this file
20 * The function signatures and argument types are meant to be the same
21 * as their UNIX prototypes.
22 * Where possible, the POSIX specified return values are used.
23 * For situations where an error can occur, but no corresponding
24 * POSIX error value exists, unique (within a given function) negative
25 * numbers are used for errors to avoid collisions with the errno
26 * style values.
29 #include <afs/param.h>
30 #include <afs/stds.h>
32 #include <pthread.h>
33 #include <stdlib.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <process.h>
37 #include <errno.h>
38 #include <sys/timeb.h>
40 #define PTHREAD_EXIT_EXCEPTION 0x1
43 * Posix threads uses static initialization for pthread_once control
44 * objects, and under NT, every sophisticated synchronization primitive
45 * uses procedural initialization. This forces the use of CompareExchange
46 * (aka test and set) and busy waiting for threads that compete to run
47 * a pthread_once'd function. We make these "busy" threads give up their
48 * timeslice - which should cause acceptable behavior on a single processor
49 * machine, but on a multiprocessor machine this could very well result
50 * in busy waiting.
53 int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) {
54 int rc = 0;
56 if ((once_control != NULL) && (init_routine != NULL)) {
57 if (InterlockedExchange((LPLONG)&once_control->call_started,
58 (LONG) 1) == 0) {
59 (*init_routine)();
60 once_control->call_running = 0;
61 } else {
62 /* use Sleep() since SwitchToThread() not available on Win95 */
63 while(once_control->call_running) Sleep(20);
65 } else {
66 rc = EINVAL;
68 return rc;
72 * For now only support PTHREAD_PROCESS_PRIVATE mutexes.
73 * if PTHREAD_PROCESS_SHARED are required later they can be added
76 int pthread_mutex_init(pthread_mutex_t *mp, const pthread_mutexattr_t *attr) {
77 int rc = 0;
79 if ((mp != NULL) && (attr == NULL)) {
80 memset(mp, 0, sizeof(*mp));
81 InitializeCriticalSection(&mp->cs);
82 mp->isLocked = 0;
83 mp->tid = 0;
84 } else {
85 rc = EINVAL;
87 return rc;
91 * Under NT, critical sections can be locked recursively by the owning
92 * thread. This is opposite of the pthread spec, and so we keep track
93 * of the thread that has locked a critical section. If the same thread
94 * tries to lock a critical section more than once we fail.
96 int pthread_mutex_trylock(pthread_mutex_t *mp) {
97 int rc = 0;
99 #ifdef AFS_WIN95_ENV
100 /* TryEnterCriticalSection() not available on Win95, so just wait for
101 * the lock. Correct code generally can't depend on how long the
102 * function takes to return, so the only code that will be broken is
103 * that for which 1) the mutex *mp is obtained and never released or
104 * 2) the mutex *mp is intentionally held until trylock() returns.
105 * These cases are unusual and don't appear in normal (non-test) AFS
106 * code; furthermore, we can reduce (but not eliminate!) the problem by
107 * sneaking a look at isLocked even though we don't hold the
108 * CRITICAL_SECTION in mutex *mp and are thus vulnerable to race
109 * conditions. Note that self-deadlock isn't a problem since
110 * CRITICAL_SECTION objects are recursive.
112 * Given the very restricted usage of the pthread library on Windows 95,
113 * we can live with these limitations.
115 if (mp != NULL) {
116 if (mp->isLocked) {
117 rc = EBUSY;
118 } else {
119 rc = pthread_mutex_lock(mp);
121 } else {
122 rc = EINVAL;
124 #else
125 /* TryEnterCriticalSection() provided on other MS platforms of interest */
126 if (mp != NULL) {
127 if (TryEnterCriticalSection(&mp->cs)) {
128 if (mp->isLocked) {
129 /* same thread tried to recursively lock, fail */
130 LeaveCriticalSection(&mp->cs);
131 rc = EDEADLK;
132 } else {
133 mp->isLocked = 1;
134 mp->tid = GetCurrentThreadId();
135 rc = 0;
137 } else {
138 rc = EBUSY;
140 } else {
141 rc = EINVAL;
143 #endif /* AFS_WIN95_ENV */
145 return rc;
149 int pthread_mutex_lock(pthread_mutex_t *mp) {
150 int rc = 0;
152 if (mp != NULL) {
153 EnterCriticalSection(&mp->cs);
154 if (!mp->isLocked) {
155 mp->isLocked = 1;
156 mp->tid = GetCurrentThreadId();
157 } else {
159 * same thread tried to recursively lock this mutex.
160 * Under real POSIX, this would cause a deadlock, but NT only
161 * supports recursive mutexes so we indicate the situation
162 * by returning EDEADLK.
164 LeaveCriticalSection(&mp->cs);
165 rc = EDEADLK;
166 #ifdef PTHREAD_DEBUG
167 DebugBreak();
168 #endif
170 } else {
171 #ifdef PTHREAD_DEBUG
172 DebugBreak();
173 #endif
174 rc = EINVAL;
177 return rc;
180 int pthread_mutex_unlock(pthread_mutex_t *mp) {
181 int rc = 0;
183 if (mp != NULL) {
184 if (mp->tid == GetCurrentThreadId()) {
185 mp->isLocked = 0;
186 mp->tid = 0;
187 LeaveCriticalSection(&mp->cs);
188 } else {
189 #ifdef PTHREAD_DEBUG
190 DebugBreak();
191 #endif
192 rc = EPERM;
194 } else {
195 #ifdef PTHREAD_DEBUG
196 DebugBreak();
197 #endif
198 rc = EINVAL;
200 return rc;
203 int pthread_mutex_destroy(pthread_mutex_t *mp) {
204 int rc = 0;
206 if (mp != NULL) {
207 DeleteCriticalSection(&mp->cs);
208 } else {
209 #ifdef PTHREAD_DEBUG
210 DebugBreak();
211 #endif
212 rc = EINVAL;
215 return rc;
218 int pthread_rwlock_destroy(pthread_rwlock_t *rwp)
220 int rc = 0;
222 if (rwp != NULL) {
223 pthread_mutex_destroy(&rwp->read_access_completion_mutex);
224 pthread_mutex_destroy(&rwp->write_access_mutex);
225 pthread_cond_destroy(&rwp->read_access_completion_wait);
226 } else {
227 #ifdef PTHREAD_DEBUG
228 DebugBreak();
229 #endif
230 rc = EINVAL;
233 return rc;
236 int pthread_rwlock_init(pthread_rwlock_t *rwp, const pthread_rwlockattr_t *attr)
238 int rc = 0;
240 if (rwp == NULL)
241 return EINVAL;
243 rwp->readers = 0;
245 rc = pthread_mutex_init(&rwp->write_access_mutex, NULL);
246 if (rc)
247 return rc;
249 rc = pthread_mutex_init(&rwp->read_access_completion_mutex, NULL);
250 if (rc)
251 goto error1;
253 rc = pthread_cond_init(&rwp->read_access_completion_wait, NULL);
254 if (rc == 0)
255 return 0; /* success */
257 pthread_mutex_destroy(&rwp->read_access_completion_mutex);
259 error1:
260 pthread_mutex_destroy(&rwp->write_access_mutex);
262 return rc;
265 int pthread_rwlock_wrlock(pthread_rwlock_t *rwp)
267 int rc = 0;
269 if (rwp == NULL)
270 return EINVAL;
272 if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
273 return rc;
275 if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
277 pthread_mutex_unlock(&rwp->write_access_mutex);
278 return rc;
281 while (rc == 0 && rwp->readers > 0) {
282 rc = pthread_cond_wait( &rwp->read_access_completion_wait,
283 &rwp->read_access_completion_mutex);
286 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
288 if (rc)
289 pthread_mutex_unlock(&rwp->write_access_mutex);
291 return rc;
294 int pthread_rwlock_rdlock(pthread_rwlock_t *rwp)
296 int rc = 0;
298 if (rwp == NULL)
299 return EINVAL;
301 if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
302 return rc;
304 if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
306 pthread_mutex_unlock(&rwp->write_access_mutex);
307 return rc;
310 rwp->readers++;
312 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
314 pthread_mutex_unlock(&rwp->write_access_mutex);
316 return rc;
320 int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwp)
322 int rc = 0;
324 if (rwp == NULL)
325 return EINVAL;
327 if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
328 return rc;
330 if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0) {
331 pthread_mutex_unlock(&rwp->write_access_mutex);
332 return rc;
335 rwp->readers++;
337 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
339 pthread_mutex_unlock(&rwp->write_access_mutex);
341 return rc;
344 int pthread_rwlock_trywrlock(pthread_rwlock_t *rwp)
346 int rc = 0;
348 if (rwp == NULL)
349 return EINVAL;
351 if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
352 return rc;
354 if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0)
356 pthread_mutex_unlock(&rwp->write_access_mutex);
357 return rc;
360 if (rwp->readers > 0)
361 rc = EBUSY;
363 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
365 if (rc)
366 pthread_mutex_unlock(&rwp->write_access_mutex);
368 return rc;
/*
 * Release a read or write lock.  Which kind we hold is probed with
 * pthread_mutex_trylock() on the writer gate: that call returns EDEADLK
 * exactly when the calling thread already owns the gate (see
 * pthread_mutex_trylock), i.e. when we hold the write lock.
 */
int pthread_rwlock_unlock(pthread_rwlock_t *rwp)
{
    int rc = 0;

    if (rwp == NULL)
        return EINVAL;

    rc = pthread_mutex_trylock(&rwp->write_access_mutex);
    if (rc != EDEADLK)
    {
        /* unlock a read lock */
        if (rc == 0)
            /* The probe actually acquired the gate; give it straight back. */
            pthread_mutex_unlock(&rwp->write_access_mutex);

        if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
        {
            /* NOTE(review): on this error path the gate was already released
             * (or never held) above, so this unlock looks redundant and its
             * EPERM result is ignored — preserved as-is. */
            pthread_mutex_unlock(&rwp->write_access_mutex);
            return rc;
        }

        if (rwp->readers <= 0)
        {
            /* Unlock without a matching read lock. */
            rc = EINVAL;
        }
        else
        {
            /* Last reader out wakes any writer parked in wrlock(). */
            if (--rwp->readers == 0)
                pthread_cond_broadcast(&rwp->read_access_completion_wait);
        }

        pthread_mutex_unlock(&rwp->read_access_completion_mutex);
    }
    else
    {
        /* unlock a write lock */
        rc = pthread_mutex_unlock(&rwp->write_access_mutex);
    }

    return rc;
}
414 * keys is used to keep track of which keys are currently
415 * in use by the threads library. pthread_tsd_mutex is used
416 * to protect keys.
418 * The bookkeeping for keys in use and destructor function/key is
419 * at the library level. Each individual thread only keeps its
420 * per key data value. This implies that the keys array and the
421 * tsd array in the pthread_t structure need to always be exactly
422 * the same size since the same index is used for both arrays.
/* Per-key bookkeeping: whether the key is allocated and its destructor.
 * Indexed identically to each thread's tsd array, so keys[] and the
 * per-thread tsd arrays must always be exactly the same size. */
typedef struct {
    int inuse;                      /* key slot currently allocated? */
    void (*destructor)(void *);     /* run on thread exit for non-NULL values */
} pthread_tsd_table_t;

static pthread_tsd_table_t keys[PTHREAD_KEYS_MAX];
static pthread_mutex_t pthread_tsd_mutex;       /* protects keys[] */
static pthread_once_t pthread_tsd_once = PTHREAD_ONCE_INIT;
435 * In order to support p_self() and p_join() under NT,
436 * we have to keep our own list of active threads and provide a mapping
437 * function that maps the NT thread id to our internal structure.
438 * The main reason that this is necessary is that GetCurrentThread
439 * returns a special constant not an actual handle to the thread.
440 * This makes it impossible to write a p_self() function that works
441 * with only the native NT functions.
static struct rx_queue active_Q;    /* threads currently alive */
static struct rx_queue cache_Q;     /* retired thread_t's kept for reuse */

static pthread_mutex_t active_Q_mutex;  /* protects active_Q */
static pthread_mutex_t cache_Q_mutex;   /* protects cache_Q */

static pthread_once_t pthread_cache_once = PTHREAD_ONCE_INIT;
static int pthread_cache_done;

/* Internal representation of a thread; a pthread_t is really one of these. */
typedef struct thread {
    struct rx_queue thread_queue;   /* queue linkage (used by queue_* macros) */
    void *rc;                       /* exit status for joiners */
    int running;                    /* nonzero while user code executes */
    pthread_cond_t wait_terminate;  /* broadcast when the thread finishes */
    int waiter_count;               /* joiners currently blocked in join */
    int is_joinable;                /* joinable vs detached */
    int has_been_joined;            /* only one joiner may collect status */
    HANDLE t_handle;                /* NT thread handle */
    DWORD NT_id;                    /* NT thread id */
    int native_thread;              /* not created via pthread_create() */
    char **tsd;                     /* this thread's TSD slots */
} thread_t, *thread_p;
467 static void create_once(void) {
468 queue_Init(&active_Q);
469 queue_Init(&cache_Q);
470 pthread_mutex_init(&active_Q_mutex, (const pthread_mutexattr_t*)0);
471 pthread_mutex_init(&cache_Q_mutex, (const pthread_mutexattr_t*)0);
472 pthread_cache_done = 1;
475 static void cleanup_pthread_cache(void) {
476 thread_p cur = NULL, next = NULL;
478 if (pthread_cache_done) {
479 for(queue_Scan(&active_Q, cur, next, thread)) {
480 queue_Remove(cur);
482 for(queue_Scan(&cache_Q, cur, next, thread)) {
483 queue_Remove(cur);
486 pthread_mutex_destroy(&active_Q_mutex);
487 pthread_mutex_destroy(&cache_Q_mutex);
489 pthread_cache_done = 0;
493 static void put_thread(thread_p old) {
495 CloseHandle(old->t_handle);
496 pthread_mutex_lock(&cache_Q_mutex);
497 queue_Prepend(&cache_Q, old);
498 pthread_mutex_unlock(&cache_Q_mutex);
/* Hand out a thread_t, reusing a cached one when available; returns
 * NULL only if a fresh allocation fails. */
static thread_p get_thread() {
    thread_p new = NULL;

    pthread_mutex_lock(&cache_Q_mutex);

    if (queue_IsEmpty(&cache_Q)) {
        new = (thread_p) malloc(sizeof(thread_t));
        if (new != NULL) {
            /*
             * One time initialization - we assume threads put back have
             * unlocked mutexes and condition variables with no waiters
             *
             * These functions cannot fail currently.
             */
            pthread_cond_init(&new->wait_terminate, (const pthread_condattr_t *)0);
        }
    } else {
        new = queue_First(&cache_Q, thread);
        queue_Remove(new);
    }

    pthread_mutex_unlock(&cache_Q_mutex);

    /*
     * Initialization done every time we hand out a thread_t
     */
    if (new != NULL) {
        new->rc = NULL;
        new->running = 1;
        new->waiter_count = 0;
        new->has_been_joined = 0;
    }
    return new;
}
539 * The thread start function signature is different on NT than the pthread
540 * spec so we create a tiny stub to map from one signature to the next.
541 * This assumes that a void * can be stored within a DWORD.
/* Bridge between the CreateThread() start signature and the pthread one.
 * One instance is malloc'd per pthread_create() call; the new thread
 * frees it in afs_pthread_create_stub(). */
typedef struct {
    void *(*func)(void *);          /* user start routine */
    void *arg;                      /* user argument */
    char *tsd[PTHREAD_KEYS_MAX];    /* this thread's TSD slots */
    thread_p me;                    /* bookkeeping entry on active_Q */
} pthread_create_t;
/* TLS slots holding, per thread, the tsd array and the thread_p of the
 * current thread.  0xffffffff (== TLS_OUT_OF_INDEXES) marks "not yet
 * allocated"; see tsd_once(). */
static DWORD tsd_index = 0xffffffff;
static DWORD tsd_pthread_index = 0xffffffff;
static pthread_once_t global_tsd_once = PTHREAD_ONCE_INIT;
static int tsd_done;
/* One-time allocation of the two TLS slots used by this library.
 * NOTE(review): TlsAlloc() returns 0xffffffff on failure, so each loop
 * retries until an index is obtained — presumably relying on failure
 * being transient; a permanent TLS exhaustion would spin forever. */
static void tsd_once(void) {
    while (tsd_index == 0xffffffff) {
        tsd_index = TlsAlloc();
    }
    while (tsd_pthread_index == 0xffffffff) {
        tsd_pthread_index = TlsAlloc();
    }
    tsd_done = 1;
}
/* Run the registered destructor for every non-NULL TSD value of a
 * terminating thread, sweeping repeatedly because a destructor may
 * itself install new thread specific data. */
static void tsd_free_all(char *tsd[PTHREAD_KEYS_MAX]) {
    int call_more_destructors = 0;
    do {
        int i;
        void *value;
        void (*destructor)(void *);
        call_more_destructors = 0;
        for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
            if (tsd[i] != NULL) {
                destructor = keys[i].destructor;
                value = (void *)tsd[i];
                /* Clear the slot before invoking the destructor so a
                 * re-set value is picked up by the next sweep. */
                tsd[i] = NULL;
                if (destructor != NULL) {
                    (destructor)(value);
                    /*
                     * A side-effect of calling a destructor function is that
                     * more thread specific data may be created for this
                     * thread.  If we call a destructor, we must recycle
                     * through the entire list again and run any new
                     * destructors.
                     */
                    call_more_destructors = 1;
                }
            }
        }
    } while (call_more_destructors);
}
593 static void cleanup_global_tsd(void)
595 thread_p cur = NULL, next = NULL;
597 if (tsd_done) {
598 for(queue_Scan(&active_Q, cur, next, thread)) {
599 tsd_free_all(cur->tsd);
602 TlsFree(tsd_pthread_index);
603 tsd_pthread_index = 0xFFFFFFFF;
604 TlsFree(tsd_index);
605 tsd_index = 0xFFFFFFFF;
606 tsd_done = 0;
/* Thread entry point handed to CreateThread(): sets up TSD, runs the
 * user's start routine (catching the pthread_exit() SEH exception),
 * runs TSD destructors, then either publishes the exit status for
 * joiners or recycles the thread_t immediately if detached. */
static DWORD WINAPI afs_pthread_create_stub(LPVOID param) {
    pthread_create_t *t = (pthread_create_t *) param;
    void *rc;

    /*
     * Initialize thread specific storage structures.
     */
    memset(t->tsd, 0, (sizeof(char *) * PTHREAD_KEYS_MAX));
    (tsd_done || pthread_once(&global_tsd_once, tsd_once));
    TlsSetValue(tsd_index, (LPVOID) (t->tsd));
    TlsSetValue(tsd_pthread_index, (LPVOID) (t->me));

    /*
     * Call the function the user passed to pthread_create and catch the
     * pthread exit exception if it is raised.
     */
    __try {
        rc = (*(t->func))(t->arg);
    } __except(GetExceptionCode() == PTHREAD_EXIT_EXCEPTION) {
        rc = t->me->rc;         /* rc is set at pthread_exit */
    }

    /*
     * Cycle through the thread specific data for this thread and
     * call the destructor function for each non-NULL datum
     */
    tsd_free_all(t->tsd);
    t->me->tsd = NULL;

    /*
     * If we are joinable, signal any waiters.
     */
    pthread_mutex_lock(&active_Q_mutex);
    if (t->me->is_joinable) {
        t->me->running = 0;
        t->me->rc = rc;
        if (t->me->waiter_count) {
            pthread_cond_broadcast(&t->me->wait_terminate);
        }
    } else {
        /* Detached: nobody will join, recycle the entry now. */
        queue_Remove(t->me);
        put_thread(t->me);
    }
    pthread_mutex_unlock(&active_Q_mutex);

    free(t);
    return 0;
}
/*
 * If a pthread function is called on a thread which was not created by
 * pthread_create(), that thread will have an entry added to the active_Q
 * by pthread_self().  When the thread terminates, we need to know
 * about it, so that we can perform cleanup.  A dedicated thread is therefore
 * maintained, which watches for any thread marked "native_thread==1"
 * in the active_Q to terminate.  The thread spends most of its time sleeping:
 * it can be signalled by a dedicated event in order to alert it to the
 * presence of a new thread to watch, or will wake up automatically when
 * a native thread terminates.
 */

static DWORD terminate_thread_id = 0;
static HANDLE terminate_thread_handle = INVALID_HANDLE_VALUE;
static HANDLE terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
static HANDLE *terminate_thread_wakeup_list = NULL;     /* wait array for WaitForMultipleObjects */
static size_t terminate_thread_wakeup_list_size = 0;    /* allocated entries in the list */
/* Watcher thread: waits on the wakeup event plus every native thread's
 * handle; when a native thread dies, runs its TSD destructors and
 * recycles its thread_t.  See the comment block above for the design. */
static DWORD WINAPI terminate_thread_routine(LPVOID param) {
    thread_p cur, next;
    DWORD native_thread_count;
    int should_terminate;
    int terminate_thread_wakeup_list_index;

    for (;;) {
        /*
         * Grab the active_Q_mutex, and while we hold it, scan the active_Q
         * to see how many native threads we need to watch.  If we don't need
         * to watch any, we can stop this watcher thread entirely (or not);
         * if we do need to watch some, fill the terminate_thread_wakeup_list
         * array and go to sleep.
         */
        cur = NULL;
        next = NULL;
        native_thread_count = 0;
        should_terminate = FALSE;
        pthread_mutex_lock(&active_Q_mutex);

        for (queue_Scan(&active_Q, cur, next, thread)) {
            if (cur->native_thread)
                ++native_thread_count;
        }

        /*
         * At this point we could decide to terminate this watcher thread
         * whenever there are no longer any native threads to watch--however,
         * since thread creation is a time-consuming thing, and since this
         * thread spends all its time sleeping anyway, there's no real
         * compelling reason to do so.  Thus, the following statement is
         * commented out:
         *
         * if (!native_thread_count) {
         *     should_terminate = TRUE;
         * }
         *
         * Restore the snippet above to cause this watcher thread to only
         * live whenever there are native threads to watch.
         */

        /*
         * Make sure that our wakeup_list array is large enough to contain
         * the handles of all the native threads /and/ to contain an
         * entry for our wakeup_event (in case another native thread comes
         * along).
         */
        if (terminate_thread_wakeup_list_size < (1 + native_thread_count)) {
            if (terminate_thread_wakeup_list)
                free(terminate_thread_wakeup_list);
            terminate_thread_wakeup_list = (HANDLE *)malloc(sizeof(HANDLE) *
                                                    (1 + native_thread_count));
            if (terminate_thread_wakeup_list == NULL) {
                should_terminate = TRUE;
            } else {
                terminate_thread_wakeup_list_size = 1 + native_thread_count;
            }
        }

        if (should_terminate) {
            /*
             * Here, we've decided to terminate this watcher thread.
             * Free our wakeup event and wakeup list, then release the
             * active_Q_mutex and break this loop.
             */
            if (terminate_thread_wakeup_list)
                free(terminate_thread_wakeup_list);
            CloseHandle(terminate_thread_wakeup_event);
            terminate_thread_id = 0;
            terminate_thread_handle = INVALID_HANDLE_VALUE;
            terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
            terminate_thread_wakeup_list = NULL;
            terminate_thread_wakeup_list_size = 0;
            pthread_mutex_unlock(&active_Q_mutex);
            break;
        } else {
            /*
             * Here, we've decided to wait for native threads et al.
             * Fill out the wakeup_list: slot 0 is the wakeup event, the
             * rest are the native threads' handles.
             */
            memset(terminate_thread_wakeup_list, 0x00, (sizeof(HANDLE) *
                   (1 + native_thread_count)));

            terminate_thread_wakeup_list[0] = terminate_thread_wakeup_event;
            terminate_thread_wakeup_list_index = 1;

            cur = NULL;
            next = NULL;
            for (queue_Scan(&active_Q, cur, next, thread)) {
                if (cur->native_thread) {
                    terminate_thread_wakeup_list[terminate_thread_wakeup_list_index]
                        = cur->t_handle;
                    ++terminate_thread_wakeup_list_index;
                }
            }

            ResetEvent(terminate_thread_wakeup_event);
        }

        pthread_mutex_unlock(&active_Q_mutex);

        /*
         * Time to sleep.  We'll wake up if either of the following happen:
         * 1) Someone sets the terminate_thread_wakeup_event (this will
         *    happen if another native thread gets added to the active_Q)
         * 2) One or more of the native threads terminate
         */
        terminate_thread_wakeup_list_index = WaitForMultipleObjects(
                                                1 + native_thread_count,
                                                terminate_thread_wakeup_list,
                                                FALSE,
                                                INFINITE);

        /*
         * If we awoke from sleep because an event other than
         * terminate_thread_wakeup_event was triggered, it means the
         * specified thread has terminated.  (If more than one thread
         * terminated, we'll handle this first one and loop around--
         * the event's handle will still be triggered, so we just won't
         * block at all when we sleep next time around.)
         */
        if (terminate_thread_wakeup_list_index > 0) {
            pthread_mutex_lock(&active_Q_mutex);

            cur = NULL;
            next = NULL;
            for (queue_Scan(&active_Q, cur, next, thread)) {
                if (cur->t_handle == terminate_thread_wakeup_list[ terminate_thread_wakeup_list_index ])
                    break;
            }

            if (cur != NULL) {
                /*
                 * Cycle through the thread specific data for the specified
                 * thread and call the destructor function for each non-NULL
                 * datum.  Then remove the thread_t from active_Q and put it
                 * back on cache_Q for possible later re-use.
                 */
                if (cur->tsd != NULL) {
                    tsd_free_all(cur->tsd);
                    free(cur->tsd);
                    cur->tsd = NULL;
                }
                queue_Remove(cur);
                put_thread(cur);
            }

            pthread_mutex_unlock(&active_Q_mutex);
        }
    }
    return 0;
}
/*
 * Lazily start the native-thread watcher, or wake it so it re-scans
 * the active_Q for a newly added native thread.
 */
static void pthread_sync_terminate_thread(void) {
    (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));

    if (terminate_thread_handle == INVALID_HANDLE_VALUE) {
        CHAR eventName[MAX_PATH];
        /* Fixed: was "static eventCount = 0;" — implicit int, invalid
         * since C99.  MAX_PATH is ample for this fixed format. */
        static int eventCount = 0;
        sprintf(eventName, "terminate_thread_wakeup_event %d::%d",
                _getpid(), eventCount++);
        terminate_thread_wakeup_event = CreateEvent((LPSECURITY_ATTRIBUTES) 0,
                                                    TRUE, FALSE,
                                                    (LPCTSTR) eventName);
        terminate_thread_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
                                               terminate_thread_routine,
                                               (LPVOID) 0, 0,
                                               &terminate_thread_id);
    } else {
        SetEvent(terminate_thread_wakeup_event);
    }
}
855 * Only support the detached attribute specifier for pthread_create.
856 * Under NT, thread stacks grow automatically as needed.
/* Create a new thread running func(arg).  Only the joinable/detached
 * attribute is honored; returns 0, EINVAL, ENOMEM, or EAGAIN. */
int pthread_create(pthread_t *tid, const pthread_attr_t *attr, void *(*func)(void *), void *arg) {
    int rc = 0;
    pthread_create_t *t = NULL;

    (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));

    if ((tid != NULL) && (func != NULL)) {
        if ((t = (pthread_create_t *) malloc(sizeof(pthread_create_t))) &&
            (t->me = get_thread())) {
            t->func = func;
            t->arg = arg;
            *tid = (pthread_t) t->me;
            if (attr != NULL) {
                t->me->is_joinable = attr->is_joinable;
            } else {
                t->me->is_joinable = PTHREAD_CREATE_JOINABLE;
            }
            t->me->native_thread = 0;
            t->me->tsd = t->tsd;
            /*
             * At the point (before we actually create the thread)
             * we need to add our entry to the active queue.  This ensures
             * us that other threads who may run after this thread returns
             * will find an entry for the create thread regardless of
             * whether the newly created thread has run or not.
             * In the event the thread create fails, we will have temporarily
             * added an entry to the list that was never valid, but we
             * (i.e. the thread that is calling thread_create) are the
             * only one who could possibly know about the bogus entry
             * since we hold the active_Q_mutex.
             */
            pthread_mutex_lock(&active_Q_mutex);
            queue_Prepend(&active_Q, t->me);
            t->me->t_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
                                           afs_pthread_create_stub,
                                           (LPVOID) t, 0,
                                           &t->me->NT_id);
            if (t->me->t_handle == 0) {
                /*
                 * we only free t if the thread wasn't created, otherwise
                 * it's free'd by the new thread.
                 */
                queue_Remove(t->me);
                put_thread(t->me);
                free(t);
                rc = EAGAIN;
            }
            pthread_mutex_unlock(&active_Q_mutex);
        } else {
            /* malloc or get_thread failed; free t if only get_thread did. */
            if (t != NULL) {
                free(t);
            }
            rc = ENOMEM;
        }
    } else {
        rc = EINVAL;
    }
    return rc;
}
918 int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) {
919 int rc = 0;
922 * Only support default attribute -> must pass a NULL pointer for
923 * attr parameter.
925 if ((attr == NULL) && (cond != NULL)) {
926 memset(cond, 0, sizeof(*cond));
927 InitializeCriticalSection(&cond->cs);
928 queue_Init(&cond->waiting_threads);
929 } else {
930 rc = EINVAL;
933 return rc;
/*
 * In order to optimize the performance of condition variables,
 * we maintain a pool of cond_waiter_t's that have been dynamically
 * allocated.  There is no attempt made to garbage collect these -
 * once they have been created, they stay in the cache for the life
 * of the process.
 */

static struct rx_queue waiter_cache;        /* pool of free cond_waiters_t */
static CRITICAL_SECTION waiter_cache_cs;    /* protects waiter_cache */
static int waiter_cache_init;
static pthread_once_t waiter_cache_once = PTHREAD_ONCE_INIT;
949 static void init_waiter_cache(void) {
950 if (waiter_cache_init)
951 return;
953 memset(&waiter_cache_cs, 0, sizeof(waiter_cache_cs));
954 InitializeCriticalSection(&waiter_cache_cs);
955 queue_Init(&waiter_cache);
956 waiter_cache_init = 1;
959 static void cleanup_waiter_cache(void)
961 cond_waiters_t * cur = NULL, * next = NULL;
963 if (waiter_cache_init) {
964 for(queue_Scan(&waiter_cache, cur, next, cond_waiter)) {
965 queue_Remove(cur);
967 CloseHandle(cur->event);
968 free(cur);
971 DeleteCriticalSection(&waiter_cache_cs);
972 waiter_cache_init = 0;
/*
 * Hand out a condvar waiter entry, reusing a pooled one when available;
 * otherwise allocate one with a fresh auto-reset event.  Returns NULL
 * if allocation or event creation fails.
 */
static cond_waiters_t *get_waiter() {
    cond_waiters_t *new = NULL;

    (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));

    EnterCriticalSection(&waiter_cache_cs);

    if (queue_IsEmpty(&waiter_cache)) {
        new = (cond_waiters_t *) malloc(sizeof(cond_waiters_t));
        if (new != NULL) {
            CHAR eventName[MAX_PATH];
            /* Fixed: was "static eventCount = 0;" — implicit int, invalid
             * since C99.  MAX_PATH is ample for this fixed format. */
            static int eventCount = 0;
            sprintf(eventName, "cond_waiters_t %d::%d", _getpid(), eventCount++);
            new->event = CreateEvent((LPSECURITY_ATTRIBUTES) 0, FALSE,
                                     FALSE, (LPCTSTR) eventName);
            if (new->event == NULL) {
                free(new);
                new = NULL;
            }
        }
    } else {
        new = queue_First(&waiter_cache, cond_waiter);
        queue_Remove(new);
    }

    LeaveCriticalSection(&waiter_cache_cs);
    return new;
}
1006 static void put_waiter(cond_waiters_t *old) {
1008 (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
1010 EnterCriticalSection(&waiter_cache_cs);
1011 queue_Prepend(&waiter_cache, old);
1012 LeaveCriticalSection(&waiter_cache_cs);
/* Core of cond_wait/cond_timedwait: enqueue a waiter, drop the mutex,
 * block on the waiter's event for up to 'time' ms, then re-take the
 * mutex.  Returns 0, ETIMEDOUT, EINVAL, or a negative internal code. */
static int cond_wait_internal(pthread_cond_t *cond, pthread_mutex_t *mutex, const DWORD time) {
    int rc = 0;
    cond_waiters_t *my_entry = get_waiter();
    cond_waiters_t *cur, *next;
    int hasnt_been_signalled = 0;

    if ((cond != NULL) && (mutex != NULL) && (my_entry != NULL)) {
        EnterCriticalSection(&cond->cs);
        queue_Append(&cond->waiting_threads, my_entry);
        LeaveCriticalSection(&cond->cs);

        if (pthread_mutex_unlock(mutex) == 0) {
            switch (WaitForSingleObject(my_entry->event, time)) {
            case WAIT_FAILED:
                rc = -1;
                break;
            case WAIT_TIMEOUT:
                rc = ETIMEDOUT;
                /*
                 * This is a royal pain.  We've timed out waiting
                 * for the signal, but between the time out and here
                 * it is possible that we were actually signalled by
                 * another thread.  So we grab the condition lock
                 * and scan the waiting thread queue to see if we are
                 * still there.  If we are, we just remove ourselves.
                 *
                 * If we are no longer listed in the waiter queue,
                 * it means that we were signalled after the time
                 * out occurred and so we have to do another wait
                 * WHICH HAS TO SUCCEED!  In this case, we reset
                 * rc to indicate that we were signalled.
                 *
                 * We have to wait or otherwise, the event
                 * would be cached in the signalled state, which
                 * is wrong.  It might be more efficient to just
                 * close and reopen the event.
                 */
                EnterCriticalSection(&cond->cs);
                for (queue_Scan(&cond->waiting_threads, cur,
                                next, cond_waiter)) {
                    if (cur == my_entry) {
                        hasnt_been_signalled = 1;
                        break;
                    }
                }
                if (hasnt_been_signalled) {
                    /* Genuine timeout: dequeue ourselves. */
                    queue_Remove(cur);
                } else {
                    /* Signalled after the timeout: report success and
                     * drain the event so it isn't cached signalled. */
                    rc = 0;
                    if (!ResetEvent(my_entry->event)) {
                        rc = -6;
                    }
                }
                LeaveCriticalSection(&cond->cs);
                break;
            case WAIT_ABANDONED:
                rc = -2;
                break;
            case WAIT_OBJECT_0:
                rc = 0;
                break;
            default:
                rc = -4;
                break;
            }
            /* Re-acquire the caller's mutex before returning. */
            if (pthread_mutex_lock(mutex) != 0) {
                rc = -3;
            }
        } else {
            rc = EINVAL;
        }
    } else {
        rc = EINVAL;
    }

    if (my_entry != NULL) {
        put_waiter(my_entry);
    }
    return rc;
}
1097 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
1098 int rc = 0;
1100 rc = cond_wait_internal(cond, mutex, INFINITE);
1101 return rc;
1104 int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) {
1105 int rc = 0;
1106 struct _timeb now, then;
1107 afs_uint32 n_milli, t_milli;
1109 if (abstime->tv_nsec < 1000000000) {
1112 * pthread timedwait uses an absolute time, NT uses relative so
1113 * we convert here. The millitm field in the timeb struct is
1114 * unsigned, but we need to do subtraction preserving the sign,
1115 * so we copy the fields into temporary variables.
1117 * WARNING:
1118 * In NT 4.0 SP3, WaitForSingleObject can occassionally timeout
1119 * earlier than requested. Therefore, our pthread_cond_timedwait
1120 * can also return early.
1123 _ftime(&now);
1124 n_milli = now.millitm;
1125 then.time = abstime->tv_sec;
1126 t_milli = abstime->tv_nsec/1000000;
1128 if((then.time > now.time ||
1129 (then.time == now.time && t_milli > n_milli))) {
1130 if((t_milli -= n_milli) < 0) {
1131 t_milli += 1000;
1132 then.time--;
1134 then.time -= now.time;
1136 if ((then.time + (clock() / CLOCKS_PER_SEC)) <= 50000000) {
1138 * Under NT, we can only wait for milliseconds, so we
1139 * round up the wait time here.
1141 rc = cond_wait_internal(cond, mutex,
1142 (DWORD)((then.time * 1000) + (t_milli)));
1143 } else {
1144 rc = EINVAL;
1146 } else {
1147 rc = ETIME;
1149 } else {
1150 rc = EINVAL;
1153 return rc;
1156 int pthread_cond_signal(pthread_cond_t *cond) {
1157 int rc = 0;
1158 cond_waiters_t *release_thread;
1160 if (cond != NULL) {
1161 EnterCriticalSection(&cond->cs);
1164 * remove the first waiting thread from the queue
1165 * and resume his execution
1167 if (queue_IsNotEmpty(&cond->waiting_threads)) {
1168 release_thread = queue_First(&cond->waiting_threads,
1169 cond_waiter);
1170 queue_Remove(release_thread);
1171 if (!SetEvent(release_thread->event)) {
1172 rc = -1;
1176 LeaveCriticalSection(&cond->cs);
1177 } else {
1178 rc = EINVAL;
1181 return rc;
1184 int pthread_cond_broadcast(pthread_cond_t *cond) {
1185 int rc = 0;
1186 cond_waiters_t *release_thread, *next_thread;
1188 if(cond != NULL) {
1189 EnterCriticalSection(&cond->cs);
1192 * Empty the waiting_threads queue.
1194 if (queue_IsNotEmpty(&cond->waiting_threads)) {
1195 for(queue_Scan(&cond->waiting_threads, release_thread,
1196 next_thread, cond_waiter)) {
1197 queue_Remove(release_thread);
1198 if (!SetEvent(release_thread->event)) {
1199 rc = -1;
1204 LeaveCriticalSection(&cond->cs);
1205 } else {
1206 rc = EINVAL;
1209 return rc;
1212 int pthread_cond_destroy(pthread_cond_t *cond) {
1213 int rc = 0;
1215 if (cond != NULL) {
1216 DeleteCriticalSection(&cond->cs);
1217 } else {
1218 rc = EINVAL;
1222 * A previous version of this file had code to check the waiter
1223 * queue and empty it here. This has been removed in the hopes
1224 * that it will aid in debugging.
1227 return rc;
/* Wait for target_thread to terminate and collect its exit status.
 * Exactly one joiner gets the status; self-join returns EDEADLK and
 * a detached/unknown/already-joined target returns ESRCH. */
int pthread_join(pthread_t target_thread, void **status) {
    int rc = 0;
    thread_p me, target;
    thread_p cur, next;

    target = (thread_p) target_thread;
    me = (thread_p) pthread_self();

    if (me != target) {
        /*
         * Check to see that the target thread is joinable and hasn't
         * already been joined.
         */
        pthread_mutex_lock(&active_Q_mutex);

        for (queue_Scan(&active_Q, cur, next, thread)) {
            if (target == cur) break;
        }

        if (target == cur) {
            if ((!target->is_joinable) || (target->has_been_joined)) {
                rc = ESRCH;
            }
        } else {
            rc = ESRCH;
        }

        if (rc) {
            pthread_mutex_unlock(&active_Q_mutex);
            return rc;
        }

        /* Block (releasing active_Q_mutex) until the target reports
         * termination via wait_terminate. */
        target->waiter_count++;
        while (target->running) {
            pthread_cond_wait(&target->wait_terminate, &active_Q_mutex);
        }

        /*
         * Only one waiter gets the status and is allowed to join, all the
         * others get an error.
         */
        if (target->has_been_joined) {
            rc = ESRCH;
        } else {
            target->has_been_joined = 1;
            if (status) {
                *status = target->rc;
            }
        }

        /*
         * If we're the last waiter it is our responsibility to remove
         * this entry from the terminated list and put it back in the
         * cache.
         */
        target->waiter_count--;
        if (target->waiter_count == 0) {
            queue_Remove(target);
            pthread_mutex_unlock(&active_Q_mutex);
            put_thread(target);
        } else {
            pthread_mutex_unlock(&active_Q_mutex);
        }
    } else {
        rc = EDEADLK;
    }
    return rc;
}
1304 * Note that we can't return an error from pthread_getspecific so
1305 * we return a NULL pointer instead.
1308 void *pthread_getspecific(pthread_key_t key) {
1309 void *rc = NULL;
1310 char **tsd = TlsGetValue(tsd_index);
1312 if (tsd == NULL)
1313 return NULL;
1315 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1316 rc = (void *) *(tsd + key);
1319 return rc;
/* Nonzero once pthread_tsd_init has run; lets later calls skip pthread_once. */
static int p_tsd_done;

/*
 * One-time initializer (run via pthread_once) for the mutex that
 * guards the thread-specific-data key table.
 */
static void pthread_tsd_init(void) {
    pthread_mutex_init(&pthread_tsd_mutex, (const pthread_mutexattr_t*)0);
    p_tsd_done = 1;
}
1329 int pthread_key_create(pthread_key_t *keyp, void (*destructor)(void *value)) {
1330 int rc = 0;
1331 int i;
1333 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1334 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1335 for(i=0;i<PTHREAD_KEYS_MAX;i++) {
1336 if (!keys[i].inuse) break;
1339 if (!keys[i].inuse) {
1340 keys[i].inuse = 1;
1341 keys[i].destructor = destructor;
1342 *keyp = i;
1343 } else {
1344 rc = EAGAIN;
1346 pthread_mutex_unlock(&pthread_tsd_mutex);
1347 } else {
1348 rc = -1;
1350 } else {
1351 rc = -2;
1354 return rc;
1357 int pthread_key_delete(pthread_key_t key) {
1358 int rc = 0;
1360 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1361 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1362 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1363 keys[key].inuse = 0;
1364 keys[key].destructor = NULL;
1365 pthread_mutex_unlock(&pthread_tsd_mutex);
1366 } else {
1367 rc = -1;
1369 } else {
1370 rc = EINVAL;
1372 } else {
1373 rc = -2;
1376 return rc;
1379 int pthread_setspecific(pthread_key_t key, const void *value) {
1380 int rc = 0;
1381 char **tsd;
1383 /* make sure all thread-local storage has been allocated */
1384 pthread_self();
1386 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1387 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1388 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1389 if (keys[key].inuse) {
1390 tsd = TlsGetValue(tsd_index);
1391 *(tsd + key) = (char *) value;
1392 } else {
1393 rc = EINVAL;
1395 pthread_mutex_unlock(&pthread_tsd_mutex);
1396 } else {
1397 rc = -1;
1399 } else {
1400 rc = EINVAL;
1402 } else {
1403 rc = -2;
1406 return rc;
/*
 * Return the calling thread's pthread identifier (a thread_p cast to
 * pthread_t), creating the bookkeeping entry on first use.
 *
 * Threads created with pthread_create already have an entry stored in
 * TLS; for a native Windows thread seen here for the first time, a
 * thread_t is allocated, marked non-joinable, given a duplicated
 * thread handle and a zeroed TSD array, stored in TLS, and placed on
 * the active_Q.  pthread_sync_terminate_thread() is then called so a
 * dedicated watcher thread can notice when this native thread exits.
 *
 * NOTE(review): if get_thread() or the TSD malloc fails, the entry is
 * not (fully) registered and NULL may be returned -- callers do not
 * appear to check for that; confirm before relying on it.
 */
pthread_t pthread_self(void) {
    thread_p cur;
    DWORD my_id = GetCurrentThreadId();

    /* Run one-time initializers, skipping pthread_once once done. */
    (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
    (tsd_done || pthread_once(&global_tsd_once, tsd_once));

    pthread_mutex_lock(&active_Q_mutex);

    cur = TlsGetValue (tsd_pthread_index);

    if(!cur) {
        /*
         * This thread's ID was not found in our list of pthread-API client
         * threads (e.g., those threads created via pthread_create). Create
         * an entry for it.
         */
        if ((cur = get_thread()) != NULL) {
            cur->is_joinable = 0;
            cur->NT_id = my_id;
            cur->native_thread = 1;
            DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                            GetCurrentProcess(), &cur->t_handle, 0,
                            TRUE, DUPLICATE_SAME_ACCESS);

            /*
             * We'll also need a place to store key data for this thread
             */
            if ((cur->tsd = malloc(sizeof(char*) * PTHREAD_KEYS_MAX)) != NULL) {
                memset(cur->tsd, 0, (sizeof(char*) * PTHREAD_KEYS_MAX));

                TlsSetValue(tsd_index, (LPVOID)cur->tsd);
                TlsSetValue(tsd_pthread_index, (LPVOID)cur);

                /*
                 * The thread_t structure is complete; add it to the active_Q
                 */
                queue_Prepend(&active_Q, cur);

                /*
                 * We were able to successfully insert a new entry into the
                 * active_Q; however, when this thread terminates, we will need
                 * to know about it. The pthread_sync_terminate_thread() routine
                 * will make sure there is a dedicated thread waiting for any
                 * native-thread entries in the active_Q to terminate.
                 */
                pthread_sync_terminate_thread();
            }
        }
    }

    pthread_mutex_unlock(&active_Q_mutex);

    return (void *) cur;
}
1464 int pthread_equal(pthread_t t1, pthread_t t2) {
1465 return (t1 == t2);
1468 int pthread_attr_destroy(pthread_attr_t *attr) {
1469 int rc = 0;
1471 return rc;
1474 int pthread_attr_init(pthread_attr_t *attr) {
1475 int rc = 0;
1477 if (attr != NULL) {
1478 attr->is_joinable = PTHREAD_CREATE_JOINABLE;
1479 } else {
1480 rc = EINVAL;
1483 return rc;
1486 int pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate) {
1487 int rc = 0;
1489 if ((attr != NULL) && (detachstate != NULL)) {
1490 *detachstate = attr->is_joinable;
1491 } else {
1492 rc = EINVAL;
1494 return rc;
1497 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) {
1498 int rc = 0;
1500 if ((attr != NULL) && ((detachstate == PTHREAD_CREATE_JOINABLE) ||
1501 (detachstate == PTHREAD_CREATE_DETACHED))) {
1502 attr->is_joinable = detachstate;
1503 } else {
1504 rc = EINVAL;
1506 return rc;
/*
 * Terminate the calling thread, making status available to any thread
 * that joins it.  This function does not return.
 */
void pthread_exit(void *status) {
    thread_p me = (thread_p) pthread_self();

    /*
     * Support pthread_exit for threads created by calling pthread_create
     * only.  Do this by using an exception that will transfer control
     * back to afs_pthread_create_stub.  Store away our status before
     * returning.
     *
     * If this turns out to be a native thread, the exception will be
     * unhandled and the process will terminate.
     */
    me->rc = status;
    RaiseException(PTHREAD_EXIT_EXCEPTION, 0, 0, NULL);
}
/*
 * DllMain() -- Entry-point function called by the DllMainCRTStartup()
 *     function in the MSVC runtime DLL (msvcrt.dll).
 *
 *     Runs the library's one-time initializers when the DLL is loaded
 *     into a process and the matching cleanup routines when the DLL is
 *     unloaded.
 *
 *     Note: the system serializes calls to this function.
 */
BOOL WINAPI
DllMain(HINSTANCE dllInstHandle,/* instance handle for this DLL module */
        DWORD reason,           /* reason function is being called */
        LPVOID reserved)
{                               /* reserved for future use */
    switch (reason) {
    case DLL_PROCESS_ATTACH:
        /* library is being attached to a process */
        /* disable thread attach/detach notifications */
        (void)DisableThreadLibraryCalls(dllInstHandle);

        /* Run the one-time initializers eagerly so later callers can
         * take the fast p_*_done paths. */
        pthread_once(&pthread_cache_once, create_once);
        pthread_once(&global_tsd_once, tsd_once);
        pthread_once(&waiter_cache_once, init_waiter_cache);
        return TRUE;

    case DLL_PROCESS_DETACH:
        cleanup_waiter_cache();
        cleanup_global_tsd();
        cleanup_pthread_cache();
        return TRUE;

    default:
        return FALSE;
    }
}