/*
 * Copyright 2008, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Native Client threads library
 */
#include <sys/errno.h>
#include <sys/unistd.h>

#include "pthread_types.h"
#include "../libnacl/tls.h"
void __pthread_memset(void *s, int c, size_t size);
void __newlib_thread_init();
/* Thread management global variables */
const int __nc_kMaxCachedMemoryBlocks = 50;
/* array of TDB pointers */
struct NaClHashTable __nc_thread_threads;

int32_t __nc_thread_id_counter;
int32_t __nc_thread_counter_reached_32_bit;
/* mutex used to synchronize thread management code */
pthread_mutex_t __nc_thread_management_lock;
/* condition variable that gets signaled when all the threads
 * except the main thread have terminated
 */
pthread_cond_t __nc_last_thread_cond;
/* number of threads currently running in this NaCl module */
int __nc_running_threads_counter;
/* Array for TSD keys status management. */
tsd_status_t __nc_thread_specific_storage[PTHREAD_KEYS_MAX];
/* we have two queues of memory blocks - one for each type */
STAILQ_HEAD(tailhead, entry) __nc_thread_memory_blocks[2];
/* We need a counter for each queue to keep track of number of blocks */
int __nc_memory_block_counter[2];
#define ALIGN_ADDRESS(Address, AlignmentRequirement) \
    (void*)(((int32_t)(Address) + ((AlignmentRequirement) - 1)) \
            & ~((AlignmentRequirement) - 1))
/* The macro assumes that the TDB is stored immediately after the TLS template
 * (i.e. at higher address) and uses symbols defined by the linker */
#define TLS_TO_TDB(Tls) \
    ((nc_thread_descriptor_t *)((char *)(Tls) + TLS_SIZE))

/* TLS memory starts right after the block header. The result should be 16-byte
 * aligned thanks to the padding
 */
#define NODE_TO_DATA(TlsNode) \
    ((char*)(TlsNode) + sizeof(nc_thread_memory_block_t))

#define ALIGNED_NODE_TO_DATA(TlsNode) \
    ALIGN_ADDRESS((NODE_TO_DATA(TlsNode)), TLS_ALIGNMENT)

#define NODE_TO_TDB(TlsNode) \
    TLS_TO_TDB(ALIGNED_NODE_TO_DATA(TlsNode))
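
/*
 * Illustrative sketch (not from the original source): for a TLS_AND_TDB_MEMORY
 * block the macros above compose as follows:
 *
 *   TlsNode ----------------------> +------------------------------+
 *                                   | nc_thread_memory_block_t     |
 *   NODE_TO_DATA(TlsNode) --------> +------------------------------+
 *                                   | padding up to TLS_ALIGNMENT  |
 *   ALIGNED_NODE_TO_DATA(TlsNode) > +------------------------------+
 *                                   | TLS area (TLS_SIZE bytes)    |
 *   NODE_TO_TDB(TlsNode) ---------> +------------------------------+
 *                                   | nc_thread_descriptor_t       |
 *                                   +------------------------------+
 *
 * As a worked example, ALIGN_ADDRESS(0x1001, 16) evaluates to
 * (0x1001 + 15) & ~15 == 0x1010.
 */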
/* Internal functions */

static inline nc_thread_descriptor_t *nc_get_tdb() {
  nc_thread_descriptor_t *tdb = NULL;
  /* the first word at %gs:0 holds the address of the current thread's TDB */
  __asm__ __volatile__ ("mov %%gs:0, %0" : "=r" (tdb));
  return tdb;
}
static int nc_allocate_thread_id_mu(nc_basic_thread_data_t *basic_data) {
  /* assuming the global lock is locked */
  if (!__nc_thread_counter_reached_32_bit) {
    /* just allocate the next id */
    basic_data->thread_id = __nc_thread_id_counter;
    if (MAX_THREAD_ID == __nc_thread_id_counter) {
      /* no more higher ids, next threads will have to search for a free id */
      __nc_thread_counter_reached_32_bit = 1;
    } else {
      ++__nc_thread_id_counter;
    }
  } else {
    /*
     * the counter is useless at this point, because some older threads
     * may be still running
     */
    uint32_t i;
    for (i = 0; i <= MAX_THREAD_ID; ++i) {
      if (!HASH_ID_EXISTS(&__nc_thread_threads, i)) {
        basic_data->thread_id = i;
        break;
      }
    }
    if (i > MAX_THREAD_ID) {
      /* all the IDs are in use?! */
      return EAGAIN;
    }
  }

  HASH_TABLE_INSERT(&__nc_thread_threads, basic_data, hash_entry);
  return 0;
}
static nc_basic_thread_data_t *nc_find_tdb_mu(uint32_t id) {
  /* assuming the global lock is locked */
  return HASH_FIND_ID(&__nc_thread_threads,
                      id,
                      nc_basic_thread_data,
                      hash_entry);
}
void nc_thread_starter(nc_thread_function func,
                       void *state) {
  void *retval = NULL;
  nc_thread_descriptor_t *tdb = NULL;
  tdb = nc_get_tdb();
  __newlib_thread_init();
  retval = tdb->start_func(tdb->state);
  /* if the function returns, terminate the thread */
  tdb->exiting_without_returning = 0;
  pthread_exit(retval);
  /* TODO - add assert */
}
static nc_thread_memory_block_t *nc_allocate_memory_block_mu(
    nc_thread_memory_block_type_t type) {
  /* assume the lock is held!!! */
  if (type >= MAX_MEMORY_TYPE)
    return NULL;
  struct tailhead *head = &__nc_thread_memory_blocks[type];

  /* We need to know the size even if we find a free node - to memset it to 0 */
  int required_size = 0;
  switch (type) {
    case THREAD_STACK_MEMORY:
      required_size = NC_DEFAULT_STACK_SIZE + (TLS_ALIGNMENT - 1);
      break;
    case TLS_AND_TDB_MEMORY:
      required_size = TLS_SIZE +
                      sizeof(nc_thread_descriptor_t) +
                      (TLS_ALIGNMENT - 1);
      break;
    case MAX_MEMORY_TYPE:
    default:
      return NULL;
  }

  if (!STAILQ_EMPTY(head)) {
    /* try to get one from queue */
    nc_thread_memory_block_t *node = STAILQ_FIRST(head);
    /*
     * On average the memory blocks will be marked as not used in the same order
     * as they are added to the queue, therefore there is no need to check the
     * next queue entries if the first one is still in use.
     */
    if (0 == node->is_used) {
      STAILQ_REMOVE_HEAD(head, entries);
      --__nc_memory_block_counter[type];
      while (__nc_memory_block_counter[type] > __nc_kMaxCachedMemoryBlocks) {
        /*
         * We have too many blocks in the queue - try to release some.
         * The maximum number of memory blocks to keep in the queue
         * is almost arbitrary and can be tuned.
         * The main limitation is that if we keep too many
         * blocks in the queue, the NaCl app will run out of memory,
         * since the default thread stack size is 512K.
         * TODO: we might give up reusing stack entries once we
         * support variable stack size.
         */
        nc_thread_memory_block_t *tmp = STAILQ_FIRST(head);
        if (0 == tmp->is_used) {
          STAILQ_REMOVE_HEAD(head, entries);
          --__nc_memory_block_counter[type];
          free(tmp);
        } else {
          /*
           * Stop once we find a block that is still in use,
           * since probably there is no point to continue
           */
          break;
        }
      }

      memset(node, 0, sizeof(*node));
      node->size = required_size;
      node->is_used = 1;
      return node;
    }
  }

  /* no available blocks of the required type - allocate one */
  nc_thread_memory_block_t *node =
      malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size));
  if (NULL == node)
    return NULL;
  memset(node, 0, sizeof(*node));
  node->size = required_size;
  node->is_used = 1;
  return node;
}
static void nc_free_memory_block_mu(nc_thread_memory_block_type_t type,
                                    nc_thread_memory_block_t *node) {
  /* assume the lock is held !!! */
  struct tailhead *head = &__nc_thread_memory_blocks[type];
  STAILQ_INSERT_TAIL(head, node, entries);
  ++__nc_memory_block_counter[type];
}
static void nc_release_basic_data_mu(nc_basic_thread_data_t *basic_data) {
  if (NACL_PTHREAD_ILLEGAL_THREAD_ID != basic_data->thread_id) {
    HASH_REMOVE(&__nc_thread_threads,
                basic_data->thread_id,
                nc_basic_thread_data,
                hash_entry);
  }
  /* join_condvar can be initialized only if tls_node exists */
  pthread_cond_destroy(&basic_data->join_condvar);
  free(basic_data);
}
static void nc_release_tls_node(nc_thread_memory_block_t *tls_node) {
  if (NULL != tls_node) {
    nc_thread_descriptor_t *tdb_to_release = NODE_TO_TDB(tls_node);
    tdb_to_release->basic_data->tdb = NULL;
    tls_node->is_used = 0;
    nc_free_memory_block_mu(TLS_AND_TDB_MEMORY, tls_node);
  }
}
/* internal initialization spinlock */

/*
 * TODO - Make static. Use local labels to prevent redefinition errors
 * when the definition is moved to a header file.
 */
inline void nc_spinlock_lock(int *lock) {
  __asm__("mov %0, %%ecx \n\t"
          "mov $0x1, %%eax \n\t"
          "loop: xchg (%%ecx), %%eax \n\t"
          "test %%eax, %%eax \n\t"
          "jnz loop \n\t"
          : "=r"(lock) : "0"(lock));
}
inline void nc_spinlock_unlock(int *lock) {
  __asm__("mov %0, %%ecx \n\t"
          "xor %%eax, %%eax \n\t"  /* store 0 to release the lock */
          "xchg (%%ecx), %%eax" : "=r"(lock));
}
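
/*
 * Illustrative usage of the spinlocks above (a sketch, not part of the
 * library API): the lock word must start as 0; xchg swaps in 1 until the
 * previous value reads back as 0.
 *
 *   static int init_lock = 0;
 *
 *   nc_spinlock_lock(&init_lock);
 *   ... one-time initialization ...
 *   nc_spinlock_unlock(&init_lock);
 */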
uint32_t __nacl_tdb_id_function(nc_hash_entry_t *entry) {
  nc_basic_thread_data_t *basic_data =
      HASH_ENTRY_TO_ENTRY_TYPE(entry, nc_basic_thread_data, hash_entry);
  return basic_data->thread_id;
}
/* Initialize a newly allocated TDB to some default values */
static int nc_tdb_init(nc_thread_descriptor_t *tdb,
                       nc_basic_thread_data_t *basic_data) {
  tdb->basic_data = basic_data;
  basic_data->tdb = tdb;
  /* Put an illegal value, should be set when the ID is allocated */
  tdb->basic_data->thread_id = NACL_PTHREAD_ILLEGAL_THREAD_ID;
  tdb->basic_data->retval = 0;
  tdb->basic_data->status = THREAD_RUNNING;
  tdb->basic_data->hash_entry.id_function = __nacl_tdb_id_function;

  tdb->joinable = PTHREAD_CREATE_JOINABLE;
  tdb->join_waiting = 0;

  tdb->thread_specific_data = NULL;
  tdb->tls_node = NULL;
  tdb->stack_node = NULL;

  tdb->start_func = NULL;
  tdb->state = NULL;

  /*
   * A call to pthread_exit without returning from the thread function
   * should be recognized
   */
  tdb->exiting_without_returning = 1;

  /* Imitate PTHREAD_COND_INITIALIZER - we cannot use it directly here,
   * since this is not variable initialization.
   */
  nc_pthread_condvar_ctor(&basic_data->join_condvar);
  return 0;
}
/* Will be called from the library startup code,
 * which always happens on the application's main thread
 */
int __pthread_initialize() {
  int retval = 0;
  /* allocate TLS+TDB area */
  /* We allocate the basic data immediately after TDB */
  __pthread_initialize_minimal(sizeof(nc_thread_descriptor_t) +
                               sizeof(nc_basic_thread_data_t));

  /* At this point GS is already initialized */
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  nc_basic_thread_data_t *basic_data = (nc_basic_thread_data_t *)(tdb + 1);

  HASH_INIT(&__nc_thread_threads);

  retval = pthread_mutex_init(&__nc_thread_management_lock, NULL);
  /* If the initialization fails we just fail the whole process */
  /*
   * TODO: check that the return value
   * is actually checked somewhere
   */
  if (0 != retval)
    return retval;

  retval = pthread_cond_init(&__nc_last_thread_cond, NULL);
  if (0 != retval)
    return retval;

  STAILQ_INIT(&__nc_thread_memory_blocks[0]);
  STAILQ_INIT(&__nc_thread_memory_blocks[1]);

  __nc_running_threads_counter = 1; /* the main thread */

  /* Initialize the main thread TDB */
  nc_tdb_init(tdb, basic_data);
  /* ID 0 is reserved for the main thread */
  basic_data->thread_id = 0;
  __nc_thread_id_counter = 1;
  HASH_TABLE_INSERT(&__nc_thread_threads, basic_data, hash_entry);
  return retval;
}
/* pthread functions */
int pthread_create(pthread_t *thread_id,
                   pthread_attr_t *attr,
                   void *(*start_routine) (void *),
                   void *arg) {
  /* declare the variables outside of the while scope */
  nc_thread_memory_block_t *stack_node = NULL;
  void *thread_stack = NULL;
  nc_thread_descriptor_t *new_tdb = NULL;
  nc_basic_thread_data_t *new_basic_data = NULL;
  nc_thread_memory_block_t *tls_node = NULL;
  void *tls = NULL;
  int retval = EAGAIN;

  /* TODO - right now a single lock is used, try to optimize? */
  pthread_mutex_lock(&__nc_thread_management_lock);

  do {
    /* Allocate the tls block */
    tls_node = nc_allocate_memory_block_mu(TLS_AND_TDB_MEMORY);
    if (NULL == tls_node)
      break;

    /* Ensure that tls pointer is aligned, since the compiler assumes that */
    tls = ALIGNED_NODE_TO_DATA(tls_node);

    /* copy the TLS template into the TLS area */
    memcpy(tls, TLS_TDATA_START, TLS_TDATA_SIZE);
    memset((char *) tls + TLS_TDATA_SIZE, 0, TLS_TBSS_SIZE);

    new_tdb = TLS_TO_TDB(tls);
    /* TODO: consider creating a pool of basic_data structs,
     * similar to stack and TLS+TDB (probably when adding the support for
     * variable stack size).
     */
    new_basic_data = malloc(sizeof(*new_basic_data));
    if (NULL == new_basic_data)
      break;

    nc_tdb_init(new_tdb, new_basic_data);
    new_tdb->tls_node = tls_node;

    retval = nc_allocate_thread_id_mu(new_basic_data);
    if (0 != retval)
      break;

    /* all the required members of the tdb must be initialized before
     * the thread is started and actually before the global lock is released,
     * since another thread can call pthread_join() or pthread_detach()
     */
    new_tdb->start_func = start_routine;
    new_tdb->state = arg;
    if (NULL != attr) {
      new_tdb->joinable = attr->joinable;
    }

    /* Allocate the stack for the thread */
    stack_node = nc_allocate_memory_block_mu(THREAD_STACK_MEMORY);
    if (NULL == stack_node) {
      retval = EAGAIN;
      break;
    }
    thread_stack = NODE_TO_DATA(stack_node);
    thread_stack = ALIGN_ADDRESS(thread_stack, 16);
    new_tdb->stack_node = stack_node;
  } while (0);

  pthread_mutex_unlock(&__nc_thread_management_lock);
  if (0 != retval)
    goto ret; /* error */

  /*
   * Calculate the stack location - it should be 12 mod 16 aligned.
   * We subtract 4 since thread_stack is 0 mod 16 aligned and
   * the stack size is a multiple of 16.
   */
  void *esp = (void*)((char*)thread_stack + NC_DEFAULT_STACK_SIZE - 4);
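  /*
   * Worked example (illustrative): with a hypothetical thread_stack of
   * 0x1000 (0 mod 16) and a 512K stack, esp = 0x1000 + 0x80000 - 4 =
   * 0x80ffc, and 0x80ffc % 16 == 12 as required.
   */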
  /*
   * Put 0 on the stack as a return address - it is needed to satisfy
   * the alignment requirement when we call __nacl_exit_thread syscall
   * when the thread terminates.
   */
  *(uint32_t *) esp = 0;

  /* start the thread */
  retval = __nacl_create_thread((void*)nc_thread_starter,
                                esp,
                                new_tdb,
                                sizeof(nc_thread_descriptor_t));
  if (0 != retval)
    goto ret;
  *thread_id = new_basic_data->thread_id;
  pthread_mutex_lock(&__nc_thread_management_lock);
  /* TODO : replace with atomic increment? */
  ++__nc_running_threads_counter;
  pthread_mutex_unlock(&__nc_thread_management_lock);
  return 0;

ret:
  /* failed to create a thread */
  pthread_mutex_lock(&__nc_thread_management_lock);

  nc_release_tls_node(tls_node);
  if (new_basic_data) {
    nc_release_basic_data_mu(new_basic_data);
  }
  if (stack_node) {
    stack_node->is_used = 0;
    nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);
  }

  pthread_mutex_unlock(&__nc_thread_management_lock);
  return retval;
}
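
/*
 * Illustrative usage (a sketch, not part of this file): creating a joinable
 * thread with this implementation and collecting its result. The attr
 * argument only carries the detach state here.
 *
 *   static void *worker(void *arg) {
 *     return arg;
 *   }
 *
 *   pthread_t tid;
 *   pthread_attr_t attr;
 *   void *result;
 *   pthread_attr_init(&attr);
 *   if (0 == pthread_create(&tid, &attr, worker, (void *) 42))
 *     pthread_join(tid, &result);
 */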
int __pthread_shutdown() {
  pthread_mutex_lock(&__nc_thread_management_lock);

  while (1 != __nc_running_threads_counter) {
    pthread_cond_wait(&__nc_last_thread_cond, &__nc_thread_management_lock);
  }

  pthread_mutex_unlock(&__nc_thread_management_lock);
  return 0;
}
void pthread_exit (void* retval) {
  /* get all we need from the tdb before releasing it */
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  nc_thread_memory_block_t *stack_node = tdb->stack_node;
  int32_t *is_used = &stack_node->is_used;
  nc_basic_thread_data_t *basic_data = tdb->basic_data;
  pthread_t thread_id = basic_data->thread_id;
  int joinable = tdb->joinable;
  unsigned int generation;
  /* call the destruction functions for TSD */
  if (tdb->exiting_without_returning) {
    int destruction_iterations;
    for (destruction_iterations = 0;
         destruction_iterations < PTHREAD_DESTRUCTOR_ITERATIONS;
         destruction_iterations++) {
      int i;
      int nothing_to_do = 1;
      for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
        void (*destructor)(void*) =
            __nc_thread_specific_storage[i].destruction_callback;
        if ((KEY_ALLOCATED(generation =
                 __nc_thread_specific_storage[i].generation)) &&
            (tdb->thread_specific_data != NULL) &&
            (tdb->thread_specific_data[i].generation == generation) &&
            (tdb->thread_specific_data[i].ptr != NULL) &&
            (NULL != destructor)) {
          void *tmp = tdb->thread_specific_data[i].ptr;
          tdb->thread_specific_data[i].ptr = NULL;
          (*destructor)(tmp);
          nothing_to_do = 0;
        }
      }
      if (nothing_to_do)
        break;
    }
  }
  if (0 == thread_id) {
    /* This is the main thread - wait for other threads to complete */
    __pthread_shutdown();
    /* TODO: should we call exit() or __nacl_exit_thread? */
    __nacl_exit_thread(NULL);
  }
  pthread_mutex_lock(&__nc_thread_management_lock);

  basic_data->retval = retval;

  /* If somebody is waiting for this thread, signal */
  basic_data->status = THREAD_TERMINATED;
  pthread_cond_signal(&basic_data->join_condvar);
  /*
   * We can release TLS+TDB - thread id and its return value are still
   * kept in basic_data
   */
  nc_release_tls_node(tdb->tls_node);

  if (!joinable) {
    nc_release_basic_data_mu(basic_data);
  }
  /* now add the stack to the list but keep it marked as used */
  nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);

  --__nc_running_threads_counter;
  /* TODO: is this synchronization enough? Main thread's
   * pthread_exit can still complete before the last thread
   * actually exits.
   */
  if (1 == __nc_running_threads_counter) {
    pthread_cond_signal(&__nc_last_thread_cond);
  }

  pthread_mutex_unlock(&__nc_thread_management_lock);
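  /*
   * Note (assumption inferred from the comment above): passing
   * &stack_node->is_used lets the system clear the flag only once this
   * stack is no longer in use, since the block was re-queued above while
   * still marked as used.
   */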
  __nacl_exit_thread(is_used);
}
int pthread_join(pthread_t thread_id, void **thread_return) {
  int retval = 0;

  if (pthread_self() == thread_id) {
    /* joining itself would deadlock */
    return EDEADLK;
  }

  pthread_mutex_lock(&__nc_thread_management_lock);
  nc_basic_thread_data_t *basic_data = nc_find_tdb_mu(thread_id);
  if (NULL == basic_data) {
    /* no thread with this id */
    retval = ESRCH;
    goto ret;
  }

  if (basic_data->tdb != NULL) {
    /* The thread is still running */
    nc_thread_descriptor_t *joined_tdb = basic_data->tdb;
    if (!joined_tdb->joinable || joined_tdb->join_waiting) {
      /* the thread is detached or another thread is waiting to join */
      retval = EINVAL;
      goto ret;
    }
    joined_tdb->join_waiting = 1;
    /* wait till the thread terminates */
    while (THREAD_TERMINATED != basic_data->status) {
      pthread_cond_wait(&basic_data->join_condvar,
                        &__nc_thread_management_lock);
    }
  }
  /* The thread has already terminated */
  /* save the return value */
  if (thread_return != NULL) {
    *thread_return = basic_data->retval;
  }

  /* release the resources */
  nc_release_basic_data_mu(basic_data);

ret:
  pthread_mutex_unlock(&__nc_thread_management_lock);
  return retval;
}
int pthread_detach(pthread_t thread_id) {
  int retval = 0;
  /* TODO - can be optimized using InterlockedExchange
   * once it's available */
  pthread_mutex_lock(&__nc_thread_management_lock);
  nc_basic_thread_data_t *basic_data = nc_find_tdb_mu(thread_id);
  if (NULL == basic_data) {
    /* no thread with this id */
    retval = ESRCH;
    goto ret;
  }

  nc_thread_descriptor_t *detached_tdb = basic_data->tdb;

  if (NULL == detached_tdb) {
    /* The thread has already terminated */
    nc_release_basic_data_mu(basic_data);
  } else {
    if (!detached_tdb->join_waiting) {
      if (detached_tdb->joinable) {
        detached_tdb->joinable = 0;
      } else {
        /* already detached */
        retval = EINVAL;
      }
    }
    /* another thread is already waiting to join - do nothing */
  }

ret:
  pthread_mutex_unlock(&__nc_thread_management_lock);
  return retval;
}
pthread_t pthread_self() {
  /* get the tdb pointer from gs and use it to return the thread handle */
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  return tdb->basic_data->thread_id;
}
int pthread_equal (pthread_t thread1, pthread_t thread2) {
  return (thread1 == thread2);
}
/* Thread-specific data functions */

int pthread_key_create (pthread_key_t *key,
                        void (*destr_function) (void *)) {
  unsigned int i;
  unsigned int generation;
  for (i = 0; i < PTHREAD_KEYS_MAX; ++i) {
    if (!KEY_ALLOCATED(generation =
            __nc_thread_specific_storage[i].generation) &&
        /* A key is not reusable after UINT_MAX-1 generations -
         * the last even generation */
        (__nc_thread_specific_storage[i].generation != UINT_MAX - 1) &&
        (generation == CompareAndSwap(
            (AtomicWord*)&__nc_thread_specific_storage[i].generation,
            generation,
            generation + 1))) {
      __nc_thread_specific_storage[i].destruction_callback = destr_function;
      *key = i;
      return 0;
    }
  }
  return EAGAIN;
}
int pthread_key_delete (pthread_key_t key) {
  unsigned int generation;
  if ((key > PTHREAD_KEYS_MAX - 1) ||
      !KEY_ALLOCATED(generation =
          __nc_thread_specific_storage[key].generation)) {
    return EINVAL;
  }

  if (generation !=
      CompareAndSwap((AtomicWord*)&__nc_thread_specific_storage[key].generation,
                     generation,
                     generation + 1)) {
    /* Somebody incremented the generation counter before we did, i.e. the key
     * has already been deleted.
     */
    return EINVAL;
  }

  __nc_thread_specific_storage[key].destruction_callback = NULL;
  return 0;
}
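
/*
 * Illustrative note on the generation scheme (inferred from the code above):
 * a key's generation is even while free and odd while allocated, since both
 * pthread_key_create and pthread_key_delete increment it by one. For example:
 *
 *   generation 0 -> key free
 *   generation 1 -> key allocated (values stamped with generation 1)
 *   generation 2 -> key deleted; stale per-thread values no longer match
 *
 * This is why a key becomes unusable at UINT_MAX - 1, the last even value.
 */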
int pthread_setspecific (pthread_key_t key,
                         const void *pointer) {
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  unsigned int generation;

  if ((key > PTHREAD_KEYS_MAX - 1) ||
      !KEY_ALLOCATED(generation =
          __nc_thread_specific_storage[key].generation)) {
    return EINVAL;
  }

  /* Allocate the memory for this thread's TSD - we actually need it */
  if (NULL == tdb->thread_specific_data) {
    tdb->thread_specific_data = calloc(PTHREAD_KEYS_MAX, sizeof(tsd_t));
    if (tdb->thread_specific_data == NULL) {
      return ENOMEM;
    }
  }

  tdb->thread_specific_data[key].generation = generation;
  tdb->thread_specific_data[key].ptr = (void*) pointer;
  return 0;
}
void *pthread_getspecific (pthread_key_t key) {
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  if (!tdb->thread_specific_data ||
      (key > PTHREAD_KEYS_MAX - 1) ||
      (tdb->thread_specific_data[key].generation !=
       __nc_thread_specific_storage[key].generation)) {
    return NULL;
  }
  return tdb->thread_specific_data[key].ptr;
}
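
/*
 * Illustrative usage of the TSD functions (a sketch, not part of this file):
 *
 *   static pthread_key_t buf_key;
 *
 *   pthread_key_create(&buf_key, free);        // destructor runs at exit
 *   pthread_setspecific(buf_key, malloc(64));  // this thread's private slot
 *   char *buf = (char *) pthread_getspecific(buf_key);
 */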
int pthread_attr_init (pthread_attr_t *attr) {
  attr->joinable = PTHREAD_CREATE_JOINABLE;
  return 0;
}
int pthread_attr_destroy (pthread_attr_t *attr) {
  /* nothing to destroy */
  return 0;
}
int pthread_attr_setdetachstate (pthread_attr_t *attr,
                                 int detachstate) {
  attr->joinable = detachstate;
  return 0;
}
int pthread_attr_getdetachstate (pthread_attr_t *attr,
                                 int *detachstate) {
  return attr->joinable;
}
void __local_lock_init(_LOCK_T* lock);
void __local_lock_init_recursive(_LOCK_T* lock);
void __local_lock_close(_LOCK_T* lock);
void __local_lock_close_recursive(_LOCK_T* lock);
void __local_lock_acquire(_LOCK_T* lock);
void __local_lock_acquire_recursive(_LOCK_T* lock);
int __local_lock_try_acquire(_LOCK_T* lock);
int __local_lock_try_acquire_recursive(_LOCK_T* lock);
void __local_lock_release(_LOCK_T* lock);
void __local_lock_release_recursive(_LOCK_T* lock);
void __local_lock_init(_LOCK_T* lock) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_FAST_NP);
  pthread_mutex_init((pthread_mutex_t*)lock, &attr);
}
void __local_lock_init_recursive(_LOCK_T* lock) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
  pthread_mutex_init((pthread_mutex_t*)lock, &attr);
}
void __local_lock_close(_LOCK_T* lock) {
  pthread_mutex_destroy((pthread_mutex_t*)lock);
}
void __local_lock_close_recursive(_LOCK_T* lock) {
  __local_lock_close(lock);
}
void __local_lock_acquire(_LOCK_T* lock) {
  if (0 == __nc_thread_id_counter) {
    /*
     * pthread library is not initialized yet - there is only one thread.
     * Calling pthread_mutex_lock will cause an access violation because it
     * will attempt to access the TDB which is not initialized yet
     */
    return;
  }
  pthread_mutex_lock((pthread_mutex_t*)lock);
}
void __local_lock_acquire_recursive(_LOCK_T* lock) {
  __local_lock_acquire(lock);
}
int __local_lock_try_acquire(_LOCK_T* lock) {
  if (0 == __nc_thread_id_counter) {
    /*
     * pthread library is not initialized yet - there is only one thread.
     * Calling pthread_mutex_lock will cause an access violation because it
     * will attempt to access the TDB which is not initialized yet
     */
    return 0;
  }
  return pthread_mutex_trylock((pthread_mutex_t*)lock);
}
int __local_lock_try_acquire_recursive(_LOCK_T* lock) {
  return __local_lock_try_acquire(lock);
}
void __local_lock_release(_LOCK_T* lock) {
  if (0 == __nc_thread_id_counter) {
    /*
     * pthread library is not initialized yet - there is only one thread.
     * Calling pthread_mutex_lock will cause an access violation because it
     * will attempt to access the TDB which is not initialized yet.
     * NOTE: there is no race condition here because the value of the counter
     * cannot change while the lock is held - the startup process is
     * single-threaded.
     */
    return;
  }
  pthread_mutex_unlock((pthread_mutex_t*)lock);
}
void __local_lock_release_recursive(_LOCK_T* lock) {
  __local_lock_release(lock);
}
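
/*
 * Illustrative note (assumption, not from this file): newlib's internal
 * locking macros are expected to route to the __local_lock_* functions
 * above, roughly:
 *
 *   __local_lock_acquire_recursive(&hypothetical_malloc_lock);
 *   ... touch the heap ...
 *   __local_lock_release_recursive(&hypothetical_malloc_lock);
 *
 * which is why each function degrades to a no-op while
 * __nc_thread_id_counter is still 0 and the TDB does not exist yet.
 */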