1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 02111-1307, USA.
24 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
25 * file for a list of people on the GLib Team. See the ChangeLog
26 * files for a list of changes. These files are distributed with
27 * GLib at ftp://ftp.gtk.org/pub/gtk/.
30 /* The GMutex, GCond and GPrivate implementations in this file are some
31 * of the lowest-level code in GLib. All other parts of GLib (messages,
32 * memory, slices, etc) assume that they can freely use these facilities
33 * without risking recursion.
35 * As such, these functions are NOT permitted to call any other part of
38 * The thread manipulation functions (create, exit, join, etc.) have
39 * more freedom -- they can do as they please.
#include "gthreadprivate.h"

#include "gmessages.h"
#include "gstrfuncs.h"

#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
74 g_thread_abort (gint status
,
75 const gchar
*function
)
77 fprintf (stderr
, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
78 function
, strerror (status
));
84 static pthread_mutex_t
*
85 g_mutex_impl_new (void)
87 pthread_mutexattr_t
*pattr
= NULL
;
88 pthread_mutex_t
*mutex
;
91 mutex
= malloc (sizeof (pthread_mutex_t
));
92 if G_UNLIKELY (mutex
== NULL
)
93 g_thread_abort (errno
, "malloc");
95 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
97 pthread_mutexattr_t attr
;
98 pthread_mutexattr_init (&attr
);
99 pthread_mutexattr_settype (&attr
, PTHREAD_MUTEX_ADAPTIVE_NP
);
104 if G_UNLIKELY ((status
= pthread_mutex_init (mutex
, pattr
)) != 0)
105 g_thread_abort (status
, "pthread_mutex_init");
107 #ifdef PTHREAD_ADAPTIVE_MUTEX_NP
108 pthread_mutexattr_destroy (&attr
);
/* Destroy and release a mutex created by g_mutex_impl_new().
 * Frees with free() to match the malloc() in g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
121 static pthread_mutex_t
*
122 g_mutex_get_impl (GMutex
*mutex
)
124 pthread_mutex_t
*impl
= g_atomic_pointer_get (&mutex
->p
);
126 if G_UNLIKELY (impl
== NULL
)
128 impl
= g_mutex_impl_new ();
129 if (!g_atomic_pointer_compare_and_exchange (&mutex
->p
, NULL
, impl
))
130 g_mutex_impl_free (impl
);
140 * @mutex: an uninitialized #GMutex
142 * Initializes a #GMutex so that it can be used.
144 * This function is useful to initialize a mutex that has been
145 * allocated on the stack, or as part of a larger structure.
146 * It is not necessary to initialize a mutex that has been
147 * statically allocated.
157 * b = g_new (Blob, 1);
158 * g_mutex_init (&b->m);
161 * To undo the effect of g_mutex_init() when a mutex is no longer
162 * needed, use g_mutex_clear().
164 * Calling g_mutex_init() on an already initialized #GMutex leads
165 * to undefined behaviour.
170 g_mutex_init (GMutex
*mutex
)
172 mutex
->p
= g_mutex_impl_new ();
177 * @mutex: an initialized #GMutex
179 * Frees the resources allocated to a mutex with g_mutex_init().
181 * This function should not be used with a #GMutex that has been
182 * statically allocated.
184 * Calling g_mutex_clear() on a locked mutex leads to undefined
190 g_mutex_clear (GMutex
*mutex
)
192 g_mutex_impl_free (mutex
->p
);
199 * Locks @mutex. If @mutex is already locked by another thread, the
200 * current thread will block until @mutex is unlocked by the other
203 * <note>#GMutex is neither guaranteed to be recursive nor to be
204 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
205 * already been locked by the same thread results in undefined behaviour
206 * (including but not limited to deadlocks).</note>
209 g_mutex_lock (GMutex
*mutex
)
213 if G_UNLIKELY ((status
= pthread_mutex_lock (g_mutex_get_impl (mutex
))) != 0)
214 g_thread_abort (status
, "pthread_mutex_lock");
221 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
222 * call for @mutex, it will become unblocked and can lock @mutex itself.
224 * Calling g_mutex_unlock() on a mutex that is not locked by the
225 * current thread leads to undefined behaviour.
228 g_mutex_unlock (GMutex
*mutex
)
232 if G_UNLIKELY ((status
= pthread_mutex_unlock (g_mutex_get_impl (mutex
))) != 0)
233 g_thread_abort (status
, "pthread_mutex_unlock");
240 * Tries to lock @mutex. If @mutex is already locked by another thread,
241 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
244 * <note>#GMutex is neither guaranteed to be recursive nor to be
245 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
246 * already been locked by the same thread results in undefined behaviour
247 * (including but not limited to deadlocks or arbitrary return values).
250 * Returns: %TRUE if @mutex could be locked
253 g_mutex_trylock (GMutex
*mutex
)
257 if G_LIKELY ((status
= pthread_mutex_trylock (g_mutex_get_impl (mutex
))) == 0)
260 if G_UNLIKELY (status
!= EBUSY
)
261 g_thread_abort (status
, "pthread_mutex_trylock");
268 static pthread_mutex_t
*
269 g_rec_mutex_impl_new (void)
271 pthread_mutexattr_t attr
;
272 pthread_mutex_t
*mutex
;
274 mutex
= g_slice_new (pthread_mutex_t
);
275 pthread_mutexattr_init (&attr
);
276 pthread_mutexattr_settype (&attr
, PTHREAD_MUTEX_RECURSIVE
);
277 pthread_mutex_init (mutex
, &attr
);
278 pthread_mutexattr_destroy (&attr
);
284 g_rec_mutex_impl_free (pthread_mutex_t
*mutex
)
286 pthread_mutex_destroy (mutex
);
287 g_slice_free (pthread_mutex_t
, mutex
);
290 static pthread_mutex_t
*
291 g_rec_mutex_get_impl (GRecMutex
*rec_mutex
)
293 pthread_mutex_t
*impl
= g_atomic_pointer_get (&rec_mutex
->p
);
295 if G_UNLIKELY (impl
== NULL
)
297 impl
= g_rec_mutex_impl_new ();
298 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex
->p
, NULL
, impl
))
299 g_rec_mutex_impl_free (impl
);
308 * @rec_mutex: an uninitialized #GRecMutex
310 * Initializes a #GRecMutex so that it can be used.
312 * This function is useful to initialize a recursive mutex
313 * that has been allocated on the stack, or as part of a larger
316 * It is not necessary to initialise a recursive mutex that has been
317 * statically allocated.
327 * b = g_new (Blob, 1);
328 * g_rec_mutex_init (&b->m);
331 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
332 * leads to undefined behaviour.
334 * To undo the effect of g_rec_mutex_init() when a recursive mutex
335 * is no longer needed, use g_rec_mutex_clear().
340 g_rec_mutex_init (GRecMutex
*rec_mutex
)
342 rec_mutex
->p
= g_rec_mutex_impl_new ();
347 * @rec_mutex: an initialized #GRecMutex
349 * Frees the resources allocated to a recursive mutex with
350 * g_rec_mutex_init().
352 * This function should not be used with a #GRecMutex that has been
353 * statically allocated.
355 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
356 * to undefined behaviour.
361 g_rec_mutex_clear (GRecMutex
*rec_mutex
)
363 g_rec_mutex_impl_free (rec_mutex
->p
);
368 * @rec_mutex: a #GRecMutex
370 * Locks @rec_mutex. If @rec_mutex is already locked by another
371 * thread, the current thread will block until @rec_mutex is
372 * unlocked by the other thread. If @rec_mutex is already locked
373 * by the current thread, the 'lock count' of @rec_mutex is increased.
374 * The mutex will only become available again when it is unlocked
375 * as many times as it has been locked.
380 g_rec_mutex_lock (GRecMutex
*mutex
)
382 pthread_mutex_lock (g_rec_mutex_get_impl (mutex
));
386 * g_rec_mutex_unlock:
387 * @rec_mutex: a #GRecMutex
389 * Unlocks @rec_mutex. If another thread is blocked in a
390 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
391 * and can lock @rec_mutex itself.
393 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
394 * locked by the current thread leads to undefined behaviour.
399 g_rec_mutex_unlock (GRecMutex
*rec_mutex
)
401 pthread_mutex_unlock (rec_mutex
->p
);
405 * g_rec_mutex_trylock:
406 * @rec_mutex: a #GRecMutex
408 * Tries to lock @rec_mutex. If @rec_mutex is already locked
409 * by another thread, it immediately returns %FALSE. Otherwise
410 * it locks @rec_mutex and returns %TRUE.
412 * Returns: %TRUE if @rec_mutex could be locked
417 g_rec_mutex_trylock (GRecMutex
*rec_mutex
)
419 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex
)) != 0)
427 static pthread_rwlock_t
*
428 g_rw_lock_impl_new (void)
430 pthread_rwlock_t
*rwlock
;
433 rwlock
= malloc (sizeof (pthread_rwlock_t
));
434 if G_UNLIKELY (rwlock
== NULL
)
435 g_thread_abort (errno
, "malloc");
437 if G_UNLIKELY ((status
= pthread_rwlock_init (rwlock
, NULL
)) != 0)
438 g_thread_abort (status
, "pthread_rwlock_init");
444 g_rw_lock_impl_free (pthread_rwlock_t
*rwlock
)
446 pthread_rwlock_destroy (rwlock
);
450 static pthread_rwlock_t
*
451 g_rw_lock_get_impl (GRWLock
*lock
)
453 pthread_rwlock_t
*impl
= g_atomic_pointer_get (&lock
->p
);
455 if G_UNLIKELY (impl
== NULL
)
457 impl
= g_rw_lock_impl_new ();
458 if (!g_atomic_pointer_compare_and_exchange (&lock
->p
, NULL
, impl
))
459 g_rw_lock_impl_free (impl
);
468 * @rw_lock: an uninitialized #GRWLock
470 * Initializes a #GRWLock so that it can be used.
472 * This function is useful to initialize a lock that has been
473 * allocated on the stack, or as part of a larger structure. It is not
474 * necessary to initialise a reader-writer lock that has been statically
485 * b = g_new (Blob, 1);
486 * g_rw_lock_init (&b->l);
489 * To undo the effect of g_rw_lock_init() when a lock is no longer
490 * needed, use g_rw_lock_clear().
492 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
493 * to undefined behaviour.
498 g_rw_lock_init (GRWLock
*rw_lock
)
500 rw_lock
->p
= g_rw_lock_impl_new ();
505 * @rw_lock: an initialized #GRWLock
507 * Frees the resources allocated to a lock with g_rw_lock_init().
509 * This function should not be used with a #GRWLock that has been
510 * statically allocated.
512 * Calling g_rw_lock_clear() when any thread holds the lock
513 * leads to undefined behaviour.
518 g_rw_lock_clear (GRWLock
*rw_lock
)
520 g_rw_lock_impl_free (rw_lock
->p
);
524 * g_rw_lock_writer_lock:
525 * @rw_lock: a #GRWLock
527 * Obtain a write lock on @rw_lock. If any thread already holds
528 * a read or write lock on @rw_lock, the current thread will block
529 * until all other threads have dropped their locks on @rw_lock.
534 g_rw_lock_writer_lock (GRWLock
*rw_lock
)
536 pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock
));
540 * g_rw_lock_writer_trylock:
541 * @rw_lock: a #GRWLock
543 * Tries to obtain a write lock on @rw_lock. If any other thread holds
544 * a read or write lock on @rw_lock, it immediately returns %FALSE.
545 * Otherwise it locks @rw_lock and returns %TRUE.
547 * Returns: %TRUE if @rw_lock could be locked
552 g_rw_lock_writer_trylock (GRWLock
*rw_lock
)
554 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock
)) != 0)
561 * g_rw_lock_writer_unlock:
562 * @rw_lock: a #GRWLock
564 * Release a write lock on @rw_lock.
566 * Calling g_rw_lock_writer_unlock() on a lock that is not held
567 * by the current thread leads to undefined behaviour.
572 g_rw_lock_writer_unlock (GRWLock
*rw_lock
)
574 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock
));
578 * g_rw_lock_reader_lock:
579 * @rw_lock: a #GRWLock
581 * Obtain a read lock on @rw_lock. If another thread currently holds
582 * the write lock on @rw_lock or blocks waiting for it, the current
583 * thread will block. Read locks can be taken recursively.
585 * It is implementation-defined how many threads are allowed to
586 * hold read locks on the same lock simultaneously.
591 g_rw_lock_reader_lock (GRWLock
*rw_lock
)
593 pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock
));
597 * g_rw_lock_reader_trylock:
598 * @rw_lock: a #GRWLock
600 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
601 * the read lock was successfully obtained. Otherwise it
604 * Returns: %TRUE if @rw_lock could be locked
609 g_rw_lock_reader_trylock (GRWLock
*rw_lock
)
611 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock
)) != 0)
618 * g_rw_lock_reader_unlock:
619 * @rw_lock: a #GRWLock
621 * Release a read lock on @rw_lock.
623 * Calling g_rw_lock_reader_unlock() on a lock that is not held
624 * by the current thread leads to undefined behaviour.
629 g_rw_lock_reader_unlock (GRWLock
*rw_lock
)
631 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock
));
636 static pthread_cond_t
*
637 g_cond_impl_new (void)
639 pthread_condattr_t attr
;
640 pthread_cond_t
*cond
;
643 pthread_condattr_init (&attr
);
644 #if defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
645 pthread_condattr_setclock (&attr
, CLOCK_MONOTONIC
);
648 cond
= malloc (sizeof (pthread_cond_t
));
649 if G_UNLIKELY (cond
== NULL
)
650 g_thread_abort (errno
, "malloc");
652 if G_UNLIKELY ((status
= pthread_cond_init (cond
, &attr
)) != 0)
653 g_thread_abort (status
, "pthread_cond_init");
655 pthread_condattr_destroy (&attr
);
/* Destroy and release a cond created by g_cond_impl_new();
 * free() matches the malloc() there. */
static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  free (cond);
}
667 static pthread_cond_t
*
668 g_cond_get_impl (GCond
*cond
)
670 pthread_cond_t
*impl
= g_atomic_pointer_get (&cond
->p
);
672 if G_UNLIKELY (impl
== NULL
)
674 impl
= g_cond_impl_new ();
675 if (!g_atomic_pointer_compare_and_exchange (&cond
->p
, NULL
, impl
))
676 g_cond_impl_free (impl
);
685 * @cond: an uninitialized #GCond
687 * Initialises a #GCond so that it can be used.
689 * This function is useful to initialise a #GCond that has been
690 * allocated as part of a larger structure. It is not necessary to
691 * initialise a #GCond that has been statically allocated.
693 * To undo the effect of g_cond_init() when a #GCond is no longer
694 * needed, use g_cond_clear().
696 * Calling g_cond_init() on an already-initialised #GCond leads
697 * to undefined behaviour.
702 g_cond_init (GCond
*cond
)
704 cond
->p
= g_cond_impl_new ();
709 * @cond: an initialised #GCond
711 * Frees the resources allocated to a #GCond with g_cond_init().
713 * This function should not be used with a #GCond that has been
714 * statically allocated.
716 * Calling g_cond_clear() for a #GCond on which threads are
717 * blocking leads to undefined behaviour.
722 g_cond_clear (GCond
*cond
)
724 g_cond_impl_free (cond
->p
);
730 * @mutex: a #GMutex that is currently locked
732 * Atomically releases @mutex and waits until @cond is signalled.
733 * When this function returns, @mutex is locked again and owned by the
736 * When using condition variables, it is possible that a spurious wakeup
737 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
738 * not called). It's also possible that a stolen wakeup may occur.
739 * This is when g_cond_signal() is called, but another thread acquires
740 * @mutex before this thread and modifies the state of the program in
741 * such a way that when g_cond_wait() is able to return, the expected
742 * condition is no longer met.
744 * For this reason, g_cond_wait() must always be used in a loop. See
745 * the documentation for #GCond for a complete example.
748 g_cond_wait (GCond
*cond
,
753 if G_UNLIKELY ((status
= pthread_cond_wait (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
))) != 0)
754 g_thread_abort (status
, "pthread_cond_wait");
761 * If threads are waiting for @cond, at least one of them is unblocked.
762 * If no threads are waiting for @cond, this function has no effect.
763 * It is good practice to hold the same lock as the waiting thread
764 * while calling this function, though not required.
767 g_cond_signal (GCond
*cond
)
771 if G_UNLIKELY ((status
= pthread_cond_signal (g_cond_get_impl (cond
))) != 0)
772 g_thread_abort (status
, "pthread_cond_signal");
779 * If threads are waiting for @cond, all of them are unblocked.
780 * If no threads are waiting for @cond, this function has no effect.
781 * It is good practice to lock the same mutex as the waiting threads
782 * while calling this function, though not required.
785 g_cond_broadcast (GCond
*cond
)
789 if G_UNLIKELY ((status
= pthread_cond_broadcast (g_cond_get_impl (cond
))) != 0)
790 g_thread_abort (status
, "pthread_cond_broadcast");
796 * @mutex: a #GMutex that is currently locked
797 * @end_time: the monotonic time to wait until
799 * Waits until either @cond is signalled or @end_time has passed.
801 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
802 * could occur. For that reason, waiting on a condition variable should
803 * always be in a loop, based on an explicitly-checked predicate.
805 * %TRUE is returned if the condition variable was signalled (or in the
806 * case of a spurious wakeup). %FALSE is returned if @end_time has
809 * The following code shows how to correctly perform a timed wait on a
810 * condition variable (extended the example presented in the
811 * documentation for #GCond):
815 * pop_data_timed (void)
820 * g_mutex_lock (&data_mutex);
822 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
823 * while (!current_data)
824 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
826 * // timeout has passed.
827 * g_mutex_unlock (&data_mutex);
831 * // there is data for us
832 * data = current_data;
833 * current_data = NULL;
835 * g_mutex_unlock (&data_mutex);
841 * Notice that the end time is calculated once, before entering the
842 * loop and reused. This is the motivation behind the use of absolute
843 * time on this API -- if a relative time of 5 seconds were passed
844 * directly to the call and a spurious wakeup occurred, the program would
845 * have to start over waiting again (which would lead to a total wait
846 * time of more than 5 seconds).
848 * Returns: %TRUE on a signal, %FALSE on a timeout
852 g_cond_wait_until (GCond
*cond
,
859 ts
.tv_sec
= end_time
/ 1000000;
860 ts
.tv_nsec
= (end_time
% 1000000) * 1000;
862 #if defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
863 if ((status
= pthread_cond_timedwait_monotonic (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
), &ts
)) == 0)
865 #elif defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC_NP)
866 if ((status
= pthread_cond_timedwait_monotonic_np (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
), &ts
)) == 0)
869 /* Pray that the cond is actually using the monotonic clock */
870 if ((status
= pthread_cond_timedwait (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
), &ts
)) == 0)
874 if G_UNLIKELY (status
!= ETIMEDOUT
)
875 g_thread_abort (status
, "pthread_cond_timedwait");
885 * The #GPrivate struct is an opaque data structure to represent a
886 * thread-local data key. It is approximately equivalent to the
887 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
888 * TlsSetValue()/TlsGetValue() on Windows.
890 * If you don't already know why you might want this functionality,
891 * then you probably don't need it.
893 * #GPrivate is a very limited resource (as far as 128 per program,
894 * shared between all libraries). It is also not possible to destroy a
895 * #GPrivate after it has been used. As such, it is only ever acceptable
896 * to use #GPrivate in static scope, and even then sparingly so.
898 * See G_PRIVATE_INIT() for a couple of examples.
900 * The #GPrivate structure should be considered opaque. It should only
901 * be accessed via the <function>g_private_</function> functions.
906 * @notify: a #GDestroyNotify
908 * A macro to assist with the static initialisation of a #GPrivate.
910 * This macro is useful for the case that a #GDestroyNotify function
911 * should be associated the key. This is needed when the key will be
912 * used to point at memory that should be deallocated when the thread
915 * Additionally, the #GDestroyNotify will also be called on the previous
916 * value stored in the key when g_private_replace() is used.
918 * If no #GDestroyNotify is needed, then use of this macro is not
919 * required -- if the #GPrivate is declared in static scope then it will
920 * be properly initialised by default (ie: to all zeros). See the
924 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
926 * // return value should not be freed
928 * get_local_name (void)
930 * return g_private_get (&name_key);
934 * set_local_name (const gchar *name)
936 * g_private_replace (&name_key, g_strdup (name));
940 * static GPrivate count_key; // no free function
943 * get_local_count (void)
945 * return GPOINTER_TO_INT (g_private_get (&count_key));
949 * set_local_count (gint count)
951 * g_private_set (&count_key, GINT_TO_POINTER (count));
958 static pthread_key_t
*
959 g_private_impl_new (GDestroyNotify notify
)
964 key
= malloc (sizeof (pthread_key_t
));
965 if G_UNLIKELY (key
== NULL
)
966 g_thread_abort (errno
, "malloc");
967 status
= pthread_key_create (key
, notify
);
968 if G_UNLIKELY (status
!= 0)
969 g_thread_abort (status
, "pthread_key_create");
975 g_private_impl_free (pthread_key_t
*key
)
979 status
= pthread_key_delete (*key
);
980 if G_UNLIKELY (status
!= 0)
981 g_thread_abort (status
, "pthread_key_delete");
985 static pthread_key_t
*
986 g_private_get_impl (GPrivate
*key
)
988 pthread_key_t
*impl
= g_atomic_pointer_get (&key
->p
);
990 if G_UNLIKELY (impl
== NULL
)
992 impl
= g_private_impl_new (key
->notify
);
993 if (!g_atomic_pointer_compare_and_exchange (&key
->p
, NULL
, impl
))
995 g_private_impl_free (impl
);
1007 * Returns the current value of the thread local variable @key.
1009 * If the value has not yet been set in this thread, %NULL is returned.
1010 * Values are never copied between threads (when a new thread is
1011 * created, for example).
1013 * Returns: the thread-local value
1016 g_private_get (GPrivate
*key
)
1018 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1019 return pthread_getspecific (*g_private_get_impl (key
));
1025 * @value: the new value
1027 * Sets the thread local variable @key to have the value @value in the
1030 * This function differs from g_private_replace() in the following way:
1031 * the #GDestroyNotify for @key is not called on the old value.
1034 g_private_set (GPrivate
*key
,
1039 if G_UNLIKELY ((status
= pthread_setspecific (*g_private_get_impl (key
), value
)) != 0)
1040 g_thread_abort (status
, "pthread_setspecific");
1044 * g_private_replace:
1046 * @value: the new value
1048 * Sets the thread local variable @key to have the value @value in the
1051 * This function differs from g_private_set() in the following way: if
1052 * the previous value was non-%NULL then the #GDestroyNotify handler for
1053 * @key is run on it.
1058 g_private_replace (GPrivate
*key
,
1061 pthread_key_t
*impl
= g_private_get_impl (key
);
1065 old
= pthread_getspecific (*impl
);
1066 if (old
&& key
->notify
)
1069 if G_UNLIKELY ((status
= pthread_setspecific (*impl
, value
)) != 0)
1070 g_thread_abort (status
, "pthread_setspecific");
/* Abort via g_error() if @err (an errno-style code) is non-zero,
 * reporting the failing expression @name with file/line context.
 * posix_check_cmd() runs a pthread call and checks its return value. */
#define posix_check_err(err, name) G_STMT_START{			\
  int error = (err); 							\
  if (error)	 		 		 			\
    g_error ("file %s: line %d (%s): error '%s' during '%s'",		\
             __FILE__, __LINE__, G_STRFUNC,				\
             g_strerror (error), name);					\
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1089 pthread_t system_thread
;
1095 g_system_thread_free (GRealThread
*thread
)
1097 GThreadPosix
*pt
= (GThreadPosix
*) thread
;
1100 pthread_detach (pt
->system_thread
);
1102 g_mutex_clear (&pt
->lock
);
1104 g_slice_free (GThreadPosix
, pt
);
1108 g_system_thread_new (GThreadFunc thread_func
,
1112 GThreadPosix
*thread
;
1113 pthread_attr_t attr
;
1116 thread
= g_slice_new0 (GThreadPosix
);
1118 posix_check_cmd (pthread_attr_init (&attr
));
1120 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
1123 #ifdef _SC_THREAD_STACK_MIN
1124 stack_size
= MAX (sysconf (_SC_THREAD_STACK_MIN
), stack_size
);
1125 #endif /* _SC_THREAD_STACK_MIN */
1126 /* No error check here, because some systems can't do it and
1127 * we simply don't want threads to fail because of that. */
1128 pthread_attr_setstacksize (&attr
, stack_size
);
1130 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1132 ret
= pthread_create (&thread
->system_thread
, &attr
, (void* (*)(void*))thread_func
, thread
);
1134 posix_check_cmd (pthread_attr_destroy (&attr
));
1138 g_set_error (error
, G_THREAD_ERROR
, G_THREAD_ERROR_AGAIN
,
1139 "Error creating thread: %s", g_strerror (ret
));
1140 g_slice_free (GThreadPosix
, thread
);
1144 posix_check_err (ret
, "pthread_create");
1146 g_mutex_init (&thread
->lock
);
1148 return (GRealThread
*) thread
;
1154 * Causes the calling thread to voluntarily relinquish the CPU, so
1155 * that other threads can run.
1157 * This function is often used as a method to make busy wait less evil.
/* Voluntarily relinquish the CPU so other runnable threads may run. */
void
g_thread_yield (void)
{
  sched_yield ();
}
1166 g_system_thread_wait (GRealThread
*thread
)
1168 GThreadPosix
*pt
= (GThreadPosix
*) thread
;
1170 g_mutex_lock (&pt
->lock
);
1174 posix_check_cmd (pthread_join (pt
->system_thread
, NULL
));
1178 g_mutex_unlock (&pt
->lock
);
/* Terminate the calling thread. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1188 g_system_thread_set_name (const gchar
*name
)
1190 #ifdef HAVE_SYS_PRCTL_H
1192 prctl (PR_SET_NAME
, name
, 0, 0, 0, 0);
1198 /* vim:set foldmethod=marker: */