1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
23 * file for a list of people on the GLib Team. See the ChangeLog
24 * files for a list of changes. These files are distributed with
25 * GLib at ftp://ftp.gtk.org/pub/gtk/.
28 /* The GMutex, GCond and GPrivate implementations in this file are some
29 * of the lowest-level code in GLib. All other parts of GLib (messages,
30 * memory, slices, etc) assume that they can freely use these facilities
31 * without risking recursion.
33 * As such, these functions are NOT permitted to call any other part of
36 * The thread manipulation functions (create, exit, join, etc.) have
37 * more freedom -- they can do as they please.
44 #include "gthreadprivate.h"
46 #include "gmessages.h"
47 #include "gstrfuncs.h"
67 /* clang defines __ATOMIC_SEQ_CST but doesn't support the GCC extension */
68 #if defined(HAVE_FUTEX) && defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
69 #define USE_NATIVE_MUTEX
73 g_thread_abort (gint status
,
74 const gchar
*function
)
76 fprintf (stderr
, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
77 function
, strerror (status
));
83 #if !defined(USE_NATIVE_MUTEX)
85 static pthread_mutex_t
*
86 g_mutex_impl_new (void)
88 pthread_mutexattr_t
*pattr
= NULL
;
89 pthread_mutex_t
*mutex
;
91 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
92 pthread_mutexattr_t attr
;
95 mutex
= malloc (sizeof (pthread_mutex_t
));
96 if G_UNLIKELY (mutex
== NULL
)
97 g_thread_abort (errno
, "malloc");
99 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
100 pthread_mutexattr_init (&attr
);
101 pthread_mutexattr_settype (&attr
, PTHREAD_MUTEX_ADAPTIVE_NP
);
105 if G_UNLIKELY ((status
= pthread_mutex_init (mutex
, pattr
)) != 0)
106 g_thread_abort (status
, "pthread_mutex_init");
108 #ifdef PTHREAD_ADAPTIVE_MUTEX_NP
109 pthread_mutexattr_destroy (&attr
);
/* Destroy and release a mutex created by g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  /* The storage came from malloc() in g_mutex_impl_new(); destroying the
   * mutex alone would leak it. */
  free (mutex);
}
122 static inline pthread_mutex_t
*
123 g_mutex_get_impl (GMutex
*mutex
)
125 pthread_mutex_t
*impl
= g_atomic_pointer_get (&mutex
->p
);
127 if G_UNLIKELY (impl
== NULL
)
129 impl
= g_mutex_impl_new ();
130 if (!g_atomic_pointer_compare_and_exchange (&mutex
->p
, NULL
, impl
))
131 g_mutex_impl_free (impl
);
141 * @mutex: an uninitialized #GMutex
143 * Initializes a #GMutex so that it can be used.
145 * This function is useful to initialize a mutex that has been
146 * allocated on the stack, or as part of a larger structure.
147 * It is not necessary to initialize a mutex that has been
148 * statically allocated.
150 * |[<!-- language="C" -->
158 * b = g_new (Blob, 1);
159 * g_mutex_init (&b->m);
162 * To undo the effect of g_mutex_init() when a mutex is no longer
163 * needed, use g_mutex_clear().
165 * Calling g_mutex_init() on an already initialized #GMutex leads
166 * to undefined behaviour.
171 g_mutex_init (GMutex
*mutex
)
173 mutex
->p
= g_mutex_impl_new ();
178 * @mutex: an initialized #GMutex
180 * Frees the resources allocated to a mutex with g_mutex_init().
182 * This function should not be used with a #GMutex that has been
183 * statically allocated.
185 * Calling g_mutex_clear() on a locked mutex leads to undefined
191 g_mutex_clear (GMutex
*mutex
)
193 g_mutex_impl_free (mutex
->p
);
200 * Locks @mutex. If @mutex is already locked by another thread, the
201 * current thread will block until @mutex is unlocked by the other
204 * #GMutex is neither guaranteed to be recursive nor to be
205 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
206 * already been locked by the same thread results in undefined behaviour
207 * (including but not limited to deadlocks).
210 g_mutex_lock (GMutex
*mutex
)
214 if G_UNLIKELY ((status
= pthread_mutex_lock (g_mutex_get_impl (mutex
))) != 0)
215 g_thread_abort (status
, "pthread_mutex_lock");
222 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
223 * call for @mutex, it will become unblocked and can lock @mutex itself.
225 * Calling g_mutex_unlock() on a mutex that is not locked by the
226 * current thread leads to undefined behaviour.
229 g_mutex_unlock (GMutex
*mutex
)
233 if G_UNLIKELY ((status
= pthread_mutex_unlock (g_mutex_get_impl (mutex
))) != 0)
234 g_thread_abort (status
, "pthread_mutex_unlock");
241 * Tries to lock @mutex. If @mutex is already locked by another thread,
242 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
245 * #GMutex is neither guaranteed to be recursive nor to be
246 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
247 * already been locked by the same thread results in undefined behaviour
248 * (including but not limited to deadlocks or arbitrary return values).
250 * Returns: %TRUE if @mutex could be locked
253 g_mutex_trylock (GMutex
*mutex
)
257 if G_LIKELY ((status
= pthread_mutex_trylock (g_mutex_get_impl (mutex
))) == 0)
260 if G_UNLIKELY (status
!= EBUSY
)
261 g_thread_abort (status
, "pthread_mutex_trylock");
266 #endif /* !defined(USE_NATIVE_MUTEX) */
270 static pthread_mutex_t
*
271 g_rec_mutex_impl_new (void)
273 pthread_mutexattr_t attr
;
274 pthread_mutex_t
*mutex
;
276 mutex
= malloc (sizeof (pthread_mutex_t
));
277 if G_UNLIKELY (mutex
== NULL
)
278 g_thread_abort (errno
, "malloc");
280 pthread_mutexattr_init (&attr
);
281 pthread_mutexattr_settype (&attr
, PTHREAD_MUTEX_RECURSIVE
);
282 pthread_mutex_init (mutex
, &attr
);
283 pthread_mutexattr_destroy (&attr
);
/* Destroy and release a mutex created by g_rec_mutex_impl_new(). */
static void
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  /* Storage came from malloc() in g_rec_mutex_impl_new(). */
  free (mutex);
}
295 static inline pthread_mutex_t
*
296 g_rec_mutex_get_impl (GRecMutex
*rec_mutex
)
298 pthread_mutex_t
*impl
= g_atomic_pointer_get (&rec_mutex
->p
);
300 if G_UNLIKELY (impl
== NULL
)
302 impl
= g_rec_mutex_impl_new ();
303 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex
->p
, NULL
, impl
))
304 g_rec_mutex_impl_free (impl
);
313 * @rec_mutex: an uninitialized #GRecMutex
315 * Initializes a #GRecMutex so that it can be used.
317 * This function is useful to initialize a recursive mutex
318 * that has been allocated on the stack, or as part of a larger
321 * It is not necessary to initialise a recursive mutex that has been
322 * statically allocated.
324 * |[<!-- language="C" -->
332 * b = g_new (Blob, 1);
333 * g_rec_mutex_init (&b->m);
336 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
337 * leads to undefined behaviour.
339 * To undo the effect of g_rec_mutex_init() when a recursive mutex
340 * is no longer needed, use g_rec_mutex_clear().
345 g_rec_mutex_init (GRecMutex
*rec_mutex
)
347 rec_mutex
->p
= g_rec_mutex_impl_new ();
352 * @rec_mutex: an initialized #GRecMutex
354 * Frees the resources allocated to a recursive mutex with
355 * g_rec_mutex_init().
357 * This function should not be used with a #GRecMutex that has been
358 * statically allocated.
360 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
361 * to undefined behaviour.
366 g_rec_mutex_clear (GRecMutex
*rec_mutex
)
368 g_rec_mutex_impl_free (rec_mutex
->p
);
373 * @rec_mutex: a #GRecMutex
375 * Locks @rec_mutex. If @rec_mutex is already locked by another
376 * thread, the current thread will block until @rec_mutex is
377 * unlocked by the other thread. If @rec_mutex is already locked
378 * by the current thread, the 'lock count' of @rec_mutex is increased.
379 * The mutex will only become available again when it is unlocked
380 * as many times as it has been locked.
385 g_rec_mutex_lock (GRecMutex
*mutex
)
387 pthread_mutex_lock (g_rec_mutex_get_impl (mutex
));
391 * g_rec_mutex_unlock:
392 * @rec_mutex: a #GRecMutex
394 * Unlocks @rec_mutex. If another thread is blocked in a
395 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
396 * and can lock @rec_mutex itself.
398 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
399 * locked by the current thread leads to undefined behaviour.
404 g_rec_mutex_unlock (GRecMutex
*rec_mutex
)
406 pthread_mutex_unlock (rec_mutex
->p
);
410 * g_rec_mutex_trylock:
411 * @rec_mutex: a #GRecMutex
413 * Tries to lock @rec_mutex. If @rec_mutex is already locked
414 * by another thread, it immediately returns %FALSE. Otherwise
415 * it locks @rec_mutex and returns %TRUE.
417 * Returns: %TRUE if @rec_mutex could be locked
422 g_rec_mutex_trylock (GRecMutex
*rec_mutex
)
424 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex
)) != 0)
432 static pthread_rwlock_t
*
433 g_rw_lock_impl_new (void)
435 pthread_rwlock_t
*rwlock
;
438 rwlock
= malloc (sizeof (pthread_rwlock_t
));
439 if G_UNLIKELY (rwlock
== NULL
)
440 g_thread_abort (errno
, "malloc");
442 if G_UNLIKELY ((status
= pthread_rwlock_init (rwlock
, NULL
)) != 0)
443 g_thread_abort (status
, "pthread_rwlock_init");
449 g_rw_lock_impl_free (pthread_rwlock_t
*rwlock
)
451 pthread_rwlock_destroy (rwlock
);
455 static inline pthread_rwlock_t
*
456 g_rw_lock_get_impl (GRWLock
*lock
)
458 pthread_rwlock_t
*impl
= g_atomic_pointer_get (&lock
->p
);
460 if G_UNLIKELY (impl
== NULL
)
462 impl
= g_rw_lock_impl_new ();
463 if (!g_atomic_pointer_compare_and_exchange (&lock
->p
, NULL
, impl
))
464 g_rw_lock_impl_free (impl
);
473 * @rw_lock: an uninitialized #GRWLock
475 * Initializes a #GRWLock so that it can be used.
477 * This function is useful to initialize a lock that has been
478 * allocated on the stack, or as part of a larger structure. It is not
479 * necessary to initialise a reader-writer lock that has been statically
482 * |[<!-- language="C" -->
490 * b = g_new (Blob, 1);
491 * g_rw_lock_init (&b->l);
494 * To undo the effect of g_rw_lock_init() when a lock is no longer
495 * needed, use g_rw_lock_clear().
497 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
498 * to undefined behaviour.
503 g_rw_lock_init (GRWLock
*rw_lock
)
505 rw_lock
->p
= g_rw_lock_impl_new ();
510 * @rw_lock: an initialized #GRWLock
512 * Frees the resources allocated to a lock with g_rw_lock_init().
514 * This function should not be used with a #GRWLock that has been
515 * statically allocated.
517 * Calling g_rw_lock_clear() when any thread holds the lock
518 * leads to undefined behaviour.
523 g_rw_lock_clear (GRWLock
*rw_lock
)
525 g_rw_lock_impl_free (rw_lock
->p
);
529 * g_rw_lock_writer_lock:
530 * @rw_lock: a #GRWLock
532 * Obtain a write lock on @rw_lock. If any thread already holds
533 * a read or write lock on @rw_lock, the current thread will block
534 * until all other threads have dropped their locks on @rw_lock.
539 g_rw_lock_writer_lock (GRWLock
*rw_lock
)
541 pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock
));
545 * g_rw_lock_writer_trylock:
546 * @rw_lock: a #GRWLock
548 * Tries to obtain a write lock on @rw_lock. If any other thread holds
549 * a read or write lock on @rw_lock, it immediately returns %FALSE.
550 * Otherwise it locks @rw_lock and returns %TRUE.
552 * Returns: %TRUE if @rw_lock could be locked
557 g_rw_lock_writer_trylock (GRWLock
*rw_lock
)
559 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock
)) != 0)
566 * g_rw_lock_writer_unlock:
567 * @rw_lock: a #GRWLock
569 * Release a write lock on @rw_lock.
571 * Calling g_rw_lock_writer_unlock() on a lock that is not held
572 * by the current thread leads to undefined behaviour.
577 g_rw_lock_writer_unlock (GRWLock
*rw_lock
)
579 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock
));
583 * g_rw_lock_reader_lock:
584 * @rw_lock: a #GRWLock
586 * Obtain a read lock on @rw_lock. If another thread currently holds
587 * the write lock on @rw_lock or blocks waiting for it, the current
588 * thread will block. Read locks can be taken recursively.
590 * It is implementation-defined how many threads are allowed to
591 * hold read locks on the same lock simultaneously.
596 g_rw_lock_reader_lock (GRWLock
*rw_lock
)
598 pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock
));
602 * g_rw_lock_reader_trylock:
603 * @rw_lock: a #GRWLock
605 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
606 * the read lock was successfully obtained. Otherwise it
609 * Returns: %TRUE if @rw_lock could be locked
614 g_rw_lock_reader_trylock (GRWLock
*rw_lock
)
616 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock
)) != 0)
623 * g_rw_lock_reader_unlock:
624 * @rw_lock: a #GRWLock
626 * Release a read lock on @rw_lock.
628 * Calling g_rw_lock_reader_unlock() on a lock that is not held
629 * by the current thread leads to undefined behaviour.
634 g_rw_lock_reader_unlock (GRWLock
*rw_lock
)
636 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock
));
641 #if !defined(USE_NATIVE_MUTEX)
643 static pthread_cond_t
*
644 g_cond_impl_new (void)
646 pthread_condattr_t attr
;
647 pthread_cond_t
*cond
;
650 pthread_condattr_init (&attr
);
652 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
653 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
654 if G_UNLIKELY ((status
= pthread_condattr_setclock (&attr
, CLOCK_MONOTONIC
)) != 0)
655 g_thread_abort (status
, "pthread_condattr_setclock");
657 #error Cannot support GCond on your platform.
660 cond
= malloc (sizeof (pthread_cond_t
));
661 if G_UNLIKELY (cond
== NULL
)
662 g_thread_abort (errno
, "malloc");
664 if G_UNLIKELY ((status
= pthread_cond_init (cond
, &attr
)) != 0)
665 g_thread_abort (status
, "pthread_cond_init");
667 pthread_condattr_destroy (&attr
);
/* Destroy and release a condition variable created by g_cond_impl_new(). */
static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  /* Storage came from malloc() in g_cond_impl_new(). */
  free (cond);
}
679 static inline pthread_cond_t
*
680 g_cond_get_impl (GCond
*cond
)
682 pthread_cond_t
*impl
= g_atomic_pointer_get (&cond
->p
);
684 if G_UNLIKELY (impl
== NULL
)
686 impl
= g_cond_impl_new ();
687 if (!g_atomic_pointer_compare_and_exchange (&cond
->p
, NULL
, impl
))
688 g_cond_impl_free (impl
);
697 * @cond: an uninitialized #GCond
699 * Initialises a #GCond so that it can be used.
701 * This function is useful to initialise a #GCond that has been
702 * allocated as part of a larger structure. It is not necessary to
703 * initialise a #GCond that has been statically allocated.
705 * To undo the effect of g_cond_init() when a #GCond is no longer
706 * needed, use g_cond_clear().
708 * Calling g_cond_init() on an already-initialised #GCond leads
709 * to undefined behaviour.
714 g_cond_init (GCond
*cond
)
716 cond
->p
= g_cond_impl_new ();
721 * @cond: an initialised #GCond
723 * Frees the resources allocated to a #GCond with g_cond_init().
725 * This function should not be used with a #GCond that has been
726 * statically allocated.
728 * Calling g_cond_clear() for a #GCond on which threads are
729 * blocking leads to undefined behaviour.
734 g_cond_clear (GCond
*cond
)
736 g_cond_impl_free (cond
->p
);
742 * @mutex: a #GMutex that is currently locked
744 * Atomically releases @mutex and waits until @cond is signalled.
745 * When this function returns, @mutex is locked again and owned by the
748 * When using condition variables, it is possible that a spurious wakeup
749 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
750 * not called). It's also possible that a stolen wakeup may occur.
751 * This is when g_cond_signal() is called, but another thread acquires
752 * @mutex before this thread and modifies the state of the program in
753 * such a way that when g_cond_wait() is able to return, the expected
754 * condition is no longer met.
756 * For this reason, g_cond_wait() must always be used in a loop. See
757 * the documentation for #GCond for a complete example.
760 g_cond_wait (GCond
*cond
,
765 if G_UNLIKELY ((status
= pthread_cond_wait (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
))) != 0)
766 g_thread_abort (status
, "pthread_cond_wait");
773 * If threads are waiting for @cond, at least one of them is unblocked.
774 * If no threads are waiting for @cond, this function has no effect.
775 * It is good practice to hold the same lock as the waiting thread
776 * while calling this function, though not required.
779 g_cond_signal (GCond
*cond
)
783 if G_UNLIKELY ((status
= pthread_cond_signal (g_cond_get_impl (cond
))) != 0)
784 g_thread_abort (status
, "pthread_cond_signal");
791 * If threads are waiting for @cond, all of them are unblocked.
792 * If no threads are waiting for @cond, this function has no effect.
793 * It is good practice to lock the same mutex as the waiting threads
794 * while calling this function, though not required.
797 g_cond_broadcast (GCond
*cond
)
801 if G_UNLIKELY ((status
= pthread_cond_broadcast (g_cond_get_impl (cond
))) != 0)
802 g_thread_abort (status
, "pthread_cond_broadcast");
808 * @mutex: a #GMutex that is currently locked
809 * @end_time: the monotonic time to wait until
811 * Waits until either @cond is signalled or @end_time has passed.
813 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
814 * could occur. For that reason, waiting on a condition variable should
815 * always be in a loop, based on an explicitly-checked predicate.
817 * %TRUE is returned if the condition variable was signalled (or in the
818 * case of a spurious wakeup). %FALSE is returned if @end_time has
821 * The following code shows how to correctly perform a timed wait on a
822 * condition variable (extending the example presented in the
823 * documentation for #GCond):
825 * |[<!-- language="C" -->
827 * pop_data_timed (void)
832 * g_mutex_lock (&data_mutex);
834 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
835 * while (!current_data)
836 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
838 * // timeout has passed.
839 * g_mutex_unlock (&data_mutex);
843 * // there is data for us
844 * data = current_data;
845 * current_data = NULL;
847 * g_mutex_unlock (&data_mutex);
853 * Notice that the end time is calculated once, before entering the
854 * loop and reused. This is the motivation behind the use of absolute
855 * time on this API -- if a relative time of 5 seconds were passed
856 * directly to the call and a spurious wakeup occurred, the program would
857 * have to start over waiting again (which would lead to a total wait
858 * time of more than 5 seconds).
860 * Returns: %TRUE on a signal, %FALSE on a timeout
864 g_cond_wait_until (GCond
*cond
,
871 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
872 /* end_time is given relative to the monotonic clock as returned by
873 * g_get_monotonic_time().
875 * Since this pthreads wants the relative time, convert it back again.
878 gint64 now
= g_get_monotonic_time ();
884 relative
= end_time
- now
;
886 ts
.tv_sec
= relative
/ 1000000;
887 ts
.tv_nsec
= (relative
% 1000000) * 1000;
889 if ((status
= pthread_cond_timedwait_relative_np (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
), &ts
)) == 0)
892 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
893 /* This is the exact check we used during init to set the clock to
894 * monotonic, so if we're in this branch, timedwait() will already be
895 * expecting a monotonic clock.
898 ts
.tv_sec
= end_time
/ 1000000;
899 ts
.tv_nsec
= (end_time
% 1000000) * 1000;
901 if ((status
= pthread_cond_timedwait (g_cond_get_impl (cond
), g_mutex_get_impl (mutex
), &ts
)) == 0)
905 #error Cannot support GCond on your platform.
908 if G_UNLIKELY (status
!= ETIMEDOUT
)
909 g_thread_abort (status
, "pthread_cond_timedwait");
914 #endif /* defined(USE_NATIVE_MUTEX) */
921 * The #GPrivate struct is an opaque data structure to represent a
922 * thread-local data key. It is approximately equivalent to the
923 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
924 * TlsSetValue()/TlsGetValue() on Windows.
926 * If you don't already know why you might want this functionality,
927 * then you probably don't need it.
929 * #GPrivate is a very limited resource (as far as 128 per program,
930 * shared between all libraries). It is also not possible to destroy a
931 * #GPrivate after it has been used. As such, it is only ever acceptable
932 * to use #GPrivate in static scope, and even then sparingly so.
934 * See G_PRIVATE_INIT() for a couple of examples.
936 * The #GPrivate structure should be considered opaque. It should only
937 * be accessed via the g_private_ functions.
942 * @notify: a #GDestroyNotify
944 * A macro to assist with the static initialisation of a #GPrivate.
946 * This macro is useful for the case that a #GDestroyNotify function
947 * should be associated the key. This is needed when the key will be
948 * used to point at memory that should be deallocated when the thread
951 * Additionally, the #GDestroyNotify will also be called on the previous
952 * value stored in the key when g_private_replace() is used.
954 * If no #GDestroyNotify is needed, then use of this macro is not
955 * required -- if the #GPrivate is declared in static scope then it will
956 * be properly initialised by default (ie: to all zeros). See the
959 * |[<!-- language="C" -->
960 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
962 * // return value should not be freed
964 * get_local_name (void)
966 * return g_private_get (&name_key);
970 * set_local_name (const gchar *name)
972 * g_private_replace (&name_key, g_strdup (name));
976 * static GPrivate count_key; // no free function
979 * get_local_count (void)
981 * return GPOINTER_TO_INT (g_private_get (&count_key));
985 * set_local_count (gint count)
987 * g_private_set (&count_key, GINT_TO_POINTER (count));
994 static pthread_key_t
*
995 g_private_impl_new (GDestroyNotify notify
)
1000 key
= malloc (sizeof (pthread_key_t
));
1001 if G_UNLIKELY (key
== NULL
)
1002 g_thread_abort (errno
, "malloc");
1003 status
= pthread_key_create (key
, notify
);
1004 if G_UNLIKELY (status
!= 0)
1005 g_thread_abort (status
, "pthread_key_create");
1011 g_private_impl_free (pthread_key_t
*key
)
1015 status
= pthread_key_delete (*key
);
1016 if G_UNLIKELY (status
!= 0)
1017 g_thread_abort (status
, "pthread_key_delete");
1021 static inline pthread_key_t
*
1022 g_private_get_impl (GPrivate
*key
)
1024 pthread_key_t
*impl
= g_atomic_pointer_get (&key
->p
);
1026 if G_UNLIKELY (impl
== NULL
)
1028 impl
= g_private_impl_new (key
->notify
);
1029 if (!g_atomic_pointer_compare_and_exchange (&key
->p
, NULL
, impl
))
1031 g_private_impl_free (impl
);
1043 * Returns the current value of the thread local variable @key.
1045 * If the value has not yet been set in this thread, %NULL is returned.
1046 * Values are never copied between threads (when a new thread is
1047 * created, for example).
1049 * Returns: the thread-local value
1052 g_private_get (GPrivate
*key
)
1054 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1055 return pthread_getspecific (*g_private_get_impl (key
));
1061 * @value: the new value
1063 * Sets the thread local variable @key to have the value @value in the
1066 * This function differs from g_private_replace() in the following way:
1067 * the #GDestroyNotify for @key is not called on the old value.
1070 g_private_set (GPrivate
*key
,
1075 if G_UNLIKELY ((status
= pthread_setspecific (*g_private_get_impl (key
), value
)) != 0)
1076 g_thread_abort (status
, "pthread_setspecific");
1080 * g_private_replace:
1082 * @value: the new value
1084 * Sets the thread local variable @key to have the value @value in the
1087 * This function differs from g_private_set() in the following way: if
1088 * the previous value was non-%NULL then the #GDestroyNotify handler for
1089 * @key is run on it.
1094 g_private_replace (GPrivate
*key
,
1097 pthread_key_t
*impl
= g_private_get_impl (key
);
1101 old
= pthread_getspecific (*impl
);
1102 if (old
&& key
->notify
)
1105 if G_UNLIKELY ((status
= pthread_setspecific (*impl
, value
)) != 0)
1106 g_thread_abort (status
, "pthread_setspecific");
/* Evaluate @err once; if it is nonzero, abort via g_error() with file,
 * line and function context.  posix_check_cmd() stringifies the call
 * for use as the error label. */
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1125 pthread_t system_thread
;
1131 g_system_thread_free (GRealThread
*thread
)
1133 GThreadPosix
*pt
= (GThreadPosix
*) thread
;
1136 pthread_detach (pt
->system_thread
);
1138 g_mutex_clear (&pt
->lock
);
1140 g_slice_free (GThreadPosix
, pt
);
1144 g_system_thread_new (GThreadFunc thread_func
,
1148 GThreadPosix
*thread
;
1149 pthread_attr_t attr
;
1152 thread
= g_slice_new0 (GThreadPosix
);
1154 posix_check_cmd (pthread_attr_init (&attr
));
1156 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
1159 #ifdef _SC_THREAD_STACK_MIN
1160 long min_stack_size
= sysconf (_SC_THREAD_STACK_MIN
);
1161 if (min_stack_size
>= 0)
1162 stack_size
= MAX (min_stack_size
, stack_size
);
1163 #endif /* _SC_THREAD_STACK_MIN */
1164 /* No error check here, because some systems can't do it and
1165 * we simply don't want threads to fail because of that. */
1166 pthread_attr_setstacksize (&attr
, stack_size
);
1168 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1170 ret
= pthread_create (&thread
->system_thread
, &attr
, (void* (*)(void*))thread_func
, thread
);
1172 posix_check_cmd (pthread_attr_destroy (&attr
));
1176 g_set_error (error
, G_THREAD_ERROR
, G_THREAD_ERROR_AGAIN
,
1177 "Error creating thread: %s", g_strerror (ret
));
1178 g_slice_free (GThreadPosix
, thread
);
1182 posix_check_err (ret
, "pthread_create");
1184 g_mutex_init (&thread
->lock
);
1186 return (GRealThread
*) thread
;
/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so that
 * other threads can run.  Often used to make busy-waiting less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}
1204 g_system_thread_wait (GRealThread
*thread
)
1206 GThreadPosix
*pt
= (GThreadPosix
*) thread
;
1208 g_mutex_lock (&pt
->lock
);
1212 posix_check_cmd (pthread_join (pt
->system_thread
, NULL
));
1216 g_mutex_unlock (&pt
->lock
);
/* Terminate the calling thread immediately. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1226 g_system_thread_set_name (const gchar
*name
)
1228 #if defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
1229 pthread_setname_np (pthread_self(), name
); /* on Linux and Solaris */
1230 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
1231 pthread_setname_np (name
); /* on OS X and iOS */
1235 /* {{{1 GMutex and GCond futex implementation */
1237 #if defined(USE_NATIVE_MUTEX)
1239 #include <linux/futex.h>
1240 #include <sys/syscall.h>
/* Fall back to the shared futex ops on kernels that predate the
 * process-private variants. */
#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE
#endif
/* We should expand the set of operations available in gatomic once we
 * have better C11 support in GCC in common distributions (ie: 4.9).
 *
 * Before then, let's define a couple of useful things for our own
 * purposes, using the GCC __atomic builtins on 4-byte values.
 */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
1264 /* Our strategy for the mutex is pretty simple:
1268 * 1: acquired by one thread only, no contention
1273 * As such, attempting to acquire the lock should involve an increment.
1274 * If we find that the previous value was 0 then we can return
1277 * On unlock, we always store 0 to indicate that the lock is available.
1278 * If the value there was 1 before then we didn't have contention and
1279 * can return immediately. If the value was something other than 1 then
1280 * we have the contended case and need to wake a waiter.
1282 * If it was not 0 then there is another thread holding it and we must
1283 * wait. We must always ensure that we mark a value >1 while we are
1284 * waiting in order to instruct the holder to do a wake operation on
1289 g_mutex_init (GMutex
*mutex
)
1295 g_mutex_clear (GMutex
*mutex
)
1297 if G_UNLIKELY (mutex
->i
[0] != 0)
1299 fprintf (stderr
, "g_mutex_clear() called on uninitialised or locked mutex\n");
1304 static void __attribute__((noinline
))
1305 g_mutex_lock_slowpath (GMutex
*mutex
)
1307 /* Set to 2 to indicate contention. If it was zero before then we
1308 * just acquired the lock.
1310 * Otherwise, sleep for as long as the 2 remains...
1312 while (exchange_acquire (&mutex
->i
[0], 2) != 0)
1313 syscall (__NR_futex
, &mutex
->i
[0], (gsize
) FUTEX_WAIT_PRIVATE
, (gsize
) 2, NULL
);
1316 static void __attribute__((noinline
))
1317 g_mutex_unlock_slowpath (GMutex
*mutex
,
1320 /* We seem to get better code for the uncontended case by splitting
1323 if G_UNLIKELY (prev
== 0)
1325 fprintf (stderr
, "Attempt to unlock mutex that was not locked\n");
1329 syscall (__NR_futex
, &mutex
->i
[0], (gsize
) FUTEX_WAKE_PRIVATE
, (gsize
) 1, NULL
);
1333 g_mutex_lock (GMutex
*mutex
)
1335 /* 0 -> 1 and we're done. Anything else, and we need to wait... */
1336 if G_UNLIKELY (g_atomic_int_add (&mutex
->i
[0], 1) != 0)
1337 g_mutex_lock_slowpath (mutex
);
1341 g_mutex_unlock (GMutex
*mutex
)
1345 prev
= exchange_release (&mutex
->i
[0], 0);
1347 /* 1-> 0 and we're done. Anything else and we need to signal... */
1348 if G_UNLIKELY (prev
!= 1)
1349 g_mutex_unlock_slowpath (mutex
, prev
);
1353 g_mutex_trylock (GMutex
*mutex
)
1357 /* We don't want to touch the value at all unless we can move it from
1360 return compare_exchange_acquire (&mutex
->i
[0], &zero
, 1);
1363 /* Condition variables are implemented in a rather simple way as well.
1364 * In many ways, futex() as an abstraction is even more ideally suited
1365 * to condition variables than it is to mutexes.
1367 * We store a generation counter. We sample it with the lock held and
1368 * unlock before sleeping on the futex.
1370 * Signalling simply involves increasing the counter and making the
1371 * appropriate futex call.
1373 * The only thing that is the slightest bit complicated is timed waits
1374 * because we must convert our absolute time to relative.
1378 g_cond_init (GCond
*cond
)
1384 g_cond_clear (GCond
*cond
)
1389 g_cond_wait (GCond
*cond
,
1392 guint sampled
= g_atomic_int_get (&cond
->i
[0]);
1394 g_mutex_unlock (mutex
);
1395 syscall (__NR_futex
, &cond
->i
[0], (gsize
) FUTEX_WAIT_PRIVATE
, (gsize
) sampled
, NULL
);
1396 g_mutex_lock (mutex
);
1400 g_cond_signal (GCond
*cond
)
1402 g_atomic_int_inc (&cond
->i
[0]);
1404 syscall (__NR_futex
, &cond
->i
[0], (gsize
) FUTEX_WAKE_PRIVATE
, (gsize
) 1, NULL
);
1408 g_cond_broadcast (GCond
*cond
)
1410 g_atomic_int_inc (&cond
->i
[0]);
1412 syscall (__NR_futex
, &cond
->i
[0], (gsize
) FUTEX_WAKE_PRIVATE
, (gsize
) INT_MAX
, NULL
);
1416 g_cond_wait_until (GCond
*cond
,
1420 struct timespec now
;
1421 struct timespec span
;
1428 clock_gettime (CLOCK_MONOTONIC
, &now
);
1429 span
.tv_sec
= (end_time
/ 1000000) - now
.tv_sec
;
1430 span
.tv_nsec
= ((end_time
% 1000000) * 1000) - now
.tv_nsec
;
1431 if (span
.tv_nsec
< 0)
1433 span
.tv_nsec
+= 1000000000;
1437 if (span
.tv_sec
< 0)
1440 sampled
= cond
->i
[0];
1441 g_mutex_unlock (mutex
);
1442 res
= syscall (__NR_futex
, &cond
->i
[0], (gsize
) FUTEX_WAIT_PRIVATE
, (gsize
) sampled
, &span
);
1443 g_mutex_lock (mutex
);
1445 return (res
< 0 && errno
== ETIMEDOUT
) ? FALSE
: TRUE
;
1451 /* vim:set foldmethod=marker: */