gsettings: schema_list should use the passed schema's source
[glib.git] / glib / gthread-posix.c
blobbd3c9a10b6832e98ba42466a6a6d5d35cfb72df8
1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
23 * file for a list of people on the GLib Team. See the ChangeLog
24 * files for a list of changes. These files are distributed with
25 * GLib at ftp://ftp.gtk.org/pub/gtk/.
28 /* The GMutex, GCond and GPrivate implementations in this file are some
29 * of the lowest-level code in GLib. All other parts of GLib (messages,
30 * memory, slices, etc) assume that they can freely use these facilities
31 * without risking recursion.
33 * As such, these functions are NOT permitted to call any other part of
34 * GLib.
36 * The thread manipulation functions (create, exit, join, etc.) have
37 * more freedom -- they can do as they please.
40 #include "config.h"
42 #include "gthread.h"
44 #include "gthreadprivate.h"
45 #include "gslice.h"
46 #include "gmessages.h"
47 #include "gstrfuncs.h"
48 #include "gmain.h"
50 #include <stdlib.h>
51 #include <stdio.h>
52 #include <string.h>
53 #include <errno.h>
54 #include <pthread.h>
56 #include <sys/time.h>
57 #include <unistd.h>
59 #ifdef HAVE_SCHED_H
60 #include <sched.h>
61 #endif
62 #ifdef HAVE_SYS_PRCTL_H
63 #include <sys/prctl.h>
64 #endif
65 #ifdef G_OS_WIN32
66 #include <windows.h>
67 #endif
69 /* clang defines __ATOMIC_SEQ_CST but doesn't support the GCC extension */
70 #if defined(HAVE_FUTEX) && defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
71 #define USE_NATIVE_MUTEX
72 #endif
74 static void
75 g_thread_abort (gint status,
76 const gchar *function)
78 fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
79 function, strerror (status));
80 abort ();
83 /* {{{1 GMutex */
85 #if !defined(USE_NATIVE_MUTEX)
87 static pthread_mutex_t *
88 g_mutex_impl_new (void)
90 pthread_mutexattr_t *pattr = NULL;
91 pthread_mutex_t *mutex;
92 gint status;
93 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
94 pthread_mutexattr_t attr;
95 #endif
97 mutex = malloc (sizeof (pthread_mutex_t));
98 if G_UNLIKELY (mutex == NULL)
99 g_thread_abort (errno, "malloc");
101 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
102 pthread_mutexattr_init (&attr);
103 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
104 pattr = &attr;
105 #endif
107 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
108 g_thread_abort (status, "pthread_mutex_init");
110 #ifdef PTHREAD_ADAPTIVE_MUTEX_NP
111 pthread_mutexattr_destroy (&attr);
112 #endif
114 return mutex;
/* Destroy and release a mutex created by g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
124 static inline pthread_mutex_t *
125 g_mutex_get_impl (GMutex *mutex)
127 pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
129 if G_UNLIKELY (impl == NULL)
131 impl = g_mutex_impl_new ();
132 if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
133 g_mutex_impl_free (impl);
134 impl = mutex->p;
137 return impl;
142 * g_mutex_init:
143 * @mutex: an uninitialized #GMutex
145 * Initializes a #GMutex so that it can be used.
147 * This function is useful to initialize a mutex that has been
148 * allocated on the stack, or as part of a larger structure.
149 * It is not necessary to initialize a mutex that has been
150 * statically allocated.
152 * |[<!-- language="C" -->
153 * typedef struct {
154 * GMutex m;
155 * ...
156 * } Blob;
158 * Blob *b;
160 * b = g_new (Blob, 1);
161 * g_mutex_init (&b->m);
162 * ]|
164 * To undo the effect of g_mutex_init() when a mutex is no longer
165 * needed, use g_mutex_clear().
167 * Calling g_mutex_init() on an already initialized #GMutex leads
168 * to undefined behaviour.
170 * Since: 2.32
172 void
173 g_mutex_init (GMutex *mutex)
175 mutex->p = g_mutex_impl_new ();
179 * g_mutex_clear:
180 * @mutex: an initialized #GMutex
182 * Frees the resources allocated to a mutex with g_mutex_init().
184 * This function should not be used with a #GMutex that has been
185 * statically allocated.
187 * Calling g_mutex_clear() on a locked mutex leads to undefined
188 * behaviour.
190 * Sine: 2.32
192 void
193 g_mutex_clear (GMutex *mutex)
195 g_mutex_impl_free (mutex->p);
199 * g_mutex_lock:
200 * @mutex: a #GMutex
202 * Locks @mutex. If @mutex is already locked by another thread, the
203 * current thread will block until @mutex is unlocked by the other
204 * thread.
206 * #GMutex is neither guaranteed to be recursive nor to be
207 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
208 * already been locked by the same thread results in undefined behaviour
209 * (including but not limited to deadlocks).
211 void
212 g_mutex_lock (GMutex *mutex)
214 gint status;
216 if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
217 g_thread_abort (status, "pthread_mutex_lock");
221 * g_mutex_unlock:
222 * @mutex: a #GMutex
224 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
225 * call for @mutex, it will become unblocked and can lock @mutex itself.
227 * Calling g_mutex_unlock() on a mutex that is not locked by the
228 * current thread leads to undefined behaviour.
230 void
231 g_mutex_unlock (GMutex *mutex)
233 gint status;
235 if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
236 g_thread_abort (status, "pthread_mutex_unlock");
240 * g_mutex_trylock:
241 * @mutex: a #GMutex
243 * Tries to lock @mutex. If @mutex is already locked by another thread,
244 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
245 * %TRUE.
247 * #GMutex is neither guaranteed to be recursive nor to be
248 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
249 * already been locked by the same thread results in undefined behaviour
250 * (including but not limited to deadlocks or arbitrary return values).
252 * Returns: %TRUE if @mutex could be locked
254 gboolean
255 g_mutex_trylock (GMutex *mutex)
257 gint status;
259 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
260 return TRUE;
262 if G_UNLIKELY (status != EBUSY)
263 g_thread_abort (status, "pthread_mutex_trylock");
265 return FALSE;
268 #endif /* !defined(USE_NATIVE_MUTEX) */
270 /* {{{1 GRecMutex */
272 static pthread_mutex_t *
273 g_rec_mutex_impl_new (void)
275 pthread_mutexattr_t attr;
276 pthread_mutex_t *mutex;
278 mutex = malloc (sizeof (pthread_mutex_t));
279 if G_UNLIKELY (mutex == NULL)
280 g_thread_abort (errno, "malloc");
282 pthread_mutexattr_init (&attr);
283 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
284 pthread_mutex_init (mutex, &attr);
285 pthread_mutexattr_destroy (&attr);
287 return mutex;
/* Destroy and release a mutex created by g_rec_mutex_impl_new(). */
static void
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
297 static inline pthread_mutex_t *
298 g_rec_mutex_get_impl (GRecMutex *rec_mutex)
300 pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
302 if G_UNLIKELY (impl == NULL)
304 impl = g_rec_mutex_impl_new ();
305 if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
306 g_rec_mutex_impl_free (impl);
307 impl = rec_mutex->p;
310 return impl;
314 * g_rec_mutex_init:
315 * @rec_mutex: an uninitialized #GRecMutex
317 * Initializes a #GRecMutex so that it can be used.
319 * This function is useful to initialize a recursive mutex
320 * that has been allocated on the stack, or as part of a larger
321 * structure.
323 * It is not necessary to initialise a recursive mutex that has been
324 * statically allocated.
326 * |[<!-- language="C" -->
327 * typedef struct {
328 * GRecMutex m;
329 * ...
330 * } Blob;
332 * Blob *b;
334 * b = g_new (Blob, 1);
335 * g_rec_mutex_init (&b->m);
336 * ]|
338 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
339 * leads to undefined behaviour.
341 * To undo the effect of g_rec_mutex_init() when a recursive mutex
342 * is no longer needed, use g_rec_mutex_clear().
344 * Since: 2.32
346 void
347 g_rec_mutex_init (GRecMutex *rec_mutex)
349 rec_mutex->p = g_rec_mutex_impl_new ();
353 * g_rec_mutex_clear:
354 * @rec_mutex: an initialized #GRecMutex
356 * Frees the resources allocated to a recursive mutex with
357 * g_rec_mutex_init().
359 * This function should not be used with a #GRecMutex that has been
360 * statically allocated.
362 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
363 * to undefined behaviour.
365 * Sine: 2.32
367 void
368 g_rec_mutex_clear (GRecMutex *rec_mutex)
370 g_rec_mutex_impl_free (rec_mutex->p);
374 * g_rec_mutex_lock:
375 * @rec_mutex: a #GRecMutex
377 * Locks @rec_mutex. If @rec_mutex is already locked by another
378 * thread, the current thread will block until @rec_mutex is
379 * unlocked by the other thread. If @rec_mutex is already locked
380 * by the current thread, the 'lock count' of @rec_mutex is increased.
381 * The mutex will only become available again when it is unlocked
382 * as many times as it has been locked.
384 * Since: 2.32
386 void
387 g_rec_mutex_lock (GRecMutex *mutex)
389 pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
393 * g_rec_mutex_unlock:
394 * @rec_mutex: a #GRecMutex
396 * Unlocks @rec_mutex. If another thread is blocked in a
397 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
398 * and can lock @rec_mutex itself.
400 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
401 * locked by the current thread leads to undefined behaviour.
403 * Since: 2.32
405 void
406 g_rec_mutex_unlock (GRecMutex *rec_mutex)
408 pthread_mutex_unlock (rec_mutex->p);
412 * g_rec_mutex_trylock:
413 * @rec_mutex: a #GRecMutex
415 * Tries to lock @rec_mutex. If @rec_mutex is already locked
416 * by another thread, it immediately returns %FALSE. Otherwise
417 * it locks @rec_mutex and returns %TRUE.
419 * Returns: %TRUE if @rec_mutex could be locked
421 * Since: 2.32
423 gboolean
424 g_rec_mutex_trylock (GRecMutex *rec_mutex)
426 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
427 return FALSE;
429 return TRUE;
432 /* {{{1 GRWLock */
434 static pthread_rwlock_t *
435 g_rw_lock_impl_new (void)
437 pthread_rwlock_t *rwlock;
438 gint status;
440 rwlock = malloc (sizeof (pthread_rwlock_t));
441 if G_UNLIKELY (rwlock == NULL)
442 g_thread_abort (errno, "malloc");
444 if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
445 g_thread_abort (status, "pthread_rwlock_init");
447 return rwlock;
450 static void
451 g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
453 pthread_rwlock_destroy (rwlock);
454 free (rwlock);
457 static inline pthread_rwlock_t *
458 g_rw_lock_get_impl (GRWLock *lock)
460 pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
462 if G_UNLIKELY (impl == NULL)
464 impl = g_rw_lock_impl_new ();
465 if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
466 g_rw_lock_impl_free (impl);
467 impl = lock->p;
470 return impl;
474 * g_rw_lock_init:
475 * @rw_lock: an uninitialized #GRWLock
477 * Initializes a #GRWLock so that it can be used.
479 * This function is useful to initialize a lock that has been
480 * allocated on the stack, or as part of a larger structure. It is not
481 * necessary to initialise a reader-writer lock that has been statically
482 * allocated.
484 * |[<!-- language="C" -->
485 * typedef struct {
486 * GRWLock l;
487 * ...
488 * } Blob;
490 * Blob *b;
492 * b = g_new (Blob, 1);
493 * g_rw_lock_init (&b->l);
494 * ]|
496 * To undo the effect of g_rw_lock_init() when a lock is no longer
497 * needed, use g_rw_lock_clear().
499 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
500 * to undefined behaviour.
502 * Since: 2.32
504 void
505 g_rw_lock_init (GRWLock *rw_lock)
507 rw_lock->p = g_rw_lock_impl_new ();
511 * g_rw_lock_clear:
512 * @rw_lock: an initialized #GRWLock
514 * Frees the resources allocated to a lock with g_rw_lock_init().
516 * This function should not be used with a #GRWLock that has been
517 * statically allocated.
519 * Calling g_rw_lock_clear() when any thread holds the lock
520 * leads to undefined behaviour.
522 * Sine: 2.32
524 void
525 g_rw_lock_clear (GRWLock *rw_lock)
527 g_rw_lock_impl_free (rw_lock->p);
531 * g_rw_lock_writer_lock:
532 * @rw_lock: a #GRWLock
534 * Obtain a write lock on @rw_lock. If any thread already holds
535 * a read or write lock on @rw_lock, the current thread will block
536 * until all other threads have dropped their locks on @rw_lock.
538 * Since: 2.32
540 void
541 g_rw_lock_writer_lock (GRWLock *rw_lock)
543 pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
547 * g_rw_lock_writer_trylock:
548 * @rw_lock: a #GRWLock
550 * Tries to obtain a write lock on @rw_lock. If any other thread holds
551 * a read or write lock on @rw_lock, it immediately returns %FALSE.
552 * Otherwise it locks @rw_lock and returns %TRUE.
554 * Returns: %TRUE if @rw_lock could be locked
556 * Since: 2.32
558 gboolean
559 g_rw_lock_writer_trylock (GRWLock *rw_lock)
561 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
562 return FALSE;
564 return TRUE;
568 * g_rw_lock_writer_unlock:
569 * @rw_lock: a #GRWLock
571 * Release a write lock on @rw_lock.
573 * Calling g_rw_lock_writer_unlock() on a lock that is not held
574 * by the current thread leads to undefined behaviour.
576 * Since: 2.32
578 void
579 g_rw_lock_writer_unlock (GRWLock *rw_lock)
581 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
585 * g_rw_lock_reader_lock:
586 * @rw_lock: a #GRWLock
588 * Obtain a read lock on @rw_lock. If another thread currently holds
589 * the write lock on @rw_lock or blocks waiting for it, the current
590 * thread will block. Read locks can be taken recursively.
592 * It is implementation-defined how many threads are allowed to
593 * hold read locks on the same lock simultaneously.
595 * Since: 2.32
597 void
598 g_rw_lock_reader_lock (GRWLock *rw_lock)
600 pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
604 * g_rw_lock_reader_trylock:
605 * @rw_lock: a #GRWLock
607 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
608 * the read lock was successfully obtained. Otherwise it
609 * returns %FALSE.
611 * Returns: %TRUE if @rw_lock could be locked
613 * Since: 2.32
615 gboolean
616 g_rw_lock_reader_trylock (GRWLock *rw_lock)
618 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
619 return FALSE;
621 return TRUE;
625 * g_rw_lock_reader_unlock:
626 * @rw_lock: a #GRWLock
628 * Release a read lock on @rw_lock.
630 * Calling g_rw_lock_reader_unlock() on a lock that is not held
631 * by the current thread leads to undefined behaviour.
633 * Since: 2.32
635 void
636 g_rw_lock_reader_unlock (GRWLock *rw_lock)
638 pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
641 /* {{{1 GCond */
643 #if !defined(USE_NATIVE_MUTEX)
645 static pthread_cond_t *
646 g_cond_impl_new (void)
648 pthread_condattr_t attr;
649 pthread_cond_t *cond;
650 gint status;
652 pthread_condattr_init (&attr);
654 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
655 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
656 if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
657 g_thread_abort (status, "pthread_condattr_setclock");
658 #else
659 #error Cannot support GCond on your platform.
660 #endif
662 cond = malloc (sizeof (pthread_cond_t));
663 if G_UNLIKELY (cond == NULL)
664 g_thread_abort (errno, "malloc");
666 if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
667 g_thread_abort (status, "pthread_cond_init");
669 pthread_condattr_destroy (&attr);
671 return cond;
/* Destroy and release a condition variable created by g_cond_impl_new(). */
static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  free (cond);
}
681 static inline pthread_cond_t *
682 g_cond_get_impl (GCond *cond)
684 pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
686 if G_UNLIKELY (impl == NULL)
688 impl = g_cond_impl_new ();
689 if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
690 g_cond_impl_free (impl);
691 impl = cond->p;
694 return impl;
698 * g_cond_init:
699 * @cond: an uninitialized #GCond
701 * Initialises a #GCond so that it can be used.
703 * This function is useful to initialise a #GCond that has been
704 * allocated as part of a larger structure. It is not necessary to
705 * initialise a #GCond that has been statically allocated.
707 * To undo the effect of g_cond_init() when a #GCond is no longer
708 * needed, use g_cond_clear().
710 * Calling g_cond_init() on an already-initialised #GCond leads
711 * to undefined behaviour.
713 * Since: 2.32
715 void
716 g_cond_init (GCond *cond)
718 cond->p = g_cond_impl_new ();
722 * g_cond_clear:
723 * @cond: an initialised #GCond
725 * Frees the resources allocated to a #GCond with g_cond_init().
727 * This function should not be used with a #GCond that has been
728 * statically allocated.
730 * Calling g_cond_clear() for a #GCond on which threads are
731 * blocking leads to undefined behaviour.
733 * Since: 2.32
735 void
736 g_cond_clear (GCond *cond)
738 g_cond_impl_free (cond->p);
742 * g_cond_wait:
743 * @cond: a #GCond
744 * @mutex: a #GMutex that is currently locked
746 * Atomically releases @mutex and waits until @cond is signalled.
747 * When this function returns, @mutex is locked again and owned by the
748 * calling thread.
750 * When using condition variables, it is possible that a spurious wakeup
751 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
752 * not called). It's also possible that a stolen wakeup may occur.
753 * This is when g_cond_signal() is called, but another thread acquires
754 * @mutex before this thread and modifies the state of the program in
755 * such a way that when g_cond_wait() is able to return, the expected
756 * condition is no longer met.
758 * For this reason, g_cond_wait() must always be used in a loop. See
759 * the documentation for #GCond for a complete example.
761 void
762 g_cond_wait (GCond *cond,
763 GMutex *mutex)
765 gint status;
767 if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
768 g_thread_abort (status, "pthread_cond_wait");
772 * g_cond_signal:
773 * @cond: a #GCond
775 * If threads are waiting for @cond, at least one of them is unblocked.
776 * If no threads are waiting for @cond, this function has no effect.
777 * It is good practice to hold the same lock as the waiting thread
778 * while calling this function, though not required.
780 void
781 g_cond_signal (GCond *cond)
783 gint status;
785 if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
786 g_thread_abort (status, "pthread_cond_signal");
790 * g_cond_broadcast:
791 * @cond: a #GCond
793 * If threads are waiting for @cond, all of them are unblocked.
794 * If no threads are waiting for @cond, this function has no effect.
795 * It is good practice to lock the same mutex as the waiting threads
796 * while calling this function, though not required.
798 void
799 g_cond_broadcast (GCond *cond)
801 gint status;
803 if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
804 g_thread_abort (status, "pthread_cond_broadcast");
808 * g_cond_wait_until:
809 * @cond: a #GCond
810 * @mutex: a #GMutex that is currently locked
811 * @end_time: the monotonic time to wait until
813 * Waits until either @cond is signalled or @end_time has passed.
815 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
816 * could occur. For that reason, waiting on a condition variable should
817 * always be in a loop, based on an explicitly-checked predicate.
819 * %TRUE is returned if the condition variable was signalled (or in the
820 * case of a spurious wakeup). %FALSE is returned if @end_time has
821 * passed.
823 * The following code shows how to correctly perform a timed wait on a
824 * condition variable (extending the example presented in the
825 * documentation for #GCond):
827 * |[<!-- language="C" -->
828 * gpointer
829 * pop_data_timed (void)
831 * gint64 end_time;
832 * gpointer data;
834 * g_mutex_lock (&data_mutex);
836 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
837 * while (!current_data)
838 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
840 * // timeout has passed.
841 * g_mutex_unlock (&data_mutex);
842 * return NULL;
845 * // there is data for us
846 * data = current_data;
847 * current_data = NULL;
849 * g_mutex_unlock (&data_mutex);
851 * return data;
853 * ]|
855 * Notice that the end time is calculated once, before entering the
856 * loop and reused. This is the motivation behind the use of absolute
857 * time on this API -- if a relative time of 5 seconds were passed
858 * directly to the call and a spurious wakeup occurred, the program would
859 * have to start over waiting again (which would lead to a total wait
860 * time of more than 5 seconds).
862 * Returns: %TRUE on a signal, %FALSE on a timeout
863 * Since: 2.32
865 gboolean
866 g_cond_wait_until (GCond *cond,
867 GMutex *mutex,
868 gint64 end_time)
870 struct timespec ts;
871 gint status;
873 #ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
874 /* end_time is given relative to the monotonic clock as returned by
875 * g_get_monotonic_time().
877 * Since this pthreads wants the relative time, convert it back again.
880 gint64 now = g_get_monotonic_time ();
881 gint64 relative;
883 if (end_time <= now)
884 return FALSE;
886 relative = end_time - now;
888 ts.tv_sec = relative / 1000000;
889 ts.tv_nsec = (relative % 1000000) * 1000;
891 if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
892 return TRUE;
894 #elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
895 /* This is the exact check we used during init to set the clock to
896 * monotonic, so if we're in this branch, timedwait() will already be
897 * expecting a monotonic clock.
900 ts.tv_sec = end_time / 1000000;
901 ts.tv_nsec = (end_time % 1000000) * 1000;
903 if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
904 return TRUE;
906 #else
907 #error Cannot support GCond on your platform.
908 #endif
910 if G_UNLIKELY (status != ETIMEDOUT)
911 g_thread_abort (status, "pthread_cond_timedwait");
913 return FALSE;
916 #endif /* defined(USE_NATIVE_MUTEX) */
918 /* {{{1 GPrivate */
921 * GPrivate:
923 * The #GPrivate struct is an opaque data structure to represent a
924 * thread-local data key. It is approximately equivalent to the
925 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
926 * TlsSetValue()/TlsGetValue() on Windows.
928 * If you don't already know why you might want this functionality,
929 * then you probably don't need it.
931 * #GPrivate is a very limited resource (as far as 128 per program,
932 * shared between all libraries). It is also not possible to destroy a
933 * #GPrivate after it has been used. As such, it is only ever acceptable
934 * to use #GPrivate in static scope, and even then sparingly so.
936 * See G_PRIVATE_INIT() for a couple of examples.
938 * The #GPrivate structure should be considered opaque. It should only
939 * be accessed via the g_private_ functions.
943 * G_PRIVATE_INIT:
944 * @notify: a #GDestroyNotify
946 * A macro to assist with the static initialisation of a #GPrivate.
948 * This macro is useful for the case that a #GDestroyNotify function
949 * should be associated the key. This is needed when the key will be
950 * used to point at memory that should be deallocated when the thread
951 * exits.
953 * Additionally, the #GDestroyNotify will also be called on the previous
954 * value stored in the key when g_private_replace() is used.
956 * If no #GDestroyNotify is needed, then use of this macro is not
957 * required -- if the #GPrivate is declared in static scope then it will
958 * be properly initialised by default (ie: to all zeros). See the
959 * examples below.
961 * |[<!-- language="C" -->
962 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
964 * // return value should not be freed
965 * const gchar *
966 * get_local_name (void)
968 * return g_private_get (&name_key);
971 * void
972 * set_local_name (const gchar *name)
974 * g_private_replace (&name_key, g_strdup (name));
978 * static GPrivate count_key; // no free function
980 * gint
981 * get_local_count (void)
983 * return GPOINTER_TO_INT (g_private_get (&count_key));
986 * void
987 * set_local_count (gint count)
989 * g_private_set (&count_key, GINT_TO_POINTER (count));
991 * ]|
993 * Since: 2.32
996 static pthread_key_t *
997 g_private_impl_new (GDestroyNotify notify)
999 pthread_key_t *key;
1000 gint status;
1002 key = malloc (sizeof (pthread_key_t));
1003 if G_UNLIKELY (key == NULL)
1004 g_thread_abort (errno, "malloc");
1005 status = pthread_key_create (key, notify);
1006 if G_UNLIKELY (status != 0)
1007 g_thread_abort (status, "pthread_key_create");
1009 return key;
1012 static void
1013 g_private_impl_free (pthread_key_t *key)
1015 gint status;
1017 status = pthread_key_delete (*key);
1018 if G_UNLIKELY (status != 0)
1019 g_thread_abort (status, "pthread_key_delete");
1020 free (key);
1023 static inline pthread_key_t *
1024 g_private_get_impl (GPrivate *key)
1026 pthread_key_t *impl = g_atomic_pointer_get (&key->p);
1028 if G_UNLIKELY (impl == NULL)
1030 impl = g_private_impl_new (key->notify);
1031 if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
1033 g_private_impl_free (impl);
1034 impl = key->p;
1038 return impl;
1042 * g_private_get:
1043 * @key: a #GPrivate
1045 * Returns the current value of the thread local variable @key.
1047 * If the value has not yet been set in this thread, %NULL is returned.
1048 * Values are never copied between threads (when a new thread is
1049 * created, for example).
1051 * Returns: the thread-local value
1053 gpointer
1054 g_private_get (GPrivate *key)
1056 /* quote POSIX: No errors are returned from pthread_getspecific(). */
1057 return pthread_getspecific (*g_private_get_impl (key));
1061 * g_private_set:
1062 * @key: a #GPrivate
1063 * @value: the new value
1065 * Sets the thread local variable @key to have the value @value in the
1066 * current thread.
1068 * This function differs from g_private_replace() in the following way:
1069 * the #GDestroyNotify for @key is not called on the old value.
1071 void
1072 g_private_set (GPrivate *key,
1073 gpointer value)
1075 gint status;
1077 if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
1078 g_thread_abort (status, "pthread_setspecific");
1082 * g_private_replace:
1083 * @key: a #GPrivate
1084 * @value: the new value
1086 * Sets the thread local variable @key to have the value @value in the
1087 * current thread.
1089 * This function differs from g_private_set() in the following way: if
1090 * the previous value was non-%NULL then the #GDestroyNotify handler for
1091 * @key is run on it.
1093 * Since: 2.32
1095 void
1096 g_private_replace (GPrivate *key,
1097 gpointer value)
1099 pthread_key_t *impl = g_private_get_impl (key);
1100 gpointer old;
1101 gint status;
1103 old = pthread_getspecific (*impl);
1104 if (old && key->notify)
1105 key->notify (old);
1107 if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1108 g_thread_abort (status, "pthread_setspecific");
1111 /* {{{1 GThread */
/* Abort with a descriptive g_error() if a pthreads call returned a
 * non-zero error code.  posix_check_cmd() stringifies the call itself
 * so the failing expression appears in the message. */
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1123 typedef struct
1125 GRealThread thread;
1127 pthread_t system_thread;
1128 gboolean joined;
1129 GMutex lock;
1130 } GThreadPosix;
1132 void
1133 g_system_thread_free (GRealThread *thread)
1135 GThreadPosix *pt = (GThreadPosix *) thread;
1137 if (!pt->joined)
1138 pthread_detach (pt->system_thread);
1140 g_mutex_clear (&pt->lock);
1142 g_slice_free (GThreadPosix, pt);
1145 GRealThread *
1146 g_system_thread_new (GThreadFunc thread_func,
1147 gulong stack_size,
1148 GError **error)
1150 GThreadPosix *thread;
1151 pthread_attr_t attr;
1152 gint ret;
1154 thread = g_slice_new0 (GThreadPosix);
1156 posix_check_cmd (pthread_attr_init (&attr));
1158 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
1159 if (stack_size)
1161 #ifdef _SC_THREAD_STACK_MIN
1162 long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
1163 if (min_stack_size >= 0)
1164 stack_size = MAX (min_stack_size, stack_size);
1165 #endif /* _SC_THREAD_STACK_MIN */
1166 /* No error check here, because some systems can't do it and
1167 * we simply don't want threads to fail because of that. */
1168 pthread_attr_setstacksize (&attr, stack_size);
1170 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1172 ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread);
1174 posix_check_cmd (pthread_attr_destroy (&attr));
1176 if (ret == EAGAIN)
1178 g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
1179 "Error creating thread: %s", g_strerror (ret));
1180 g_slice_free (GThreadPosix, thread);
1181 return NULL;
1184 posix_check_err (ret, "pthread_create");
1186 g_mutex_init (&thread->lock);
1188 return (GRealThread *) thread;
/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}
1205 void
1206 g_system_thread_wait (GRealThread *thread)
1208 GThreadPosix *pt = (GThreadPosix *) thread;
1210 g_mutex_lock (&pt->lock);
1212 if (!pt->joined)
1214 posix_check_cmd (pthread_join (pt->system_thread, NULL));
1215 pt->joined = TRUE;
1218 g_mutex_unlock (&pt->lock);
/* Terminate the calling thread (never returns). */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1227 void
1228 g_system_thread_set_name (const gchar *name)
1230 #if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_NAME)
1231 prctl (PR_SET_NAME, name, 0, 0, 0, 0); /* on Linux */
1232 #elif defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
1233 pthread_setname_np(name); /* on OS X and iOS */
1234 #endif
/* {{{1 GMutex and GCond futex implementation */

#if defined(USE_NATIVE_MUTEX)

#include <linux/futex.h>
#include <sys/syscall.h>

#ifndef FUTEX_WAIT_PRIVATE
/* Fall back to the process-shared futex operations on kernel headers
 * that predate the *_PRIVATE variants. */
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE
#endif

/* We should expand the set of operations available in gatomic once we
 * have better C11 support in GCC in common distributions (ie: 4.9).
 *
 * Before then, let's define a couple of useful things for our own
 * purposes...
 */

/* Thin wrappers over the GCC __atomic builtins on a 4-byte word,
 * spelling out the memory order at each call site: acquire on the
 * lock/sample side, release on the unlock/publish side. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
1266 /* Our strategy for the mutex is pretty simple:
1268 * 0: not in use
1270 * 1: acquired by one thread only, no contention
1272 * > 1: contended
1275 * As such, attempting to acquire the lock should involve an increment.
1276 * If we find that the previous value was 0 then we can return
1277 * immediately.
1279 * On unlock, we always store 0 to indicate that the lock is available.
1280 * If the value there was 1 before then we didn't have contention and
1281 * can return immediately. If the value was something other than 1 then
1282 * we have the contended case and need to wake a waiter.
1284 * If it was not 0 then there is another thread holding it and we must
1285 * wait. We must always ensure that we mark a value >1 while we are
1286 * waiting in order to instruct the holder to do a wake operation on
1287 * unlock.
1290 void
1291 g_mutex_init (GMutex *mutex)
1293 mutex->i[0] = 0;
1296 void
1297 g_mutex_clear (GMutex *mutex)
1299 if G_UNLIKELY (mutex->i[0] != 0)
1301 fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
1302 abort ();
/* Contended-lock path, kept out of line so the fast path in
 * g_mutex_lock() stays small. */
static void __attribute__((noinline))
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to 2 to indicate contention.  If it was zero before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the 2 remains...
   *
   * FUTEX_WAIT only sleeps if the word still equals 2 at the time of
   * the syscall, so an unlock racing with this exchange cannot make us
   * miss the wakeup.
   */
  while (exchange_acquire (&mutex->i[0], 2) != 0)
    syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL);
}
/* Contended-unlock path: 'prev' is the value the futex word held just
 * before g_mutex_unlock() stored 0. */
static void __attribute__((noinline))
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == 0)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      abort ();
    }

  /* prev > 1: at least one thread marked contention, so wake one
   * waiter; it will re-mark the word 2 if others remain asleep. */
  syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
/* Fast path: a single atomic increment acquires an uncontended lock. */
void
g_mutex_lock (GMutex *mutex)
{
  /* 0 -> 1 and we're done.  Anything else, and we need to wait... */
  if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0)
    g_mutex_lock_slowpath (mutex);
}
/* Unlock always stores 0 (available); the previous value tells us
 * whether anyone was waiting. */
void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  prev = exchange_release (&mutex->i[0], 0);

  /* 1-> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != 1)
    g_mutex_unlock_slowpath (mutex, prev);
}
/* Try to acquire without blocking; returns TRUE on success. */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  guint zero = 0;

  /* We don't want to touch the value at all unless we can move it from
   * exactly 0 to 1.
   *
   * On failure the builtin writes the observed value back into 'zero',
   * but we discard it -- only the boolean result matters here.
   */
  return compare_exchange_acquire (&mutex->i[0], &zero, 1);
}
1365 /* Condition variables are implemented in a rather simple way as well.
1366 * In many ways, futex() as an abstraction is even more ideally suited
1367 * to condition variables than it is to mutexes.
1369 * We store a generation counter. We sample it with the lock held and
1370 * unlock before sleeping on the futex.
1372 * Signalling simply involves increasing the counter and making the
1373 * appropriate futex call.
1375 * The only thing that is the slightest bit complicated is timed waits
1376 * because we must convert our absolute time to relative.
1379 void
1380 g_cond_init (GCond *cond)
1382 cond->i[0] = 0;
/* Nothing to do: a futex-based GCond owns no kernel object and holds
 * no allocated state beyond the inline counter. */
void
g_cond_clear (GCond *cond)
{
}
/* Atomically release 'mutex' and wait for 'cond' to be signalled. */
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  /* Sample the generation counter while we still hold the mutex, so a
   * signal that happens after we unlock (bumping the counter) makes
   * the FUTEX_WAIT below return immediately instead of sleeping. */
  guint sampled = g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}
/* Wake at most one waiter. */
void
g_cond_signal (GCond *cond)
{
  /* Advance the generation first so that a thread between its sample
   * and its FUTEX_WAIT will not go to sleep at all. */
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
/* Wake every waiter (INT_MAX is the conventional "all" count for
 * FUTEX_WAKE). */
void
g_cond_broadcast (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}
1417 gboolean
1418 g_cond_wait_until (GCond *cond,
1419 GMutex *mutex,
1420 gint64 end_time)
1422 struct timespec now;
1423 struct timespec span;
1424 guint sampled;
1425 int res;
1427 if (end_time < 0)
1428 return FALSE;
1430 clock_gettime (CLOCK_MONOTONIC, &now);
1431 span.tv_sec = (end_time / 1000000) - now.tv_sec;
1432 span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
1433 if (span.tv_nsec < 0)
1435 span.tv_nsec += 1000000000;
1436 span.tv_sec--;
1439 if (span.tv_sec < 0)
1440 return FALSE;
1442 sampled = cond->i[0];
1443 g_mutex_unlock (mutex);
1444 res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span);
1445 g_mutex_lock (mutex);
1447 return (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
1450 #endif
1452 /* {{{1 Epilogue */
1453 /* vim:set foldmethod=marker: */