unicode: Simplify width table generation
[glib.git] / glib / gthread-posix.c
blob6f5a60664b15830ddbcb6446ea364f3dc347b2da
1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
23 * file for a list of people on the GLib Team. See the ChangeLog
24 * files for a list of changes. These files are distributed with
25 * GLib at ftp://ftp.gtk.org/pub/gtk/.
28 /* The GMutex, GCond and GPrivate implementations in this file are some
29 * of the lowest-level code in GLib. All other parts of GLib (messages,
30 * memory, slices, etc) assume that they can freely use these facilities
31 * without risking recursion.
33 * As such, these functions are NOT permitted to call any other part of
34 * GLib.
36 * The thread manipulation functions (create, exit, join, etc.) have
37 * more freedom -- they can do as they please.
40 #include "config.h"
42 #include "gthread.h"
44 #include "gthreadprivate.h"
45 #include "gslice.h"
46 #include "gmessages.h"
47 #include "gstrfuncs.h"
48 #include "gmain.h"
50 #include <stdlib.h>
51 #include <stdio.h>
52 #include <string.h>
53 #include <errno.h>
54 #include <pthread.h>
56 #include <sys/time.h>
57 #include <unistd.h>
59 #ifdef HAVE_SCHED_H
60 #include <sched.h>
61 #endif
62 #ifdef HAVE_SYS_PRCTL_H
63 #include <sys/prctl.h>
64 #endif
65 #ifdef G_OS_WIN32
66 #include <windows.h>
67 #endif
/* Report an unexpected failure from the C library and terminate.
 *
 * This is the lowest-level error path in the file: per the header
 * comment, it must not call back into any other part of GLib, so it
 * uses only fprintf()/strerror()/abort(). */
static void
g_thread_abort (gint status,
                const gchar *function)
{
  fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
           function, strerror (status));
  abort ();
}
78 /* {{{1 GMutex */
80 static pthread_mutex_t *
81 g_mutex_impl_new (void)
83 pthread_mutexattr_t *pattr = NULL;
84 pthread_mutex_t *mutex;
85 gint status;
86 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
87 pthread_mutexattr_t attr;
88 #endif
90 mutex = malloc (sizeof (pthread_mutex_t));
91 if G_UNLIKELY (mutex == NULL)
92 g_thread_abort (errno, "malloc");
94 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
95 pthread_mutexattr_init (&attr);
96 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
97 pattr = &attr;
98 #endif
100 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
101 g_thread_abort (status, "pthread_mutex_init");
103 #ifdef PTHREAD_ADAPTIVE_MUTEX_NP
104 pthread_mutexattr_destroy (&attr);
105 #endif
107 return mutex;
/* Destroy and release a pthread mutex allocated by g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
/* Return the pthread mutex backing @mutex, lazily allocating it on
 * first use.
 *
 * Lock-free initialisation: racing threads may each allocate an
 * implementation, but only one wins the compare-and-exchange; losers
 * free their copy and reload the winner's pointer. */
static inline pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
      impl = mutex->p;
    }

  return impl;
}
/**
 * g_mutex_init:
 * @mutex: an uninitialized #GMutex
 *
 * Initializes a #GMutex so that it can be used.
 *
 * This function is useful to initialize a mutex that has been
 * allocated on the stack, or as part of a larger structure.
 * It is not necessary to initialize a mutex that has been
 * statically allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GMutex m;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_mutex_init (&b->m);
 * ]|
 *
 * To undo the effect of g_mutex_init() when a mutex is no longer
 * needed, use g_mutex_clear().
 *
 * Calling g_mutex_init() on an already initialized #GMutex leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_mutex_init (GMutex *mutex)
{
  mutex->p = g_mutex_impl_new ();
}
/**
 * g_mutex_clear:
 * @mutex: an initialized #GMutex
 *
 * Frees the resources allocated to a mutex with g_mutex_init().
 *
 * This function should not be used with a #GMutex that has been
 * statically allocated.
 *
 * Calling g_mutex_clear() on a locked mutex leads to undefined
 * behaviour.
 *
 * Since: 2.32
 */
void
g_mutex_clear (GMutex *mutex)
{
  g_mutex_impl_free (mutex->p);
}
/**
 * g_mutex_lock:
 * @mutex: a #GMutex
 *
 * Locks @mutex. If @mutex is already locked by another thread, the
 * current thread will block until @mutex is unlocked by the other
 * thread.
 *
 * #GMutex is neither guaranteed to be recursive nor to be
 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
 * already been locked by the same thread results in undefined behaviour
 * (including but not limited to deadlocks).
 */
void
g_mutex_lock (GMutex *mutex)
{
  gint status;

  /* pthread_mutex_lock() only fails for programming errors
   * (e.g. EDEADLK, EINVAL), so treat any failure as fatal. */
  if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_lock");
}
/**
 * g_mutex_unlock:
 * @mutex: a #GMutex
 *
 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
 * call for @mutex, it will become unblocked and can lock @mutex itself.
 *
 * Calling g_mutex_unlock() on a mutex that is not locked by the
 * current thread leads to undefined behaviour.
 */
void
g_mutex_unlock (GMutex *mutex)
{
  gint status;

  if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_unlock");
}
233 * g_mutex_trylock:
234 * @mutex: a #GMutex
236 * Tries to lock @mutex. If @mutex is already locked by another thread,
237 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
238 * %TRUE.
240 * #GMutex is neither guaranteed to be recursive nor to be
241 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
242 * already been locked by the same thread results in undefined behaviour
243 * (including but not limited to deadlocks or arbitrary return values).
245 * Returns: %TRUE if @mutex could be locked
247 gboolean
248 g_mutex_trylock (GMutex *mutex)
250 gint status;
252 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
253 return TRUE;
255 if G_UNLIKELY (status != EBUSY)
256 g_thread_abort (status, "pthread_mutex_trylock");
258 return FALSE;
261 /* {{{1 GRecMutex */
263 static pthread_mutex_t *
264 g_rec_mutex_impl_new (void)
266 pthread_mutexattr_t attr;
267 pthread_mutex_t *mutex;
269 mutex = malloc (sizeof (pthread_mutex_t));
270 if G_UNLIKELY (mutex == NULL)
271 g_thread_abort (errno, "malloc");
273 pthread_mutexattr_init (&attr);
274 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
275 pthread_mutex_init (mutex, &attr);
276 pthread_mutexattr_destroy (&attr);
278 return mutex;
/* Destroy and release a mutex allocated by g_rec_mutex_impl_new(). */
static void
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}
/* Return the pthread mutex backing @rec_mutex, lazily allocating it.
 * Same lock-free first-use pattern as g_mutex_get_impl(). */
static inline pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
      impl = rec_mutex->p;
    }

  return impl;
}
/**
 * g_rec_mutex_init:
 * @rec_mutex: an uninitialized #GRecMutex
 *
 * Initializes a #GRecMutex so that it can be used.
 *
 * This function is useful to initialize a recursive mutex
 * that has been allocated on the stack, or as part of a larger
 * structure.
 *
 * It is not necessary to initialise a recursive mutex that has been
 * statically allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GRecMutex m;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_rec_mutex_init (&b->m);
 * ]|
 *
 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
 * leads to undefined behaviour.
 *
 * To undo the effect of g_rec_mutex_init() when a recursive mutex
 * is no longer needed, use g_rec_mutex_clear().
 *
 * Since: 2.32
 */
void
g_rec_mutex_init (GRecMutex *rec_mutex)
{
  rec_mutex->p = g_rec_mutex_impl_new ();
}
/**
 * g_rec_mutex_clear:
 * @rec_mutex: an initialized #GRecMutex
 *
 * Frees the resources allocated to a recursive mutex with
 * g_rec_mutex_init().
 *
 * This function should not be used with a #GRecMutex that has been
 * statically allocated.
 *
 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rec_mutex_clear (GRecMutex *rec_mutex)
{
  g_rec_mutex_impl_free (rec_mutex->p);
}
365 * g_rec_mutex_lock:
366 * @rec_mutex: a #GRecMutex
368 * Locks @rec_mutex. If @rec_mutex is already locked by another
369 * thread, the current thread will block until @rec_mutex is
370 * unlocked by the other thread. If @rec_mutex is already locked
371 * by the current thread, the 'lock count' of @rec_mutex is increased.
372 * The mutex will only become available again when it is unlocked
373 * as many times as it has been locked.
375 * Since: 2.32
377 void
378 g_rec_mutex_lock (GRecMutex *mutex)
380 pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
/**
 * g_rec_mutex_unlock:
 * @rec_mutex: a #GRecMutex
 *
 * Unlocks @rec_mutex. If another thread is blocked in a
 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
 * and can lock @rec_mutex itself.
 *
 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
 * locked by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rec_mutex_unlock (GRecMutex *rec_mutex)
{
  /* Uses the stored pointer directly: a mutex being unlocked must
   * already have been locked, so the implementation exists. */
  pthread_mutex_unlock (rec_mutex->p);
}
403 * g_rec_mutex_trylock:
404 * @rec_mutex: a #GRecMutex
406 * Tries to lock @rec_mutex. If @rec_mutex is already locked
407 * by another thread, it immediately returns %FALSE. Otherwise
408 * it locks @rec_mutex and returns %TRUE.
410 * Returns: %TRUE if @rec_mutex could be locked
412 * Since: 2.32
414 gboolean
415 g_rec_mutex_trylock (GRecMutex *rec_mutex)
417 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
418 return FALSE;
420 return TRUE;
423 /* {{{1 GRWLock */
/* Allocate and initialise the pthread rwlock backing a GRWLock.
 * Aborts the process on failure. */
static pthread_rwlock_t *
g_rw_lock_impl_new (void)
{
  pthread_rwlock_t *rwlock;
  gint status;

  rwlock = malloc (sizeof (pthread_rwlock_t));
  if G_UNLIKELY (rwlock == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
    g_thread_abort (status, "pthread_rwlock_init");

  return rwlock;
}
/* Destroy and release an rwlock allocated by g_rw_lock_impl_new(). */
static void
g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
{
  pthread_rwlock_destroy (rwlock);
  free (rwlock);
}
/* Return the pthread rwlock backing @lock, lazily allocating it.
 * Same lock-free first-use pattern as g_mutex_get_impl(). */
static inline pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
{
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
      impl = lock->p;
    }

  return impl;
}
/**
 * g_rw_lock_init:
 * @rw_lock: an uninitialized #GRWLock
 *
 * Initializes a #GRWLock so that it can be used.
 *
 * This function is useful to initialize a lock that has been
 * allocated on the stack, or as part of a larger structure. It is not
 * necessary to initialise a reader-writer lock that has been statically
 * allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GRWLock l;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_rw_lock_init (&b->l);
 * ]|
 *
 * To undo the effect of g_rw_lock_init() when a lock is no longer
 * needed, use g_rw_lock_clear().
 *
 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_init (GRWLock *rw_lock)
{
  rw_lock->p = g_rw_lock_impl_new ();
}
/**
 * g_rw_lock_clear:
 * @rw_lock: an initialized #GRWLock
 *
 * Frees the resources allocated to a lock with g_rw_lock_init().
 *
 * This function should not be used with a #GRWLock that has been
 * statically allocated.
 *
 * Calling g_rw_lock_clear() when any thread holds the lock
 * leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_clear (GRWLock *rw_lock)
{
  g_rw_lock_impl_free (rw_lock->p);
}
/**
 * g_rw_lock_writer_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a write lock on @rw_lock. If any thread already holds
 * a read or write lock on @rw_lock, the current thread will block
 * until all other threads have dropped their locks on @rw_lock.
 *
 * Since: 2.32
 */
void
g_rw_lock_writer_lock (GRWLock *rw_lock)
{
  pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
}
538 * g_rw_lock_writer_trylock:
539 * @rw_lock: a #GRWLock
541 * Tries to obtain a write lock on @rw_lock. If any other thread holds
542 * a read or write lock on @rw_lock, it immediately returns %FALSE.
543 * Otherwise it locks @rw_lock and returns %TRUE.
545 * Returns: %TRUE if @rw_lock could be locked
547 * Since: 2.32
549 gboolean
550 g_rw_lock_writer_trylock (GRWLock *rw_lock)
552 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
553 return FALSE;
555 return TRUE;
/**
 * g_rw_lock_writer_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a write lock on @rw_lock.
 *
 * Calling g_rw_lock_writer_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_writer_unlock (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
/**
 * g_rw_lock_reader_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a read lock on @rw_lock. If another thread currently holds
 * the write lock on @rw_lock or blocks waiting for it, the current
 * thread will block. Read locks can be taken recursively.
 *
 * It is implementation-defined how many threads are allowed to
 * hold read locks on the same lock simultaneously.
 *
 * Since: 2.32
 */
void
g_rw_lock_reader_lock (GRWLock *rw_lock)
{
  pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
}
595 * g_rw_lock_reader_trylock:
596 * @rw_lock: a #GRWLock
598 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
599 * the read lock was successfully obtained. Otherwise it
600 * returns %FALSE.
602 * Returns: %TRUE if @rw_lock could be locked
604 * Since: 2.32
606 gboolean
607 g_rw_lock_reader_trylock (GRWLock *rw_lock)
609 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
610 return FALSE;
612 return TRUE;
/**
 * g_rw_lock_reader_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a read lock on @rw_lock.
 *
 * Calling g_rw_lock_reader_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_reader_unlock (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
632 /* {{{1 GCond */
/* Allocate and initialise the pthread condition variable backing a
 * GCond, configured so g_cond_wait_until() can use monotonic time.
 * Aborts the process on failure. */
static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* The relative-timedwait variant needs no clock setup: the deadline
   * is converted to a relative timeout at call time. */
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}
/* Destroy and release a condition variable allocated by g_cond_impl_new(). */
static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  free (cond);
}
/* Return the pthread condition variable backing @cond, lazily
 * allocating it.  Same lock-free first-use pattern as
 * g_mutex_get_impl(). */
static inline pthread_cond_t *
g_cond_get_impl (GCond *cond)
{
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
      impl = cond->p;
    }

  return impl;
}
/**
 * g_cond_init:
 * @cond: an uninitialized #GCond
 *
 * Initialises a #GCond so that it can be used.
 *
 * This function is useful to initialise a #GCond that has been
 * allocated as part of a larger structure. It is not necessary to
 * initialise a #GCond that has been statically allocated.
 *
 * To undo the effect of g_cond_init() when a #GCond is no longer
 * needed, use g_cond_clear().
 *
 * Calling g_cond_init() on an already-initialised #GCond leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_cond_init (GCond *cond)
{
  cond->p = g_cond_impl_new ();
}
/**
 * g_cond_clear:
 * @cond: an initialised #GCond
 *
 * Frees the resources allocated to a #GCond with g_cond_init().
 *
 * This function should not be used with a #GCond that has been
 * statically allocated.
 *
 * Calling g_cond_clear() for a #GCond on which threads are
 * blocking leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_cond_clear (GCond *cond)
{
  g_cond_impl_free (cond->p);
}
/**
 * g_cond_wait:
 * @cond: a #GCond
 * @mutex: a #GMutex that is currently locked
 *
 * Atomically releases @mutex and waits until @cond is signalled.
 * When this function returns, @mutex is locked again and owned by the
 * calling thread.
 *
 * When using condition variables, it is possible that a spurious wakeup
 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
 * not called). It's also possible that a stolen wakeup may occur.
 * This is when g_cond_signal() is called, but another thread acquires
 * @mutex before this thread and modifies the state of the program in
 * such a way that when g_cond_wait() is able to return, the expected
 * condition is no longer met.
 *
 * For this reason, g_cond_wait() must always be used in a loop. See
 * the documentation for #GCond for a complete example.
 */
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_cond_wait");
}
/**
 * g_cond_signal:
 * @cond: a #GCond
 *
 * If threads are waiting for @cond, at least one of them is unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to hold the same lock as the waiting thread
 * while calling this function, though not required.
 */
void
g_cond_signal (GCond *cond)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_signal");
}
/**
 * g_cond_broadcast:
 * @cond: a #GCond
 *
 * If threads are waiting for @cond, all of them are unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to lock the same mutex as the waiting threads
 * while calling this function, though not required.
 */
void
g_cond_broadcast (GCond *cond)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_broadcast");
}
/**
 * g_cond_wait_until:
 * @cond: a #GCond
 * @mutex: a #GMutex that is currently locked
 * @end_time: the monotonic time to wait until
 *
 * Waits until either @cond is signalled or @end_time has passed.
 *
 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
 * could occur. For that reason, waiting on a condition variable should
 * always be in a loop, based on an explicitly-checked predicate.
 *
 * %TRUE is returned if the condition variable was signalled (or in the
 * case of a spurious wakeup). %FALSE is returned if @end_time has
 * passed.
 *
 * The following code shows how to correctly perform a timed wait on a
 * condition variable (extending the example presented in the
 * documentation for #GCond):
 *
 * |[<!-- language="C" -->
 * gpointer
 * pop_data_timed (void)
 * {
 *   gint64 end_time;
 *   gpointer data;
 *
 *   g_mutex_lock (&data_mutex);
 *
 *   end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
 *   while (!current_data)
 *     if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
 *       {
 *         // timeout has passed.
 *         g_mutex_unlock (&data_mutex);
 *         return NULL;
 *       }
 *
 *   // there is data for us
 *   data = current_data;
 *   current_data = NULL;
 *
 *   g_mutex_unlock (&data_mutex);
 *
 *   return data;
 * }
 * ]|
 *
 * Notice that the end time is calculated once, before entering the
 * loop and reused. This is the motivation behind the use of absolute
 * time on this API -- if a relative time of 5 seconds were passed
 * directly to the call and a spurious wakeup occurred, the program would
 * have to start over waiting again (which would lead to a total wait
 * time of more than 5 seconds).
 *
 * Returns: %TRUE on a signal, %FALSE on a timeout
 * Since: 2.32
 */
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given relative to the monotonic clock as returned by
   * g_get_monotonic_time().
   *
   * Since this pthreads wants the relative time, convert it back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    /* g_get_monotonic_time() is in microseconds; split into s + ns. */
    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  /* ETIMEDOUT is the expected timeout result; anything else is fatal. */
  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}
905 /* {{{1 GPrivate */
908 * GPrivate:
910 * The #GPrivate struct is an opaque data structure to represent a
911 * thread-local data key. It is approximately equivalent to the
912 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
913 * TlsSetValue()/TlsGetValue() on Windows.
915 * If you don't already know why you might want this functionality,
916 * then you probably don't need it.
918 * #GPrivate is a very limited resource (as far as 128 per program,
919 * shared between all libraries). It is also not possible to destroy a
920 * #GPrivate after it has been used. As such, it is only ever acceptable
921 * to use #GPrivate in static scope, and even then sparingly so.
923 * See G_PRIVATE_INIT() for a couple of examples.
925 * The #GPrivate structure should be considered opaque. It should only
926 * be accessed via the g_private_ functions.
930 * G_PRIVATE_INIT:
931 * @notify: a #GDestroyNotify
933 * A macro to assist with the static initialisation of a #GPrivate.
935 * This macro is useful for the case that a #GDestroyNotify function
936 * should be associated the key. This is needed when the key will be
937 * used to point at memory that should be deallocated when the thread
938 * exits.
940 * Additionally, the #GDestroyNotify will also be called on the previous
941 * value stored in the key when g_private_replace() is used.
943 * If no #GDestroyNotify is needed, then use of this macro is not
944 * required -- if the #GPrivate is declared in static scope then it will
945 * be properly initialised by default (ie: to all zeros). See the
946 * examples below.
948 * |[<!-- language="C" -->
949 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
951 * // return value should not be freed
952 * const gchar *
953 * get_local_name (void)
955 * return g_private_get (&name_key);
958 * void
959 * set_local_name (const gchar *name)
961 * g_private_replace (&name_key, g_strdup (name));
965 * static GPrivate count_key; // no free function
967 * gint
968 * get_local_count (void)
970 * return GPOINTER_TO_INT (g_private_get (&count_key));
973 * void
974 * set_local_count (gint count)
976 * g_private_set (&count_key, GINT_TO_POINTER (count));
978 * ]|
980 * Since: 2.32
983 static pthread_key_t *
984 g_private_impl_new (GDestroyNotify notify)
986 pthread_key_t *key;
987 gint status;
989 key = malloc (sizeof (pthread_key_t));
990 if G_UNLIKELY (key == NULL)
991 g_thread_abort (errno, "malloc");
992 status = pthread_key_create (key, notify);
993 if G_UNLIKELY (status != 0)
994 g_thread_abort (status, "pthread_key_create");
996 return key;
999 static void
1000 g_private_impl_free (pthread_key_t *key)
1002 gint status;
1004 status = pthread_key_delete (*key);
1005 if G_UNLIKELY (status != 0)
1006 g_thread_abort (status, "pthread_key_delete");
1007 free (key);
/* Return the pthread TLS key backing @key, lazily allocating it.
 *
 * Note: unlike the mutex/cond variants, the losing thread of the
 * compare-and-exchange reloads the winner's pointer inside the
 * failure branch only -- the winner keeps its own 'impl'. */
static inline pthread_key_t *
g_private_get_impl (GPrivate *key)
{
  pthread_key_t *impl = g_atomic_pointer_get (&key->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_private_impl_new (key->notify);
      if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
        {
          g_private_impl_free (impl);
          impl = key->p;
        }
    }

  return impl;
}
/**
 * g_private_get:
 * @key: a #GPrivate
 *
 * Returns the current value of the thread local variable @key.
 *
 * If the value has not yet been set in this thread, %NULL is returned.
 * Values are never copied between threads (when a new thread is
 * created, for example).
 *
 * Returns: the thread-local value
 */
gpointer
g_private_get (GPrivate *key)
{
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
  return pthread_getspecific (*g_private_get_impl (key));
}
/**
 * g_private_set:
 * @key: a #GPrivate
 * @value: the new value
 *
 * Sets the thread local variable @key to have the value @value in the
 * current thread.
 *
 * This function differs from g_private_replace() in the following way:
 * the #GDestroyNotify for @key is not called on the old value.
 */
void
g_private_set (GPrivate *key,
               gpointer  value)
{
  gint status;

  if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
    g_thread_abort (status, "pthread_setspecific");
}
1069 * g_private_replace:
1070 * @key: a #GPrivate
1071 * @value: the new value
1073 * Sets the thread local variable @key to have the value @value in the
1074 * current thread.
1076 * This function differs from g_private_set() in the following way: if
1077 * the previous value was non-%NULL then the #GDestroyNotify handler for
1078 * @key is run on it.
1080 * Since: 2.32
1082 void
1083 g_private_replace (GPrivate *key,
1084 gpointer value)
1086 pthread_key_t *impl = g_private_get_impl (key);
1087 gpointer old;
1088 gint status;
1090 old = pthread_getspecific (*impl);
1091 if (old && key->notify)
1092 key->notify (old);
1094 if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1095 g_thread_abort (status, "pthread_setspecific");
1098 /* {{{1 GThread */
/* Check the result of a pthread call and g_error() out on failure.
 * Unlike g_thread_abort(), this is used by the thread-manipulation
 * functions below, which are permitted to call other parts of GLib. */
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

/* Convenience wrapper: stringifies the call for the error message. */
#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
/* POSIX-specific thread record.  The GRealThread member must come
 * first: the code below casts between GRealThread* and GThreadPosix*. */
typedef struct
{
  GRealThread thread;

  pthread_t system_thread;  /* underlying pthread handle */
  gboolean  joined;         /* TRUE once pthread_join() has completed */
  GMutex    lock;           /* serialises concurrent join attempts */
} GThreadPosix;
/* Release the resources of a thread created by g_system_thread_new().
 * If the thread was never joined, detach it so the system can reclaim
 * its resources when it terminates. */
void
g_system_thread_free (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  if (!pt->joined)
    pthread_detach (pt->system_thread);

  g_mutex_clear (&pt->lock);

  g_slice_free (GThreadPosix, pt);
}
/* Create a new system thread running @thread_func.
 *
 * @stack_size of 0 means the platform default; otherwise it is clamped
 * up to the platform minimum where that can be queried.  On EAGAIN
 * (resource exhaustion) sets @error and returns NULL; any other
 * pthread_create() failure is treated as a fatal programming error. */
GRealThread *
g_system_thread_new (GThreadFunc   thread_func,
                     gulong        stack_size,
                     GError      **error)
{
  GThreadPosix *thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      stack_size = MAX (sysconf (_SC_THREAD_STACK_MIN), stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread);

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      /* Recoverable: report to the caller instead of aborting.
       * The lock was never initialised, so only the slice is freed. */
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}
/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}
/* Wait for @thread to terminate, joining it at most once.
 * The per-thread lock prevents two callers from both issuing
 * pthread_join() on the same system thread. */
void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}
/* Terminate the calling thread. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
/* Set the name of the calling thread where the platform supports it
 * (Linux prctl(PR_SET_NAME)); a no-op elsewhere. */
void
g_system_thread_set_name (const gchar *name)
{
#ifdef HAVE_SYS_PRCTL_H
#ifdef PR_SET_NAME
  prctl (PR_SET_NAME, name, 0, 0, 0, 0);
#endif
#endif
}
1222 /* {{{1 Epilogue */
1223 /* vim:set foldmethod=marker: */