/* Locking in multithreaded situations.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
   gthr-win32.h.  */

#include <config.h>

#include "glthread/lock.h"

/* ========================================================================= */

#if USE_POSIX_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))

#  ifdef PTHREAD_RWLOCK_INITIALIZER

#   if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
/* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */

int
glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
{
  pthread_rwlockattr_t attributes;
  int err;

  err = pthread_rwlockattr_init (&attributes);
  if (err != 0)
    return err;
  /* Note: PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the only value that
     causes the writer to be preferred.  PTHREAD_RWLOCK_PREFER_WRITER_NP does
     not do this; see
     http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
  err = pthread_rwlockattr_setkind_np (&attributes,
                                       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  if (err == 0)
    err = pthread_rwlock_init (lock, &attributes);
  /* pthread_rwlockattr_destroy always returns 0.  It cannot influence the
     return value.  */
  pthread_rwlockattr_destroy (&attributes);
  return err;
}

#   endif

#  else

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_rwlock_init (&lock->rwlock, NULL);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_rwlock_unlock (&lock->rwlock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_rwlock_destroy (&lock->rwlock);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->lock, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_readers, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_writers, NULL);
  if (err != 0)
    return err;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_destroy (&lock->lock);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_readers);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_writers);
  if (err != 0)
    return err;
  return 0;
}

# endif

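/* Illustrative sketch (not part of the original source): how the
   writer-preferring read-write lock implemented above is normally consumed
   through the gl_rwlock_* macros from "glthread/lock.h".  The macro names are
   taken from that header; the variable and function names are invented for
   the example, and the block is left disabled so it does not affect the
   build.  */
#if 0
gl_rwlock_define_initialized (static, example_rwlock)
static int example_value;

static int
example_read (void)
{
  int v;
  gl_rwlock_rdlock (example_rwlock);   /* shared: many readers at once */
  v = example_value;
  gl_rwlock_unlock (example_rwlock);
  return v;
}

static void
example_write (int v)
{
  gl_rwlock_wrlock (example_rwlock);   /* exclusive: waits until runcount == 0 */
  example_value = v;
  gl_rwlock_unlock (example_rwlock);   /* wakes a waiting writer first, else all readers */
}
#endif
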
/* --------------------- gl_recursive_lock_t datatype --------------------- */

# if HAVE_PTHREAD_MUTEX_RECURSIVE

#  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (lock, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  return 0;
}

#  else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_mutex_unlock (&lock->recmutex);
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_mutex_destroy (&lock->recmutex);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->mutex, NULL);
  if (err != 0)
    return err;
  lock->owner = (pthread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (pthread_t) 0;
      return pthread_mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    return EBUSY;
  return pthread_mutex_destroy (&lock->mutex);
}

# endif

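/* Illustrative sketch (not part of the original source): the owner/depth
   bookkeeping above lets the owning thread re-acquire the lock, provided each
   lock call is balanced by an unlock.  The gl_recursive_lock_* macros come
   from "glthread/lock.h"; everything else is invented for the example and the
   block is left disabled.  */
#if 0
gl_recursive_lock_define_initialized (static, example_reclock)

static void
example_inner (void)
{
  gl_recursive_lock_lock (example_reclock);    /* same owner: depth becomes 2 */
  /* ... protected work ... */
  gl_recursive_lock_unlock (example_reclock);  /* depth back to 1 */
}

static void
example_outer (void)
{
  gl_recursive_lock_lock (example_reclock);    /* depth 1, owner = this thread */
  example_inner ();
  gl_recursive_lock_unlock (example_reclock);  /* depth 0, mutex released */
}
#endif
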
/* -------------------------- gl_once_t datatype -------------------------- */

static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *) once_control;
  if (*firstbyte == *(const char *) &fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *) &fresh_once;
      return 1;
    }
  else
    return 0;
}

#endif

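/* Illustrative sketch (not part of the original source): the once-control
   machinery above backs the gl_once macro from "glthread/lock.h", which runs
   an initialization routine at most once even when several threads race to
   it.  The function and variable names are invented; the block is left
   disabled.  */
#if 0
gl_once_define (static, example_once)
static int example_table_ready;

static void
example_init_table (void)
{
  /* Executed by exactly one of the racing threads.  */
  example_table_ready = 1;
}

static int
example_get_table (void)
{
  gl_once (example_once, example_init_table);
  return example_table_ready;
}
#endif
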
/* ========================================================================= */

#if USE_PTH_THREADS

/* Use the GNU Pth threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if !HAVE_PTH_RWLOCK_ACQUIRE_PREFER_WRITER

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  if (!pth_mutex_init (&lock->lock))
    return errno;
  if (!pth_cond_init (&lock->waiting_readers))
    return errno;
  if (!pth_cond_init (&lock->waiting_writers))
    return errno;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      if (!pth_cond_await (&lock->waiting_readers, &lock->lock, NULL))
        {
          int err = errno;
          pth_mutex_release (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    glthread_rwlock_init_multithreaded (lock);
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      if (!pth_cond_await (&lock->waiting_writers, &lock->lock, NULL))
        {
          int err = errno;
          lock->waiting_writers_count--;
          pth_mutex_release (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  if (!pth_mutex_acquire (&lock->lock, 0, NULL))
    return errno;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pth_mutex_release (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pth_mutex_release (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          if (!pth_cond_notify (&lock->waiting_writers, FALSE))
            {
              int err = errno;
              pth_mutex_release (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          if (!pth_cond_notify (&lock->waiting_readers, TRUE))
            {
              int err = errno;
              pth_mutex_release (&lock->lock);
              return err;
            }
        }
    }
  return (!pth_mutex_release (&lock->lock) ? errno : 0);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  lock->initialized = 0;
  return 0;
}

# endif

/* --------------------- gl_recursive_lock_t datatype --------------------- */

/* -------------------------- gl_once_t datatype -------------------------- */

static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}

int
glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
{
  void (*temp) (void) = initfunction;
  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
}

int
glthread_once_singlethreaded (pth_once_t *once_control)
{
  /* We know that pth_once_t is an integer type.  */
  if (*once_control == PTH_ONCE_INIT)
    {
      /* First time use of once_control.  Invert the marker.  */
      *once_control = ~ PTH_ONCE_INIT;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_SOLARIS_THREADS

/* Use the old Solaris threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
  if (err != 0)
    return err;
  lock->owner = (thread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (thread_t) 0;
      return mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    return EBUSY;
  return mutex_destroy (&lock->mutex);
}

/* -------------------------- gl_once_t datatype -------------------------- */

int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}

int
glthread_once_singlethreaded (gl_once_t *once_control)
{
  /* We know that gl_once_t contains an integer type.  */
  if (!once_control->inited)
    {
      /* First time use of once_control.  Invert the marker.  */
      once_control->inited = ~ 0;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_WINDOWS_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

void
glthread_lock_init_func (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_unlock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_destroy_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

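/* Illustrative sketch (not part of the original source): the lazy-init guard
   used by the Windows lock functions above, restated on its own.  As far as I
   can tell from gl_lock_initializer in "glthread/lock.h", guard.started
   starts out at -1, so the first thread to increment it sees 0 and performs
   the initialization while later threads spin on guard.done.  The function
   name is invented and the block is left disabled.  */
#if 0
static void
example_guarded_init (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* We won the race: perform the one-time initialization.  */
        glthread_lock_init (lock);
      else
        /* Another thread is initializing; yield until it finishes.  */
        while (!lock->guard.done)
          Sleep (0);
    }
}
#endif
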
/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* In this file, the waitqueues are implemented as circular arrays.  */
#define gl_waitqueue_t gl_carray_waitqueue_t

static void
gl_waitqueue_init (gl_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
gl_waitqueue_add (gl_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         starts at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one,
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == INVALID_HANDLE_VALUE)
    /* No way to allocate an event.  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}

/* Notifies the first thread from a wait queue and dequeues it.  */
static void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static void
gl_waitqueue_notify_all (gl_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

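/* Illustrative sketch (not part of the original source): the circular-array
   arithmetic used by the waitqueue helpers above.  A slot is addressed as
   offset + position, wrapped into [0, alloc).  The function name is invented
   and the block is left disabled; it assumes the gl_carray_waitqueue_t layout
   from "glthread/lock.h" (array, count, alloc, offset).  */
#if 0
static unsigned int
example_waitqueue_slot (const gl_carray_waitqueue_t *wq, unsigned int pos)
{
  unsigned int index = wq->offset + pos;  /* pos counts from the queue head */
  if (index >= wq->alloc)
    index -= wq->alloc;                   /* wrap around the end of the array */
  return index;
}
#endif
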
void
glthread_rwlock_init_func (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting.  The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation".  */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_destroy_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}

/* --------------------- gl_recursive_lock_t datatype --------------------- */

void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}

int
glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}

int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* -------------------------- gl_once_t datatype -------------------------- */

void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}

#endif

/* ========================================================================= */