/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */


#include <AutoLocker.h>

#include <user_thread.h>
#include <user_thread_defs.h>

struct mutex_waiter {
	struct thread*	thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	struct thread*	thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME

static void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
static void _rw_lock_write_unlock_threads_locked(rw_lock* lock);

static status_t _mutex_lock_threads_locked(mutex* lock);
static void _mutex_unlock_threads_locked(mutex* lock);

/*!	Helper class playing the role of the kernel's thread spinlock. We don't use
	a spinlock, as that could be expensive in userland (due to the spinlock
	holder potentially being unscheduled), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;

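// Note on the benaphore above: with fCount starting at 1, an uncontended
// Lock() is a single atomic_add() and never touches the semaphore; only when
// another thread already holds the lock does Lock() fall back to
// acquire_sem(), and Unlock() only calls release_sem() when the count shows
// that someone is actually waiting. The callers below always take this lock
// through AutoLocker<ThreadSpinlock>, so it is released automatically when
// the locker goes out of scope.
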
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}

void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}

void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}

void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}

status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
		lock->holder = thread;
	}

	lock->recursion++;
	return B_OK;
}

status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

		lock->holder = thread;
	}

	lock->recursion++;
	return B_OK;
}

void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
		RECURSIVE_LOCK_HOLDER(lock) = -1;
		mutex_unlock(&lock->lock);
	}
}

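// Note: only the outermost recursive_lock_unlock() releases the underlying
// mutex; nested lock/unlock pairs by the holder just adjust lock->recursion
// while RECURSIVE_LOCK_HOLDER(lock) stays set to the owning thread.
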
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	return error;
}

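// How the block/unblock handshake above works: the waiter publishes itself in
// the lock's queue, sets its user_thread::wait_status, drops the thread
// "spinlock" and calls _kern_block_thread(). Whoever later releases the lock
// dequeues the waiter, wakes it via _kern_unblock_thread() and clears
// waiter->thread, all while holding sThreadSpinlock, so a cleared thread
// pointer doubles as the "already unblocked" marker checked by the timeout
// variant of the read lock below.
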
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}

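// On the count encoding used throughout: lock->count is maintained with
// atomic_add(), readers adding/removing 1 and writers adding/removing
// RW_LOCK_WRITER_COUNT_BASE (defined in the lock header). A count of at least
// RW_LOCK_WRITER_COUNT_BASE therefore means "at least one writer has
// announced itself", while the low part of the count is the number of readers
// that have entered, or are about to enter, the critical section.
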
void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}

void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}

void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}

#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}

status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}

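// The check "error == B_OK || waiter.thread == NULL" above covers the race
// where the timeout fires just as the lock is released: the releaser, still
// holding sThreadSpinlock, has already dequeued us, counted us as an active
// reader and cleared waiter.thread, so even though our wait returned an error
// we must treat the lock as acquired rather than back out.
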
void
_rw_lock_read_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_read_unlock_threads_locked(lock);
}

static void
_rw_lock_read_unlock_threads_locked(rw_lock* lock)
{
	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}


#endif	// !KDEBUG_RW_LOCK_DEBUG

status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}

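// Note on the oldCount handling above: the value returned by atomic_add() is
// a snapshot of the count before our writer claim. If it is below
// RW_LOCK_WRITER_COUNT_BASE we are the first writer, and the readers counted
// in that snapshot fall into two groups -- those already inside the critical
// section (active_readers) and those that have incremented the count but not
// yet reached _rw_lock_read_lock() (pending_readers) -- which is why the
// snapshot is corrected by pending_readers before we go to sleep.
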
void
_rw_lock_write_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_write_unlock_threads_locked(lock);
}

static void
_rw_lock_write_unlock_threads_locked(rw_lock* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}

void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->count = 0;
	lock->holder = -1;
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->count = 0;
	lock->holder = -1;
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}

void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock_threads_locked(lock) != B_OK)
			return;
	}

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}

status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (atomic_add(&from->count, 1) < -1)
		_mutex_unlock_threads_locked(from);

	return _mutex_lock_threads_locked(to);
}

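// mutex_switch_lock() releases "from" and acquires (or blocks on) "to" while
// holding sThreadSpinlock the whole time, so no other thread can observe a
// window in which both locks are free. The atomic_add() on from->count only
// bothers with a full unlock when the old count shows that another thread has
// also decremented it, i.e. is waiting on "from".
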
status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock_threads_locked(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock_threads_locked(from);
#endif

	return _mutex_lock_threads_locked(to);
}

static status_t
_mutex_lock_threads_locked(mutex* lock)
{
	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);

	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		lock->holder = find_thread(NULL);
		return B_OK;
	}

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	lock->holder = get_thread_id(waiter.thread);

	return error;
}

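// MUTEX_FLAG_RELEASED exists for the race documented at the top of
// _mutex_lock_threads_locked(): a contender can decrement the mutex count and
// then lose the CPU before taking sThreadSpinlock. If the holder unlocks in
// that window it finds no waiter to hand the mutex to, so it marks the mutex
// as released instead, and the late contender consumes that flag here rather
// than blocking with nobody left to wake it.
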
status_t
_mutex_lock(mutex* lock, void*)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	return _mutex_lock_threads_locked(lock);
}

static void
_mutex_unlock_threads_locked(mutex* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			find_thread(NULL), lock, lock->holder);
		return;
	}

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

		// Already set the holder to the unblocked thread. Besides that this
		// actually reflects the current situation, setting it to -1 would
		// cause a race condition, since another locker could think the lock
		// is not held by anyone.
		lock->holder = get_thread_id(waiter->thread);
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
		lock->flags |= MUTEX_FLAG_RELEASED;
	}
}

void
_mutex_unlock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_mutex_unlock_threads_locked(lock);
}

status_t
_mutex_trylock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}

	return B_WOULD_BLOCK;
}