src/system/kernel/locks/lock.cpp

/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex and recursive_lock code */

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>

struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};
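// Waiter records live on the stack of the blocked thread and form a singly
// linked queue headed by the lock's "waiters" field; the first waiter also
// caches a pointer to the queue's tail in "last", so enqueueing stays O(1).
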
#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME
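// MUTEX_FLAG_RELEASED is only used by the !KDEBUG implementation: it marks a
// mutex that was unlocked after a would-be waiter had already decremented the
// count, but before that waiter enqueued itself, so the next _mutex_lock()
// call can take the lock without blocking.
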
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}

void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}

void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}

void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}

status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}

status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}

void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}

// #pragma mark -

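// rw_lock bookkeeping: "count" is maintained with atomic_add() -- each reader
// adds 1 and each writer adds RW_LOCK_WRITER_COUNT_BASE -- so a single counter
// encodes both the reader count and the number of (pending) writers. The
// read-lock fast path lives inline in the lock.h header and only drops into
// _rw_lock_read_lock() below once a writer has announced itself.
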
static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}

static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}

void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}

void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}

void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}

#if !KDEBUG_RW_LOCK_DEBUG

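// With KDEBUG_RW_LOCK_DEBUG enabled, read locks are acquired as write locks
// (see mutex_switch_from_read_lock() below), so the dedicated read-lock slow
// paths are only compiled for the normal case.
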
status_t
_rw_lock_read_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add()
	// might already have gone and another writer could have overtaken us. In
	// this case the original writer set pending_readers, so we know that we
	// don't have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false, locker);
}

status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add()
	// might already have gone and another writer could have overtaken us. In
	// this case the original writer set pending_readers, so we know that we
	// don't have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	locker.Lock();

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}

void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG

status_t
rw_lock_write_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}

void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this "
			"thread", lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the
			// critical section yet, won't start to wait. Otherwise a writer
			// overtaking such a reader will correctly start to wait, but the
			// reader, seeing the writer count > 0, would also start to wait.
			// We set pending_readers to the number of readers that are still
			// expected to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}

static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers   %d\n", lock->active_readers);
	kprintf("  pending readers  %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id,
			waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}

// #pragma mark -

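// Mutex bookkeeping: without KDEBUG, "count" starts at 0 and is decremented by
// every locker -- the inline mutex_lock() only calls _mutex_lock() when the
// previous value was already negative, i.e. when the mutex is contended (see
// mutex_lock_threads_locked() and mutex_switch_lock() below for the same
// pattern). With KDEBUG, all operations go through the functions in this file
// so the holder can be tracked and misuse caught with a panic().
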
void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}

void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}

void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}

static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}

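// Unlocks "from" and acquires "to" in one step: "to"'s spinlock is held before
// "from" is released, so unlocking and starting to wait for the new lock is
// effectively a single atomic operation.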
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}

status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from);
#endif

	return mutex_lock_threads_locked(to, &locker);
}

status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// lock only, if !lockLocked
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();
#if KDEBUG
	if (error == B_OK)
		atomic_set(&lock->holder, waiter.thread->id);
#endif
	return error;
}

void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;
#if KDEBUG
		thread_id unblockedThread = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

#if KDEBUG
		// Already set the holder to the unblocked thread. Besides that this
		// actually reflects the current situation, setting it to -1 would
		// cause a race condition, since another locker could think the lock
		// is not held by anyone.
		lock->holder = unblockedThread;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}

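// Note: only the KDEBUG build reaches the body below; without KDEBUG the
// try-lock fast path is provided inline by the lock.h header and this function
// simply reports B_WOULD_BLOCK.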
status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder <= 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}

status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}

		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the
				// ignore unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}

static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}

// #pragma mark -

void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
}