/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex and recursive_lock code */

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>

struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME
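
// Note on the counting scheme used below: readers add 1 to an rw_lock's count
// (in the public read lock wrapper, presumably in <lock.h>), while writers add
// RW_LOCK_WRITER_COUNT_BASE (see rw_lock_write_lock() and the atomic_add()
// calls in this file). A count of at least RW_LOCK_WRITER_COUNT_BASE therefore
// means that a writer has announced its claim on the lock.
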
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled())
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}

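
// Usage sketch for recursive locks (comment only; assumes the recursive_lock
// structure and these functions are declared in <lock.h>):
//
//	recursive_lock myLock;	// hypothetical example lock
//	recursive_lock_init(&myLock, "my lock");
//
//	recursive_lock_lock(&myLock);
//	recursive_lock_lock(&myLock);	// the holding thread may re-enter
//	recursive_lock_unlock(&myLock);
//	recursive_lock_unlock(&myLock);	// the last unlock releases the mutex
//
//	recursive_lock_destroy(&myLock);

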
static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}

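
// Called with lock->lock held. Wakes either the writer at the head of the
// queue (returning RW_LOCK_WRITER_COUNT_BASE) or a run of consecutive waiting
// readers (returning how many were woken); returns 0 if nothing could be
// unblocked.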
static uint32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}

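
// Usage sketch for r/w locks (comment only; rw_lock_read_lock() and the other
// public calls, as well as RW_LOCK_INITIALIZER(), are assumed to be the
// wrappers from <lock.h> that map onto the functions in this file):
//
//	static rw_lock sListLock = RW_LOCK_INITIALIZER("list lock");	// hypothetical
//
//	rw_lock_read_lock(&sListLock);	// shared access, may be held concurrently
//	// ... read the protected data ...
//	rw_lock_read_unlock(&sListLock);
//
//	rw_lock_write_lock(&sListLock);	// exclusive access
//	// ... modify the protected data ...
//	rw_lock_write_unlock(&sListLock);

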
void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false, locker);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	locker.Lock();

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG

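
// When KDEBUG_RW_LOCK_DEBUG is set, the read-lock fast paths above are
// compiled out; read locks are then taken as write locks (cf.
// mutex_switch_from_read_lock() below, which calls _rw_lock_write_unlock()
// in that configuration).

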
status_t
rw_lock_write_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet, won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}

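
// Worked example for the count arithmetic above: suppose that, while we held
// the write lock, one more writer and three readers announced themselves, so
// count == 2 * RW_LOCK_WRITER_COUNT_BASE + 3. Our atomic_add() returns that
// value, and after subtracting RW_LOCK_WRITER_COUNT_BASE once more, oldCount
// == RW_LOCK_WRITER_COUNT_BASE + 3 >= RW_LOCK_WRITER_COUNT_BASE, i.e. the
// "writers are waiting" branch is taken.

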
static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers   %d\n", lock->active_readers);
	kprintf("  pending readers  %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id,
			waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kprintf("\n");

	return 0;
}


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}

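
// Usage sketch for mutexes (comment only; assumes the public mutex_lock()/
// mutex_unlock() wrappers and the MUTEX_INITIALIZER() macro from <lock.h>,
// plus MutexLocker from <util/AutoLock.h>, which is included above):
//
//	static mutex sQueueLock = MUTEX_INITIALIZER("queue lock");	// hypothetical
//
//	void process_queue()
//	{
//		MutexLocker locker(sQueueLock);	// locks here, unlocks when leaving scope
//		// ... work on the data protected by sQueueLock ...
//	}

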
void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}

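
// mutex_switch_lock() and mutex_switch_from_read_lock() below unlock "from"
// and lock "to" such that releasing "from" and starting to wait for "to"
// happen atomically with respect to to's spinlock, which is held across the
// switch.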
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from);
#endif

	return mutex_lock_threads_locked(to, &locker);
}


status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// lock only, if !lockLocked
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();

#if KDEBUG
	if (error == B_OK)
		atomic_set(&lock->holder, waiter.thread->id);
#endif

	return error;
}


void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

#if KDEBUG
		thread_id unblockedThread = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

#if KDEBUG
		// Already set the holder to the unblocked thread. Besides that this
		// actually reflects the current situation, setting it to -1 would
		// cause a race condition, since another locker could think the lock
		// is not held by anyone.
		lock->holder = unblockedThread;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder <= 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}


status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kprintf("\n");

	return 0;
}


	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);