/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"
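

// This file emulates the kernel's locking primitives (mutex, recursive_lock,
// rw_lock) in userland for the kernelland_emu test add-on. Instead of the
// kernel's thread spinlock, a single global benaphore (sThreadSpinlock below)
// serializes all lock operations, and threads are parked and woken via the
// _kern_block_thread()/_kern_unblock_thread() syscalls.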


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


static void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
static void _rw_lock_write_unlock_threads_locked(rw_lock* lock);

static status_t _mutex_lock_threads_locked(mutex* lock);
static void _mutex_unlock_threads_locked(mutex* lock);


/*!	Helper class playing the role of the kernel's thread spinlock. We don't use
	a spinlock, as that could be expensive in userland (due to the spinlock
	holder potentially being unscheduled), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;
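

// How the benaphore above works: fCount starts at 1, meaning the lock is free.
// Lock() decrements the count; if the previous value was positive, the lock
// was uncontended and the semaphore is never touched. Otherwise the thread
// waits on the semaphore. Unlock() increments the count and releases the
// semaphore only if the previous value was negative, i.e. at least one other
// thread is (or is about to start) waiting on the semaphore.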


// #pragma mark -


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
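

// Illustrative usage sketch for the recursive_lock API above (not part of the
// emulation itself; "sMyLock" is a hypothetical example variable):
//
//	static recursive_lock sMyLock;
//
//	recursive_lock_init(&sMyLock, "my lock");
//	recursive_lock_lock(&sMyLock);
//	recursive_lock_lock(&sMyLock);		// the same thread may nest
//	recursive_lock_unlock(&sMyLock);
//	recursive_lock_unlock(&sMyLock);	// the last unlock releases the mutex
//	recursive_lock_destroy(&sMyLock);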


// #pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}
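

/*!	Wakes up waiting threads after the lock state has changed; called with the
	thread "spinlock" held. If the first waiter is a writer and no readers are
	active or pending, the writer is dequeued, made the holder, and
	RW_LOCK_WRITER_COUNT_BASE is returned. Otherwise all readers at the head of
	the queue are unblocked and their number is returned (0 if nothing could
	be done).
*/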
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}
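

// Note on the rw_lock fields used above: each reader adds 1 to lock->count and
// a writer adds RW_LOCK_WRITER_COUNT_BASE (the uncontended fast paths
// presumably live in the lock.h header), so a count >= RW_LOCK_WRITER_COUNT_BASE
// means a writer holds or waits for the lock. active_readers counts readers
// that actually entered the critical section while a writer was involved, and
// pending_readers counts readers that have incremented the count but no longer
// need to wait because the writer has already left.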


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
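

// Illustrative usage sketch for the rw_lock API (hypothetical example; the
// rw_lock_read_lock()/rw_lock_read_unlock() fast paths are assumed to be
// provided by the lock.h header):
//
//	static rw_lock sCacheLock;
//
//	rw_lock_init(&sCacheLock, "cache lock");
//
//	rw_lock_read_lock(&sCacheLock);		// many readers may hold this at once
//	// ... read the shared data ...
//	rw_lock_read_unlock(&sCacheLock);
//
//	rw_lock_write_lock(&sCacheLock);	// exclusive; nests for the holder
//	// ... modify the shared data ...
//	rw_lock_write_unlock(&sCacheLock);
//
//	rw_lock_destroy(&sCacheLock);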


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}
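

/*!	Read-lock slow path with a timeout: like _rw_lock_read_lock(), but the wait
	is bounded by the given timeout (interpreted according to timeoutFlags,
	which are passed on to _kern_block_thread()). If the wait fails, the thread
	dequeues itself from the waiter list again and takes back its increment of
	the lock count before returning the error.
*/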
status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_read_unlock_threads_locked(lock);
}


static void
_rw_lock_read_unlock_threads_locked(rw_lock* lock)
{
	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG
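

/*!	Acquires the lock for writing. The writer announces its claim by adding
	RW_LOCK_WRITER_COUNT_BASE to the count; if the previous count was 0, the
	lock was free and is taken immediately, otherwise the thread waits until
	rw_lock_unblock() hands the lock over. Write locks nest for the holding
	thread via owner_count.
*/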
status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_write_unlock_threads_locked(lock);
}


static void
_rw_lock_write_unlock_threads_locked(rw_lock* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


// #pragma mark -
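

// Illustrative usage sketch for the mutex API (hypothetical example; the
// uncontended mutex_lock()/mutex_unlock() fast paths are assumed to be inline
// in the lock.h header and to fall back to _mutex_lock()/_mutex_unlock() here
// only on contention):
//
//	static mutex sListLock;
//
//	mutex_init(&sListLock, "list lock");
//
//	mutex_lock(&sListLock);
//	// ... touch the protected data ...
//	mutex_unlock(&sListLock);
//
//	mutex_destroy(&sListLock);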


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock_threads_locked(lock) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
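

/*!	Unlocks "from" and locks "to" such that unlocking "from" and starting to
	wait for "to" happen atomically: both steps run while the global thread
	"spinlock" is held. In the non-KDEBUG build the count of "from" is
	incremented first, so the full unlock path is only taken if "from" actually
	has waiters.
*/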
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock_threads_locked(from);

	return _mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock_threads_locked(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock_threads_locked(from);
#endif

	return _mutex_lock_threads_locked(to);
}
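

/*!	The contended path of locking a mutex; must be called with the thread
	"spinlock" held. In the non-KDEBUG build the caller (presumably the inline
	fast path in lock.h) has already added itself to the lock's count, so the
	mutex may have been released in the meantime -- in that case this returns
	immediately; otherwise the thread is appended to the waiter list and
	blocked.
*/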
static status_t
_mutex_lock_threads_locked(mutex* lock)
{
	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


status_t
_mutex_lock(mutex* lock, void*)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	return _mutex_lock_threads_locked(lock);
}


static void
_mutex_unlock_threads_locked(mutex* lock)
{
#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			find_thread(NULL), lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides actually
		// reflecting the current situation, this avoids a race condition:
		// setting it to -1 would let another locker think the lock is not held
		// by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


void
_mutex_unlock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_mutex_unlock_threads_locked(lock);
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}