/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <AutoLocker.h>
#include <libroot_lock.h>

#include <user_mutex_defs.h>
#include <user_thread.h>
#include <util/DoublyLinkedList.h>

#include "pthread_private.h"

#define MAX_READER_COUNT	1000000

#define RWLOCK_FLAG_SHARED	0x01
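
// RWLOCK_FLAG_SHARED selects between the two implementations below:
// SharedRWLock (process-shared, backed by a kernel semaphore) and
// LocalRWLock (process-private, backed by a userspace mutex and a list of
// waiting threads).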

struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
	Waiter(bool writer)
		:
		userThread(get_user_thread()),
		thread(find_thread(NULL)),
		writer(writer),
		queued(false)
	{
	}

	user_thread*	userThread;
	thread_id		thread;
	status_t		status;
	bool			writer;
	bool			queued;
};

typedef DoublyLinkedList<Waiter> WaiterList;
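
// SharedRWLock implements the lock on top of a single counting semaphore:
// each reader acquires one unit, while a writer acquires all
// MAX_READER_COUNT units at once and thereby excludes both readers and
// other writers.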

struct SharedRWLock {
	uint32_t	flags;
	int32_t		owner;
	int32_t		sem;

	status_t Init()
	{
		flags = RWLOCK_FLAG_SHARED;
		owner = -1;
		sem = create_sem(MAX_READER_COUNT, "pthread rwlock");

		return sem >= 0 ? B_OK : EAGAIN;
	}

	status_t Destroy()
	{
		return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
	}

	status_t ReadLock(bigtime_t timeout)
	{
		return acquire_sem_etc(sem, 1,
			timeout >= 0 ? B_ABSOLUTE_REAL_TIME_TIMEOUT : 0, timeout);
	}

	status_t WriteLock(bigtime_t timeout)
	{
		status_t error = acquire_sem_etc(sem, MAX_READER_COUNT,
			timeout >= 0 ? B_ABSOLUTE_REAL_TIME_TIMEOUT : 0, timeout);
		if (error == B_OK)
			owner = find_thread(NULL);
		return error;
	}

	status_t Unlock()
	{
		if (find_thread(NULL) == owner) {
			owner = -1;
			return release_sem_etc(sem, MAX_READER_COUNT, 0);
		} else
			return release_sem(sem);
	}
};

struct LocalRWLock {
	uint32_t	flags;
	int32_t		owner;
	int32_t		mutex;
	int32_t		reader_count;
	int32_t		writer_count;
		// Note that reader_count and writer_count are not used the same way:
		// writer_count includes the write lock owner as well as waiting
		// writers; reader_count includes read lock owners only.
	WaiterList	waiters;

	status_t Init()
	{
		flags = 0;
		owner = -1;
		mutex = 0;
		reader_count = 0;
		writer_count = 0;
		new(&waiters) WaiterList;

		return B_OK;
	}

	status_t Destroy()
	{
		Locker locker(this);
		if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
			return EBUSY;
		return B_OK;
	}

	bool StructureLock()
	{
		// Enter critical region: lock the mutex
		int32 status = atomic_or((int32*)&mutex, B_USER_MUTEX_LOCKED);

		// If already locked, call the kernel
		if ((status & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) != 0) {
			do {
				status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
			} while (status == B_INTERRUPTED);

			if (status != B_OK)
				return false;
		}
		return true;
	}

	void StructureUnlock()
	{
		// Exit critical region: unlock the mutex
		int32 status = atomic_and((int32*)&mutex,
			~(int32)B_USER_MUTEX_LOCKED);

		if ((status & B_USER_MUTEX_WAITING) != 0)
			_kern_mutex_unlock((int32*)&mutex, 0);
	}
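
	// StructureLock()/StructureUnlock() guard the LocalRWLock bookkeeping
	// (the counters and the waiter list). The uncontended path is a single
	// atomic operation on the userspace mutex word; the kernel is only
	// entered when the mutex is contended.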

	status_t ReadLock(bigtime_t timeout)
	{
		Locker locker(this);

		if (writer_count == 0) {
			reader_count++;
			return B_OK;
		}

		return _Wait(false, timeout);
	}

	status_t WriteLock(bigtime_t timeout)
	{
		Locker locker(this);

		if (reader_count == 0 && writer_count == 0) {
			writer_count++;
			owner = find_thread(NULL);
			return B_OK;
		}

		return _Wait(true, timeout);
	}

	status_t Unlock()
	{
		Locker locker(this);

		if (find_thread(NULL) == owner) {
			writer_count--;
			owner = -1;
		} else
			reader_count--;

		_Unblock();

		return B_OK;
	}

private:
	status_t _Wait(bool writer, bigtime_t timeout)
	{
		if (timeout == 0)
			return B_TIMED_OUT;

		if (writer)
			writer_count++;

		Waiter waiter(writer);
		waiters.Add(&waiter);
		waiter.queued = true;
		waiter.userThread->wait_status = 1;

		StructureUnlock();

		status_t error = _kern_block_thread(
			timeout >= 0 ? B_ABSOLUTE_REAL_TIME_TIMEOUT : 0, timeout);

		StructureLock();

		if (!waiter.queued)
			return waiter.status;

		// We're still queued, which means an error (timeout, interrupt)
		// occurred.
		waiters.Remove(&waiter);

		if (writer)
			writer_count--;

		return error;
	}
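
	// The handshake between _Wait() and _Unblock(): a waiter publishes
	// itself on the waiter list, drops the structure lock and parks itself
	// via _kern_block_thread(). The unlocking thread dequeues it, stores the
	// wake-up result in Waiter::status and unblocks it. If the waiter is
	// still queued when the block returns, the wait timed out or was
	// interrupted.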

	void _Unblock()
	{
		// Check whether there are any waiting threads at all and whether
		// anyone has the write lock.
		Waiter* waiter = waiters.Head();
		if (waiter == NULL || owner >= 0)
			return;

		// writer at head of queue?
		if (waiter->writer) {
			if (reader_count == 0) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);
				owner = waiter->thread;

				if (waiter->userThread->wait_status > 0)
					_kern_unblock_thread(waiter->thread, B_OK);
			}
			return;
		}

		// Wake up one or more readers -- we unblock more than one reader at
		// a time to save trips to the kernel.
		while (!waiters.IsEmpty() && !waiters.Head()->writer) {
			static const int kMaxReaderUnblockCount = 128;
			thread_id readers[kMaxReaderUnblockCount];
			int readerCount = 0;

			while (readerCount < kMaxReaderUnblockCount
					&& (waiter = waiters.Head()) != NULL
					&& !waiter->writer) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);

				if (waiter->userThread->wait_status > 0) {
					readers[readerCount++] = waiter->thread;
					reader_count++;
				}
			}

			_kern_unblock_threads(readers, readerCount, B_OK);
		}
	}

	struct Locking {
		inline bool Lock(LocalRWLock* lockable)
		{
			return lockable->StructureLock();
		}

		inline void Unlock(LocalRWLock* lockable)
		{
			lockable->StructureUnlock();
		}
	};
	typedef AutoLocker<LocalRWLock, Locking> Locker;
};

STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
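	// Both implementations are placed directly into the storage of the
	// public pthread_rwlock_t, so that type must be at least as large as
	// either struct.
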

// #pragma mark - public lock functions

int
pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
	bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;

	if (shared)
		return ((SharedRWLock*)lock)->Init();
	else
		return ((LocalRWLock*)lock)->Init();
}

int
pthread_rwlock_destroy(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Destroy();
	else
		return ((LocalRWLock*)lock)->Destroy();
}

int
pthread_rwlock_rdlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->ReadLock(B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->ReadLock(B_INFINITE_TIMEOUT);
}

int
pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
{
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(0);
	else
		error = ((LocalRWLock*)lock)->ReadLock(0);

	return error == B_TIMED_OUT ? EBUSY : error;
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
	const struct timespec *timeout)
{
	bigtime_t timeoutMicros = timeout->tv_sec * 1000000LL
		+ timeout->tv_nsec / 1000LL;

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(timeoutMicros);
	else
		error = ((LocalRWLock*)lock)->ReadLock(timeoutMicros);

	return error == B_TIMED_OUT ? EBUSY : error;
}

int
pthread_rwlock_wrlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->WriteLock(B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->WriteLock(B_INFINITE_TIMEOUT);
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
{
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(0);
	else
		error = ((LocalRWLock*)lock)->WriteLock(0);

	return error == B_TIMED_OUT ? EBUSY : error;
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
	const struct timespec *timeout)
{
	bigtime_t timeoutMicros = timeout->tv_sec * 1000000LL
		+ timeout->tv_nsec / 1000LL;

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(timeoutMicros);
	else
		error = ((LocalRWLock*)lock)->WriteLock(timeoutMicros);

	return error == B_TIMED_OUT ? EBUSY : error;
}

int
pthread_rwlock_unlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Unlock();
	else
		return ((LocalRWLock*)lock)->Unlock();
}
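
// A minimal caller-side sketch of the functions above (illustration only,
// not part of this implementation); error checking omitted:
//
//	pthread_rwlock_t lock;
//	pthread_rwlock_init(&lock, NULL);
//
//	pthread_rwlock_rdlock(&lock);	// any number of concurrent readers
//	// ... read the shared data ...
//	pthread_rwlock_unlock(&lock);
//
//	pthread_rwlock_wrlock(&lock);	// a single, exclusive writer
//	// ... modify the shared data ...
//	pthread_rwlock_unlock(&lock);
//
//	pthread_rwlock_destroy(&lock);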

// #pragma mark - public attribute functions

int
pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
		sizeof(pthread_rwlockattr));
	if (attr == NULL)
		return B_NO_MEMORY;

	attr->flags = 0;
	*_attr = attr;

	return 0;
}

int
pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = *_attr;

	free(attr);

	return 0;
}

int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
{
	pthread_rwlockattr* attr = *_attr;

	*shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;

	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
{
	pthread_rwlockattr* attr = *_attr;

	if (shared == PTHREAD_PROCESS_SHARED)
		attr->flags |= RWLOCK_FLAG_SHARED;
	else
		attr->flags &= ~RWLOCK_FLAG_SHARED;

	return 0;
}
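
// Illustration only (not part of this file): creating a process-shared lock
// via the attribute functions above. "sharedLock" is a hypothetical
// pthread_rwlock_t* placed in memory mapped by all participating teams:
//
//	pthread_rwlockattr_t attr;
//	pthread_rwlockattr_init(&attr);
//	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
//	pthread_rwlock_init(sharedLock, &attr);
//	pthread_rwlockattr_destroy(&attr);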