/*	$NetBSD: pthread_rwlock.c,v 1.31 2008/06/23 11:00:53 ad Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.31 2008/06/23 11:00:53 ad Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE	__attribute ((noinline))
#else
#define	NOINLINE	/* nothing */
#endif
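/*
 * Where the platform's atomic operations already imply full memory
 * barriers, PTHREAD__ATOMIC_IS_MEMBAR is defined and the explicit
 * membar_enter()/membar_exit() calls below compile away to nothing.
 */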
static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
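/*
 * The entire lock state lives in the single word ptr_owner; the RW_*
 * constants that carve it up are shared with the kernel's rwlock code:
 *
 *	RW_THREAD	owning writer, or count of readers (counted
 *			in units of RW_READ_INCR)
 *	RW_WRITE_LOCKED	a writer holds the lock
 *	RW_WRITE_WANTED	a writer is waiting
 *	RW_HAS_WAITERS	some thread is asleep on the lock
 */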
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
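/*
 * rw_cas() returns the value of ptr_owner actually observed; the swap
 * succeeded iff that equals the expected value "o".  Every lock path
 * below is built on the same retry loop, sketched here:
 *
 *	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
 *		next = rw_cas(ptr, owner, desired(owner));
 *		if (owner == next)
 *			break;
 *	}
 *
 * where desired() stands for whatever new value a given path wants.
 */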
int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
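/*
 * Initialization with a NULL attribute gives the default rwlock; the
 * static PTHREAD_RWLOCK_INITIALIZER in pthread.h is intended to set
 * up the same state.
 */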
int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}
/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}
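/*
 * Spin only when the holder looks likely to release soon: a writer
 * must own the lock (sleeping readers can't be tracked cheaply), and
 * that writer must currently be running on a CPU, which the kernel
 * reports via the thread's lwpctl block.  pthread__smt_pause() is the
 * architecture's spin-wait hint (e.g. the x86 PAUSE instruction) and
 * may be a no-op elsewhere.
 */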
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
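/*
 * The slow paths below share one protocol.  Take the interlock (a
 * mutex hashed from the rwlock's address), CAS the waiter bit(s) into
 * ptr_owner -- retrying from the top if the word changed under us --
 * then queue and sleep in pthread__park().  The unlocking thread hands
 * the lock over directly: it sets pt_rwlocked to _RW_LOCKED before
 * waking us, so a woken thread never has to race to re-acquire.
 */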
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers.  This strategy is
	 * dictated by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
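/*
 * A consequence of the writer-preference policy: a steady stream of
 * readers cannot starve a waiting writer, because new read acquisitions
 * fail (or, in the blocking paths, go to sleep) once RW_WRITE_WANTED
 * is set.
 */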
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}
int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}
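/*
 * The timeouts above are absolute CLOCK_REALTIME times, as POSIX
 * specifies.  A caller-side sketch (error handling and the lock
 * variable are application-specific):
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	switch (pthread_rwlock_timedwrlock(&lock, &ts)) {
 *	case 0:
 *		... got the write lock within 5 seconds ...
 *	case ETIMEDOUT:
 *		... gave up ...
 *	}
 */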
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
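/*
 * A timed lock that gives up may have been the last waiter.  If it
 * simply left, the RW_HAS_WAITERS/RW_WRITE_WANTED bits in ptr_owner
 * would go stale and unlockers would try to hand the lock to nobody.
 * pthread__park() therefore invokes the pt_early hook below so the
 * departing thread can recompute those bits from the queue state.
 */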
/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
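/*
 * The *_held_np() predicates below are inherently racy: the answer
 * can be stale as soon as it is computed.  They are intended for
 * diagnostic assertions rather than for synchronization decisions,
 * e.g.:
 *
 *	assert(pthread_rwlock_wrheld_np(&lock));
 */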
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}
int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}
int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}
int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}