1 /* $NetBSD: rwlock.c,v 1.1.1.2 2009/10/25 00:02:44 christos Exp $ */
4 * Copyright (C) 2004, 2005, 2007, 2009 Internet Systems Consortium, Inc. ("ISC")
5 * Copyright (C) 1998-2001, 2003 Internet Software Consortium.
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
12 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
13 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
14 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
16 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
20 /* Id: rwlock.c,v 1.46 2009/01/18 23:48:14 tbox Exp */
29 #include <isc/atomic.h>
30 #include <isc/magic.h>
32 #include <isc/platform.h>
33 #include <isc/rwlock.h>
36 #define RWLOCK_MAGIC ISC_MAGIC('R', 'W', 'L', 'k')
37 #define VALID_RWLOCK(rwl) ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)
39 #ifdef ISC_PLATFORM_USETHREADS
40 #ifdef ISC_PLATFORM_USE_NATIVE_RWLOCKS
43 isc_rwlock_init(isc_rwlock_t
*rwl
, unsigned int read_quota
,
44 unsigned int write_quota
)
51 return pthread_rwlock_init(rwl
, NULL
) == 0 ?
52 ISC_R_SUCCESS
: ISC_R_FAILURE
;
56 isc_rwlock_lock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
)
61 case isc_rwlocktype_none
:
64 case isc_rwlocktype_read
:
65 return pthread_rwlock_rdlock(rwl
) == 0 ?
66 ISC_R_SUCCESS
: ISC_R_LOCKBUSY
;
68 case isc_rwlocktype_write
:
69 return pthread_rwlock_wrlock(rwl
) == 0 ?
70 ISC_R_SUCCESS
: ISC_R_LOCKBUSY
;
74 return (ISC_R_FAILURE
);
79 isc_rwlock_trylock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
)
84 case isc_rwlocktype_none
:
87 case isc_rwlocktype_read
:
88 return pthread_rwlock_tryrdlock(rwl
) == 0 ?
89 ISC_R_SUCCESS
: ISC_R_LOCKBUSY
;
91 case isc_rwlocktype_write
:
92 return pthread_rwlock_trywrlock(rwl
) == 0 ?
93 ISC_R_SUCCESS
: ISC_R_LOCKBUSY
;
97 return (ISC_R_FAILURE
);
102 isc_rwlock_tryupgrade(isc_rwlock_t
*rwl
)
104 REQUIRE(rwl
!= NULL
);
107 * XXX: we need to make sure we are holding a read lock here
108 * but how to do it atomically?
110 return pthread_rwlock_trywrlock(rwl
) == 0 ?
111 ISC_R_SUCCESS
: ISC_R_LOCKBUSY
;
115 isc_rwlock_downgrade(isc_rwlock_t
*rwl
)
117 REQUIRE(rwl
!= NULL
);
120 * XXX: we need to make sure we are holding a write lock here
121 * and then give it up and get a read lock but how to do it atomically?
123 pthread_rwlock_unlock(rwl
);
124 REQUIRE(pthread_rwlock_tryrdlock(rwl
) == 0);
128 isc_rwlock_unlock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
)
130 REQUIRE(rwl
!= NULL
);
133 pthread_rwlock_unlock(rwl
);
135 return (ISC_R_SUCCESS
);
139 isc_rwlock_destroy(isc_rwlock_t
*rwl
)
141 REQUIRE(rwl
!= NULL
);
144 #else /* !ISC_PLATFORM_USE_NATIVE_RWLOCKS */
146 #ifndef RWLOCK_DEFAULT_READ_QUOTA
147 #define RWLOCK_DEFAULT_READ_QUOTA 4
150 #ifndef RWLOCK_DEFAULT_WRITE_QUOTA
151 #define RWLOCK_DEFAULT_WRITE_QUOTA 4
154 #ifdef ISC_RWLOCK_TRACE
155 #include <stdio.h> /* Required for fprintf/stderr. */
156 #include <isc/thread.h> /* Required for isc_thread_self(). */
159 print_lock(const char *operation
, isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
161 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
163 "rwlock %p thread %lu %s(%s): %s, %u active, "
164 "%u granted, %u rwaiting, %u wwaiting\n"),
165 rwl
, isc_thread_self(), operation
,
166 (type
== isc_rwlocktype_read
?
167 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
168 ISC_MSG_READ
, "read") :
169 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
170 ISC_MSG_WRITE
, "write")),
171 (rwl
->type
== isc_rwlocktype_read
?
172 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
173 ISC_MSG_READING
, "reading") :
174 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
175 ISC_MSG_WRITING
, "writing")),
176 rwl
->active
, rwl
->granted
, rwl
->readers_waiting
,
177 rwl
->writers_waiting
);
182 isc_rwlock_init(isc_rwlock_t
*rwl
, unsigned int read_quota
,
183 unsigned int write_quota
)
187 REQUIRE(rwl
!= NULL
);
190 * In case there's trouble initializing, we zero magic now. If all
191 * goes well, we'll set it to RWLOCK_MAGIC.
195 #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
196 rwl
->write_requests
= 0;
197 rwl
->write_completions
= 0;
198 rwl
->cnt_and_flag
= 0;
199 rwl
->readers_waiting
= 0;
200 rwl
->write_granted
= 0;
201 if (read_quota
!= 0) {
202 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
203 "read quota is not supported");
205 if (write_quota
== 0)
206 write_quota
= RWLOCK_DEFAULT_WRITE_QUOTA
;
207 rwl
->write_quota
= write_quota
;
209 rwl
->type
= isc_rwlocktype_read
;
210 rwl
->original
= isc_rwlocktype_none
;
213 rwl
->readers_waiting
= 0;
214 rwl
->writers_waiting
= 0;
216 read_quota
= RWLOCK_DEFAULT_READ_QUOTA
;
217 rwl
->read_quota
= read_quota
;
218 if (write_quota
== 0)
219 write_quota
= RWLOCK_DEFAULT_WRITE_QUOTA
;
220 rwl
->write_quota
= write_quota
;
223 result
= isc_mutex_init(&rwl
->lock
);
224 if (result
!= ISC_R_SUCCESS
)
227 result
= isc_condition_init(&rwl
->readable
);
228 if (result
!= ISC_R_SUCCESS
) {
229 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
230 "isc_condition_init(readable) %s: %s",
231 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
232 ISC_MSG_FAILED
, "failed"),
233 isc_result_totext(result
));
234 result
= ISC_R_UNEXPECTED
;
237 result
= isc_condition_init(&rwl
->writeable
);
238 if (result
!= ISC_R_SUCCESS
) {
239 UNEXPECTED_ERROR(__FILE__
, __LINE__
,
240 "isc_condition_init(writeable) %s: %s",
241 isc_msgcat_get(isc_msgcat
, ISC_MSGSET_GENERAL
,
242 ISC_MSG_FAILED
, "failed"),
243 isc_result_totext(result
));
244 result
= ISC_R_UNEXPECTED
;
248 rwl
->magic
= RWLOCK_MAGIC
;
250 return (ISC_R_SUCCESS
);
253 (void)isc_condition_destroy(&rwl
->readable
);
255 DESTROYLOCK(&rwl
->lock
);
261 isc_rwlock_destroy(isc_rwlock_t
*rwl
) {
262 REQUIRE(VALID_RWLOCK(rwl
));
264 #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
265 REQUIRE(rwl
->write_requests
== rwl
->write_completions
&&
266 rwl
->cnt_and_flag
== 0 && rwl
->readers_waiting
== 0);
269 REQUIRE(rwl
->active
== 0 &&
270 rwl
->readers_waiting
== 0 &&
271 rwl
->writers_waiting
== 0);
276 (void)isc_condition_destroy(&rwl
->readable
);
277 (void)isc_condition_destroy(&rwl
->writeable
);
278 DESTROYLOCK(&rwl
->lock
);
281 #if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
284 * When some architecture-dependent atomic operations are available,
285 * rwlock can be more efficient than the generic algorithm defined below.
286 * The basic algorithm is described in the following URL:
287 * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html
289 * The key is to use the following integer variables modified atomically:
290 * write_requests, write_completions, and cnt_and_flag.
292 * write_requests and write_completions act as a waiting queue for writers
293 * in order to ensure the FIFO order. Both variables begin with the initial
294 * value of 0. When a new writer tries to get a write lock, it increments
295 * write_requests and gets the previous value of the variable as a "ticket".
296 * When write_completions reaches the ticket number, the new writer can start
297 * writing. When the writer completes its work, it increments
298 * write_completions so that another new writer can start working. If the
299 * write_requests is not equal to write_completions, it means a writer is now
300 * working or waiting. In this case, new readers cannot start reading, or
301 * in other words, this algorithm basically prefers writers.
303 * cnt_and_flag is a "lock" shared by all readers and writers. This integer
304 * variable is a kind of structure with two members: writer_flag (1 bit) and
305 * reader_count (31 bits). The writer_flag shows whether a writer is working,
306 * and the reader_count shows the number of readers currently working or almost
307 * ready for working. A writer who has the current "ticket" tries to get the
308 * lock by exclusively setting the writer_flag to 1, provided that the whole
309 * 32-bit is 0 (meaning no readers or writers working). On the other hand,
310 * a new reader tries to increment the "reader_count" field provided that
311 * the writer_flag is 0 (meaning there is no writer working).
313 * If some of the above operations fail, the reader or the writer sleeps
314 * until the related condition changes. When a working reader or writer
315 * completes its work, some readers or writers are sleeping, and the condition
316 * that suspended the reader or writer has changed, it wakes up the sleeping
317 * readers or writers.
319 * As already noted, this algorithm basically prefers writers. In order to
320 * prevent readers from starving, however, the algorithm also introduces the
321 * "writer quota" (Q). When Q consecutive writers have completed their work,
322 * suspending readers, the last writer will wake up the readers, even if a new
325 * Implementation specific note: due to the combination of atomic operations
326 * and a mutex lock, ordering between the atomic operation and locks can be
327 * very sensitive in some cases. In particular, it is generally very important
328 * to check the atomic variable that requires a reader or writer to sleep after
329 * locking the mutex and before actually sleeping; otherwise, it could be very
330 * likely to cause a deadlock. For example, assume "var" is a variable
331 * atomically modified, then the corresponding code would be:
332 * if (var == need_sleep) {
334 * if (var == need_sleep)
338 * The second check is important, since "var" is protected by the atomic
339 * operation, not by the mutex, and can be changed just before sleeping.
340 * (The first "if" could be omitted, but this is also important in order to
341 * make the code efficient by avoiding the use of the mutex unless it is
345 #define WRITER_ACTIVE 0x1
346 #define READER_INCR 0x2
349 isc_rwlock_lock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
352 REQUIRE(VALID_RWLOCK(rwl
));
354 #ifdef ISC_RWLOCK_TRACE
355 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
356 ISC_MSG_PRELOCK
, "prelock"), rwl
, type
);
359 if (type
== isc_rwlocktype_read
) {
360 if (rwl
->write_requests
!= rwl
->write_completions
) {
361 /* there is a waiting or active writer */
363 if (rwl
->write_requests
!= rwl
->write_completions
) {
364 rwl
->readers_waiting
++;
365 WAIT(&rwl
->readable
, &rwl
->lock
);
366 rwl
->readers_waiting
--;
371 cntflag
= isc_atomic_xadd(&rwl
->cnt_and_flag
, READER_INCR
);
373 if ((rwl
->cnt_and_flag
& WRITER_ACTIVE
) == 0)
376 /* A writer is still working */
378 rwl
->readers_waiting
++;
379 if ((rwl
->cnt_and_flag
& WRITER_ACTIVE
) != 0)
380 WAIT(&rwl
->readable
, &rwl
->lock
);
381 rwl
->readers_waiting
--;
385 * Typically, the reader should be able to get a lock
387 * (1) there should have been no pending writer when
388 * the reader was trying to increment the
389 * counter; otherwise, the writer should be in
390 * the waiting queue, preventing the reader from
391 * proceeding to this point.
392 * (2) once the reader increments the counter, no
393 * more writer can get a lock.
394 * Still, it is possible another writer can work at
395 * this point, e.g. in the following scenario:
396 * A previous writer unlocks the writer lock.
397 * This reader proceeds to point (1).
398 * A new writer appears, and gets a new lock before
399 * the reader increments the counter.
400 * The reader then increments the counter.
401 * The previous writer notices there is a waiting
402 * reader who is almost ready, and wakes it up.
403 * So, the reader needs to confirm whether it can now
404 * read explicitly (thus we loop). Note that this is
405 * not an infinite process, since the reader has
406 * incremented the counter at this point.
411 * If we are temporarily preferred to writers due to the writer
412 * quota, reset the condition (race among readers doesn't
415 rwl
->write_granted
= 0;
417 isc_int32_t prev_writer
;
419 /* enter the waiting queue, and wait for our turn */
420 prev_writer
= isc_atomic_xadd(&rwl
->write_requests
, 1);
421 while (rwl
->write_completions
!= prev_writer
) {
423 if (rwl
->write_completions
!= prev_writer
) {
424 WAIT(&rwl
->writeable
, &rwl
->lock
);
433 cntflag
= isc_atomic_cmpxchg(&rwl
->cnt_and_flag
, 0,
438 /* Another active reader or writer is working. */
440 if (rwl
->cnt_and_flag
!= 0)
441 WAIT(&rwl
->writeable
, &rwl
->lock
);
445 INSIST((rwl
->cnt_and_flag
& WRITER_ACTIVE
) != 0);
446 rwl
->write_granted
++;
449 #ifdef ISC_RWLOCK_TRACE
450 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
451 ISC_MSG_POSTLOCK
, "postlock"), rwl
, type
);
454 return (ISC_R_SUCCESS
);
458 isc_rwlock_trylock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
461 REQUIRE(VALID_RWLOCK(rwl
));
463 #ifdef ISC_RWLOCK_TRACE
464 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
465 ISC_MSG_PRELOCK
, "prelock"), rwl
, type
);
468 if (type
== isc_rwlocktype_read
) {
469 /* If a writer is waiting or working, we fail. */
470 if (rwl
->write_requests
!= rwl
->write_completions
)
471 return (ISC_R_LOCKBUSY
);
473 /* Otherwise, be ready for reading. */
474 cntflag
= isc_atomic_xadd(&rwl
->cnt_and_flag
, READER_INCR
);
475 if ((cntflag
& WRITER_ACTIVE
) != 0) {
477 * A writer is working. We lose, and cancel the read
480 cntflag
= isc_atomic_xadd(&rwl
->cnt_and_flag
,
483 * If no other readers are waiting and we've suspended
484 * new writers in this short period, wake them up.
486 if (cntflag
== READER_INCR
&&
487 rwl
->write_completions
!= rwl
->write_requests
) {
489 BROADCAST(&rwl
->writeable
);
493 return (ISC_R_LOCKBUSY
);
496 /* Try locking without entering the waiting queue. */
497 cntflag
= isc_atomic_cmpxchg(&rwl
->cnt_and_flag
, 0,
500 return (ISC_R_LOCKBUSY
);
503 * XXXJT: jump into the queue, possibly breaking the writer
506 (void)isc_atomic_xadd(&rwl
->write_completions
, -1);
508 rwl
->write_granted
++;
511 #ifdef ISC_RWLOCK_TRACE
512 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
513 ISC_MSG_POSTLOCK
, "postlock"), rwl
, type
);
516 return (ISC_R_SUCCESS
);
520 isc_rwlock_tryupgrade(isc_rwlock_t
*rwl
) {
523 REQUIRE(VALID_RWLOCK(rwl
));
525 /* Try to acquire write access. */
526 prevcnt
= isc_atomic_cmpxchg(&rwl
->cnt_and_flag
,
527 READER_INCR
, WRITER_ACTIVE
);
529 * There must have been no writer, and there must have been at least
532 INSIST((prevcnt
& WRITER_ACTIVE
) == 0 &&
533 (prevcnt
& ~WRITER_ACTIVE
) != 0);
535 if (prevcnt
== READER_INCR
) {
537 * We are the only reader and have been upgraded.
538 * Now jump into the head of the writer waiting queue.
540 (void)isc_atomic_xadd(&rwl
->write_completions
, -1);
542 return (ISC_R_LOCKBUSY
);
544 return (ISC_R_SUCCESS
);
549 isc_rwlock_downgrade(isc_rwlock_t
*rwl
) {
550 isc_int32_t prev_readers
;
552 REQUIRE(VALID_RWLOCK(rwl
));
554 /* Become an active reader. */
555 prev_readers
= isc_atomic_xadd(&rwl
->cnt_and_flag
, READER_INCR
);
556 /* We must have been a writer. */
557 INSIST((prev_readers
& WRITER_ACTIVE
) != 0);
560 (void)isc_atomic_xadd(&rwl
->cnt_and_flag
, -WRITER_ACTIVE
);
561 (void)isc_atomic_xadd(&rwl
->write_completions
, 1);
563 /* Resume other readers */
565 if (rwl
->readers_waiting
> 0)
566 BROADCAST(&rwl
->readable
);
571 isc_rwlock_unlock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
572 isc_int32_t prev_cnt
;
574 REQUIRE(VALID_RWLOCK(rwl
));
576 #ifdef ISC_RWLOCK_TRACE
577 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
578 ISC_MSG_PREUNLOCK
, "preunlock"), rwl
, type
);
581 if (type
== isc_rwlocktype_read
) {
582 prev_cnt
= isc_atomic_xadd(&rwl
->cnt_and_flag
, -READER_INCR
);
585 * If we're the last reader and any writers are waiting, wake
586 * them up. We need to wake up all of them to ensure the
589 if (prev_cnt
== READER_INCR
&&
590 rwl
->write_completions
!= rwl
->write_requests
) {
592 BROADCAST(&rwl
->writeable
);
596 isc_boolean_t wakeup_writers
= ISC_TRUE
;
599 * Reset the flag, and (implicitly) tell other writers
602 (void)isc_atomic_xadd(&rwl
->cnt_and_flag
, -WRITER_ACTIVE
);
603 (void)isc_atomic_xadd(&rwl
->write_completions
, 1);
605 if (rwl
->write_granted
>= rwl
->write_quota
||
606 rwl
->write_requests
== rwl
->write_completions
||
607 (rwl
->cnt_and_flag
& ~WRITER_ACTIVE
) != 0) {
609 * We have passed the write quota, no writer is
610 * waiting, or some readers are almost ready, pending
611 * possible writers. Note that the last case can
612 * happen even if write_requests != write_completions
613 * (which means a new writer in the queue), so we need
614 * to catch the case explicitly.
617 if (rwl
->readers_waiting
> 0) {
618 wakeup_writers
= ISC_FALSE
;
619 BROADCAST(&rwl
->readable
);
624 if (rwl
->write_requests
!= rwl
->write_completions
&&
627 BROADCAST(&rwl
->writeable
);
632 #ifdef ISC_RWLOCK_TRACE
633 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
634 ISC_MSG_POSTUNLOCK
, "postunlock"),
638 return (ISC_R_SUCCESS
);
641 #else /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
644 doit(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
, isc_boolean_t nonblock
) {
645 isc_boolean_t skip
= ISC_FALSE
;
646 isc_boolean_t done
= ISC_FALSE
;
647 isc_result_t result
= ISC_R_SUCCESS
;
649 REQUIRE(VALID_RWLOCK(rwl
));
653 #ifdef ISC_RWLOCK_TRACE
654 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
655 ISC_MSG_PRELOCK
, "prelock"), rwl
, type
);
658 if (type
== isc_rwlocktype_read
) {
659 if (rwl
->readers_waiting
!= 0)
663 ((rwl
->active
== 0 ||
664 (rwl
->type
== isc_rwlocktype_read
&&
665 (rwl
->writers_waiting
== 0 ||
666 rwl
->granted
< rwl
->read_quota
)))))
668 rwl
->type
= isc_rwlocktype_read
;
672 } else if (nonblock
) {
673 result
= ISC_R_LOCKBUSY
;
677 rwl
->readers_waiting
++;
678 WAIT(&rwl
->readable
, &rwl
->lock
);
679 rwl
->readers_waiting
--;
683 if (rwl
->writers_waiting
!= 0)
686 if (!skip
&& rwl
->active
== 0) {
687 rwl
->type
= isc_rwlocktype_write
;
691 } else if (nonblock
) {
692 result
= ISC_R_LOCKBUSY
;
696 rwl
->writers_waiting
++;
697 WAIT(&rwl
->writeable
, &rwl
->lock
);
698 rwl
->writers_waiting
--;
703 #ifdef ISC_RWLOCK_TRACE
704 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
705 ISC_MSG_POSTLOCK
, "postlock"), rwl
, type
);
714 isc_rwlock_lock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
715 return (doit(rwl
, type
, ISC_FALSE
));
719 isc_rwlock_trylock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
720 return (doit(rwl
, type
, ISC_TRUE
));
724 isc_rwlock_tryupgrade(isc_rwlock_t
*rwl
) {
725 isc_result_t result
= ISC_R_SUCCESS
;
727 REQUIRE(VALID_RWLOCK(rwl
));
729 REQUIRE(rwl
->type
== isc_rwlocktype_read
);
730 REQUIRE(rwl
->active
!= 0);
732 /* If we are the only reader then succeed. */
733 if (rwl
->active
== 1) {
734 rwl
->original
= (rwl
->original
== isc_rwlocktype_none
) ?
735 isc_rwlocktype_read
: isc_rwlocktype_none
;
736 rwl
->type
= isc_rwlocktype_write
;
738 result
= ISC_R_LOCKBUSY
;
745 isc_rwlock_downgrade(isc_rwlock_t
*rwl
) {
747 REQUIRE(VALID_RWLOCK(rwl
));
749 REQUIRE(rwl
->type
== isc_rwlocktype_write
);
750 REQUIRE(rwl
->active
== 1);
752 rwl
->type
= isc_rwlocktype_read
;
753 rwl
->original
= (rwl
->original
== isc_rwlocktype_none
) ?
754 isc_rwlocktype_write
: isc_rwlocktype_none
;
756 * Resume processing any read request that were blocked when
759 if (rwl
->original
== isc_rwlocktype_none
&&
760 (rwl
->writers_waiting
== 0 || rwl
->granted
< rwl
->read_quota
) &&
761 rwl
->readers_waiting
> 0)
762 BROADCAST(&rwl
->readable
);
768 isc_rwlock_unlock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
770 REQUIRE(VALID_RWLOCK(rwl
));
772 REQUIRE(rwl
->type
== type
);
776 #ifdef ISC_RWLOCK_TRACE
777 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
778 ISC_MSG_PREUNLOCK
, "preunlock"), rwl
, type
);
781 INSIST(rwl
->active
> 0);
783 if (rwl
->active
== 0) {
784 if (rwl
->original
!= isc_rwlocktype_none
) {
785 rwl
->type
= rwl
->original
;
786 rwl
->original
= isc_rwlocktype_none
;
788 if (rwl
->type
== isc_rwlocktype_read
) {
790 if (rwl
->writers_waiting
> 0) {
791 rwl
->type
= isc_rwlocktype_write
;
792 SIGNAL(&rwl
->writeable
);
793 } else if (rwl
->readers_waiting
> 0) {
794 /* Does this case ever happen? */
795 BROADCAST(&rwl
->readable
);
798 if (rwl
->readers_waiting
> 0) {
799 if (rwl
->writers_waiting
> 0 &&
800 rwl
->granted
< rwl
->write_quota
) {
801 SIGNAL(&rwl
->writeable
);
804 rwl
->type
= isc_rwlocktype_read
;
805 BROADCAST(&rwl
->readable
);
807 } else if (rwl
->writers_waiting
> 0) {
809 SIGNAL(&rwl
->writeable
);
815 INSIST(rwl
->original
== isc_rwlocktype_none
);
817 #ifdef ISC_RWLOCK_TRACE
818 print_lock(isc_msgcat_get(isc_msgcat
, ISC_MSGSET_RWLOCK
,
819 ISC_MSG_POSTUNLOCK
, "postunlock"),
825 return (ISC_R_SUCCESS
);
828 #endif /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
829 #endif /* !ISC_PLATFORM_USE_NATIVE_RWLOCKS */
830 #else /* ISC_PLATFORM_USETHREADS */
833 isc_rwlock_init(isc_rwlock_t
*rwl
, unsigned int read_quota
,
834 unsigned int write_quota
)
836 REQUIRE(rwl
!= NULL
);
841 rwl
->type
= isc_rwlocktype_read
;
843 rwl
->magic
= RWLOCK_MAGIC
;
845 return (ISC_R_SUCCESS
);
849 isc_rwlock_lock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
850 REQUIRE(VALID_RWLOCK(rwl
));
852 if (type
== isc_rwlocktype_read
) {
853 if (rwl
->type
!= isc_rwlocktype_read
&& rwl
->active
!= 0)
854 return (ISC_R_LOCKBUSY
);
855 rwl
->type
= isc_rwlocktype_read
;
858 if (rwl
->active
!= 0)
859 return (ISC_R_LOCKBUSY
);
860 rwl
->type
= isc_rwlocktype_write
;
863 return (ISC_R_SUCCESS
);
867 isc_rwlock_trylock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
868 return (isc_rwlock_lock(rwl
, type
));
872 isc_rwlock_tryupgrade(isc_rwlock_t
*rwl
) {
873 isc_result_t result
= ISC_R_SUCCESS
;
875 REQUIRE(VALID_RWLOCK(rwl
));
876 REQUIRE(rwl
->type
== isc_rwlocktype_read
);
877 REQUIRE(rwl
->active
!= 0);
879 /* If we are the only reader then succeed. */
880 if (rwl
->active
== 1)
881 rwl
->type
= isc_rwlocktype_write
;
883 result
= ISC_R_LOCKBUSY
;
888 isc_rwlock_downgrade(isc_rwlock_t
*rwl
) {
890 REQUIRE(VALID_RWLOCK(rwl
));
891 REQUIRE(rwl
->type
== isc_rwlocktype_write
);
892 REQUIRE(rwl
->active
== 1);
894 rwl
->type
= isc_rwlocktype_read
;
898 isc_rwlock_unlock(isc_rwlock_t
*rwl
, isc_rwlocktype_t type
) {
899 REQUIRE(VALID_RWLOCK(rwl
));
900 REQUIRE(rwl
->type
== type
);
904 INSIST(rwl
->active
> 0);
907 return (ISC_R_SUCCESS
);
911 isc_rwlock_destroy(isc_rwlock_t
*rwl
) {
912 REQUIRE(rwl
!= NULL
);
913 REQUIRE(rwl
->active
== 0);
917 #endif /* ISC_PLATFORM_USETHREADS */