[gecko.git] / nsprpub / pr / src / threads / prrwlock.c
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <string.h>

#if defined(HPUX) && defined(_PR_PTHREADS)

# include <pthread.h>
# define HAVE_UNIX98_RWLOCK
# define RWLOCK_T pthread_rwlock_t
# define RWLOCK_INIT(lock) pthread_rwlock_init(lock, NULL)
# define RWLOCK_DESTROY(lock) pthread_rwlock_destroy(lock)
# define RWLOCK_RDLOCK(lock) pthread_rwlock_rdlock(lock)
# define RWLOCK_WRLOCK(lock) pthread_rwlock_wrlock(lock)
# define RWLOCK_UNLOCK(lock) pthread_rwlock_unlock(lock)

#elif defined(SOLARIS) && \
    (defined(_PR_PTHREADS) || defined(_PR_GLOBAL_THREADS_ONLY))

# include <synch.h>
# define HAVE_UI_RWLOCK
# define RWLOCK_T rwlock_t
# define RWLOCK_INIT(lock) rwlock_init(lock, USYNC_THREAD, NULL)
# define RWLOCK_DESTROY(lock) rwlock_destroy(lock)
# define RWLOCK_RDLOCK(lock) rw_rdlock(lock)
# define RWLOCK_WRLOCK(lock) rw_wrlock(lock)
# define RWLOCK_UNLOCK(lock) rw_unlock(lock)

#endif

/*
 * Reader-writer lock
 */
struct PRRWLock {
  char* rw_name;    /* lock name */
  PRUint32 rw_rank; /* rank of the lock */

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  RWLOCK_T rw_lock;
#else
  PRLock* rw_lock;
  PRInt32 rw_lock_cnt;        /* == 0, if unlocked */
                              /* == -1, if write-locked */
                              /* > 0 , # of read locks */
  PRUint32 rw_reader_cnt;     /* number of waiting readers */
  PRUint32 rw_writer_cnt;     /* number of waiting writers */
  PRCondVar* rw_reader_waitq; /* cvar for readers */
  PRCondVar* rw_writer_waitq; /* cvar for writers */
# ifdef DEBUG
  PRThread* rw_owner; /* lock owner for write-lock */
# endif
#endif
};
#ifdef DEBUG
# define _PR_RWLOCK_RANK_ORDER_DEBUG /* enable deadlock detection using \
                                        rank-order for locks            \
                                      */
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG

static PRUintn pr_thread_rwlock_key; /* TPD key for lock stack */
static PRUintn pr_thread_rwlock_alloc_failed;

# define _PR_RWLOCK_RANK_ORDER_LIMIT 10

typedef struct thread_rwlock_stack {
  PRInt32 trs_index;                                /* top of stack */
  PRRWLock* trs_stack[_PR_RWLOCK_RANK_ORDER_LIMIT]; /* stack of lock
                                                       pointers */
} thread_rwlock_stack;

static void _PR_SET_THREAD_RWLOCK_RANK(PRRWLock* rwlock);
static PRUint32 _PR_GET_THREAD_RWLOCK_RANK(void);
static void _PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock* rwlock);
static void _PR_RELEASE_LOCK_STACK(void* lock_stack);

#endif
/*
 * Reader/Writer Locks
 */

/*
 * PR_NewRWLock
 *      Create a reader-writer lock, with the given lock rank and lock name
 */

PR_IMPLEMENT(PRRWLock*)
PR_NewRWLock(PRUint32 lock_rank, const char* lock_name) {
  PRRWLock* rwlock;
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  int err;
#endif

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }

  rwlock = PR_NEWZAP(PRRWLock);
  if (rwlock == NULL) {
    return NULL;
  }

  rwlock->rw_rank = lock_rank;
  if (lock_name != NULL) {
    rwlock->rw_name = (char*)PR_Malloc(strlen(lock_name) + 1);
    if (rwlock->rw_name == NULL) {
      PR_DELETE(rwlock);
      return (NULL);
    }
    strcpy(rwlock->rw_name, lock_name);
  } else {
    rwlock->rw_name = NULL;
  }

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  err = RWLOCK_INIT(&rwlock->rw_lock);
  if (err != 0) {
    PR_SetError(PR_UNKNOWN_ERROR, err);
    PR_Free(rwlock->rw_name);
    PR_DELETE(rwlock);
    return NULL;
  }
  return rwlock;
#else
  rwlock->rw_lock = PR_NewLock();
  if (rwlock->rw_lock == NULL) {
    goto failed;
  }
  rwlock->rw_reader_waitq = PR_NewCondVar(rwlock->rw_lock);
  if (rwlock->rw_reader_waitq == NULL) {
    goto failed;
  }
  rwlock->rw_writer_waitq = PR_NewCondVar(rwlock->rw_lock);
  if (rwlock->rw_writer_waitq == NULL) {
    goto failed;
  }
  rwlock->rw_reader_cnt = 0;
  rwlock->rw_writer_cnt = 0;
  rwlock->rw_lock_cnt = 0;
  return rwlock;

failed:
  if (rwlock->rw_reader_waitq != NULL) {
    PR_DestroyCondVar(rwlock->rw_reader_waitq);
  }
  if (rwlock->rw_lock != NULL) {
    PR_DestroyLock(rwlock->rw_lock);
  }
  PR_Free(rwlock->rw_name);
  PR_DELETE(rwlock);
  return NULL;
#endif
}
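
/*
 * Example (illustrative sketch, not part of the original NSPR sources):
 * typical lifecycle of a PRRWLock created by PR_NewRWLock(). The rank value
 * and lock name below are arbitrary; the block is compiled out.
 */
#if 0
static void example_rwlock_lifecycle(void) {
  PRRWLock* rwlock = PR_NewRWLock(PR_RWLOCK_RANK_NONE, "example");
  if (rwlock == NULL) {
    return; /* allocation or platform rwlock initialization failed */
  }
  PR_RWLock_Rlock(rwlock); /* shared (read) access */
  PR_RWLock_Unlock(rwlock);
  PR_RWLock_Wlock(rwlock); /* exclusive (write) access */
  PR_RWLock_Unlock(rwlock);
  PR_DestroyRWLock(rwlock);
}
#endif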
/*
** Destroy the given RWLock "lock".
*/
PR_IMPLEMENT(void)
PR_DestroyRWLock(PRRWLock* rwlock) {
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  int err;
  err = RWLOCK_DESTROY(&rwlock->rw_lock);
  PR_ASSERT(err == 0);
#else
  PR_ASSERT(rwlock->rw_reader_cnt == 0);
  PR_DestroyCondVar(rwlock->rw_reader_waitq);
  PR_DestroyCondVar(rwlock->rw_writer_waitq);
  PR_DestroyLock(rwlock->rw_lock);
#endif
  if (rwlock->rw_name != NULL) {
    PR_Free(rwlock->rw_name);
  }
  PR_DELETE(rwlock);
}
/*
** Read-lock the RWLock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Rlock(PRRWLock* rwlock) {
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  int err;
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
  /*
   * assert that rank ordering is not violated; the rank of 'rwlock' should
   * be equal to or greater than the highest rank of all the locks held by
   * the thread.
   */
  PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) ||
            (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK()));
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  err = RWLOCK_RDLOCK(&rwlock->rw_lock);
  PR_ASSERT(err == 0);
#else
  PR_Lock(rwlock->rw_lock);
  /*
   * wait if write-locked or if a writer is waiting; preference for writers
   */
  while ((rwlock->rw_lock_cnt < 0) || (rwlock->rw_writer_cnt > 0)) {
    rwlock->rw_reader_cnt++;
    PR_WaitCondVar(rwlock->rw_reader_waitq, PR_INTERVAL_NO_TIMEOUT);
    rwlock->rw_reader_cnt--;
  }
  /*
   * Increment read-lock count
   */
  rwlock->rw_lock_cnt++;

  PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
  /*
   * update thread's lock rank
   */
  if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE) {
    _PR_SET_THREAD_RWLOCK_RANK(rwlock);
  }
#endif
}
/*
** Write-lock the RWLock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Wlock(PRRWLock* rwlock) {
#if defined(DEBUG)
  PRThread* me = PR_GetCurrentThread();
#endif
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  int err;
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
  /*
   * assert that rank ordering is not violated; the rank of 'rwlock' should
   * be equal to or greater than the highest rank of all the locks held by
   * the thread.
   */
  PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) ||
            (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK()));
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  err = RWLOCK_WRLOCK(&rwlock->rw_lock);
  PR_ASSERT(err == 0);
#else
  PR_Lock(rwlock->rw_lock);
  /*
   * wait if read locked
   */
  while (rwlock->rw_lock_cnt != 0) {
    rwlock->rw_writer_cnt++;
    PR_WaitCondVar(rwlock->rw_writer_waitq, PR_INTERVAL_NO_TIMEOUT);
    rwlock->rw_writer_cnt--;
  }
  /*
   * apply write lock
   */
  rwlock->rw_lock_cnt--;
  PR_ASSERT(rwlock->rw_lock_cnt == -1);
# ifdef DEBUG
  PR_ASSERT(me != NULL);
  rwlock->rw_owner = me;
# endif
  PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
  /*
   * update thread's lock rank
   */
  if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE) {
    _PR_SET_THREAD_RWLOCK_RANK(rwlock);
  }
#endif
}
/*
** Unlock the RW lock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Unlock(PRRWLock* rwlock) {
#if defined(DEBUG)
  PRThread* me = PR_GetCurrentThread();
#endif
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  int err;
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
  err = RWLOCK_UNLOCK(&rwlock->rw_lock);
  PR_ASSERT(err == 0);
#else
  PR_Lock(rwlock->rw_lock);
  /*
   * lock must be read or write-locked
   */
  PR_ASSERT(rwlock->rw_lock_cnt != 0);
  if (rwlock->rw_lock_cnt > 0) {
    /*
     * decrement read-lock count
     */
    rwlock->rw_lock_cnt--;
    if (rwlock->rw_lock_cnt == 0) {
      /*
       * lock is not read-locked anymore; wakeup a waiting writer
       */
      if (rwlock->rw_writer_cnt > 0) {
        PR_NotifyCondVar(rwlock->rw_writer_waitq);
      }
    }
  } else {
    PR_ASSERT(rwlock->rw_lock_cnt == -1);

    rwlock->rw_lock_cnt = 0;
# ifdef DEBUG
    PR_ASSERT(rwlock->rw_owner == me);
    rwlock->rw_owner = NULL;
# endif
    /*
     * wakeup a writer, if present; preference for writers
     */
    if (rwlock->rw_writer_cnt > 0) {
      PR_NotifyCondVar(rwlock->rw_writer_waitq);
    }
    /*
     * else, wakeup all readers, if any
     */
    else if (rwlock->rw_reader_cnt > 0) {
      PR_NotifyAllCondVar(rwlock->rw_reader_waitq);
    }
  }
  PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
  /*
   * update thread's lock rank
   */
  if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE) {
    _PR_UNSET_THREAD_RWLOCK_RANK(rwlock);
  }
#endif
  return;
}
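
/*
 * Example (illustrative sketch, not part of the original NSPR sources):
 * guarding a shared counter with a PRRWLock. Readers take the lock shared,
 * the writer takes it exclusively; waiting writers are preferred over new
 * readers, as implemented above. The names are hypothetical and the block
 * is compiled out.
 */
#if 0
static PRRWLock* counter_lock; /* assumed created via PR_NewRWLock() */
static PRInt32 counter;

static PRInt32 read_counter(void) {
  PRInt32 value;
  PR_RWLock_Rlock(counter_lock); /* many readers may hold the lock at once */
  value = counter;
  PR_RWLock_Unlock(counter_lock);
  return value;
}

static void bump_counter(void) {
  PR_RWLock_Wlock(counter_lock); /* waits until all readers have drained */
  counter++;
  PR_RWLock_Unlock(counter_lock);
}
#endif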
#ifndef _PR_RWLOCK_RANK_ORDER_DEBUG

void _PR_InitRWLocks(void) {}

#else

void _PR_InitRWLocks(void) {
  /*
   * allocated thread-private-data index for rwlock list
   */
  if (PR_NewThreadPrivateIndex(&pr_thread_rwlock_key, _PR_RELEASE_LOCK_STACK) ==
      PR_FAILURE) {
    pr_thread_rwlock_alloc_failed = 1;
    return;
  }
}

/*
 * _PR_SET_THREAD_RWLOCK_RANK
 *      Set a thread's lock rank, which is the highest of the ranks of all
 *      the locks held by the thread. Pointers to the locks are added to a
 *      per-thread list, which is anchored off a thread-private data key.
 */
static void _PR_SET_THREAD_RWLOCK_RANK(PRRWLock* rwlock) {
  thread_rwlock_stack* lock_stack;
  PRStatus rv;

  /*
   * allocate a lock stack
   */
  if ((lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key)) == NULL) {
    lock_stack =
        (thread_rwlock_stack*)PR_CALLOC(1 * sizeof(thread_rwlock_stack));
    if (lock_stack) {
      rv = PR_SetThreadPrivate(pr_thread_rwlock_key, lock_stack);
      if (rv == PR_FAILURE) {
        PR_DELETE(lock_stack);
        pr_thread_rwlock_alloc_failed = 1;
        return;
      }
    } else {
      pr_thread_rwlock_alloc_failed = 1;
      return;
    }
  }
  /*
   * add rwlock to lock stack, if limit is not exceeded
   */
  if (lock_stack) {
    if (lock_stack->trs_index < _PR_RWLOCK_RANK_ORDER_LIMIT) {
      lock_stack->trs_stack[lock_stack->trs_index++] = rwlock;
    }
  }
}

static void _PR_RELEASE_LOCK_STACK(void* lock_stack) {
  PR_ASSERT(lock_stack);
  PR_DELETE(lock_stack);
}

/*
 * _PR_GET_THREAD_RWLOCK_RANK
 *
 *      return thread's lock rank. If thread-private-data for the lock
 *      stack is not allocated, return PR_RWLOCK_RANK_NONE.
 */
static PRUint32 _PR_GET_THREAD_RWLOCK_RANK(void) {
  thread_rwlock_stack* lock_stack;

  lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key);
  if (lock_stack == NULL || lock_stack->trs_index == 0) {
    return (PR_RWLOCK_RANK_NONE);
  } else {
    return (lock_stack->trs_stack[lock_stack->trs_index - 1]->rw_rank);
  }
}

/*
 * _PR_UNSET_THREAD_RWLOCK_RANK
 *
 *      remove the rwlock from the lock stack. Since locks may not be
 *      unlocked in a FIFO order, the entire lock stack is searched.
 */
static void _PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock* rwlock) {
  thread_rwlock_stack* lock_stack;
  int new_index = 0, index, done = 0;

  lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key);

  PR_ASSERT(lock_stack != NULL);

  for (index = lock_stack->trs_index - 1; index >= 0; index--) {
    if (!done && (lock_stack->trs_stack[index] == rwlock)) {
      /*
       * reset the slot for rwlock
       */
      lock_stack->trs_stack[index] = NULL;
      done = 1;
    }
    /*
     * search for the lowest-numbered empty slot, above which there are
     * no non-empty slots
     */
    if (!new_index && (lock_stack->trs_stack[index] != NULL)) {
      new_index = index + 1;
    }
    if (done && new_index) {
      break;
    }
  }
  /*
   * set top of stack to highest numbered empty slot
   */
  lock_stack->trs_index = new_index;
}

#endif /* _PR_RWLOCK_RANK_ORDER_DEBUG */
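
/*
 * Example (illustrative sketch, not part of the original NSPR sources):
 * with _PR_RWLOCK_RANK_ORDER_DEBUG enabled, a DEBUG build asserts that
 * locks are acquired in non-decreasing rank order. The ranks 1 and 2 below
 * are arbitrary; the block is compiled out.
 */
#if 0
static void example_rank_order(void) {
  PRRWLock* low = PR_NewRWLock(1, "low-rank");
  PRRWLock* high = PR_NewRWLock(2, "high-rank");

  PR_RWLock_Rlock(low);  /* rank 1; no locks held, so any rank is fine */
  PR_RWLock_Rlock(high); /* rank 2 >= 1: permitted */
  PR_RWLock_Unlock(high);
  PR_RWLock_Unlock(low);

  /* Acquiring 'low' (rank 1) while holding only 'high' (rank 2) would
   * trip the rank-order assertion, flagging a potential deadlock. */
  PR_DestroyRWLock(high);
  PR_DestroyRWLock(low);
}
#endif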