lib/libc_r/uthread/uthread_rwlock.c
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"

#include "pthread_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);

static int init_static (pthread_rwlock_t *rwlock);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static int
init_static (pthread_rwlock_t *rwlock)
{
	int ret;

	_SPINLOCK(&static_init_lock);

	if (*rwlock == NULL)
		ret = _pthread_rwlock_init(rwlock, NULL);
	else
		ret = 0;

	_SPINUNLOCK(&static_init_lock);

	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}

	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return(ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;

				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}
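
/*
 * Usage sketch: a rwlock can be set up explicitly with
 * pthread_rwlock_init(), or left NULL and initialized lazily by
 * init_static() on first use.  The helper name setup_locks() is
 * hypothetical; the block is compiled out and kept for exposition only.
 */
#if 0
static pthread_rwlock_t dynamic_lock;		/* initialized below */
static pthread_rwlock_t lazy_lock = NULL;	/* init_static() fills this in */

static int
setup_locks(void)
{
	/* explicit initialization allocates the rwlock object now */
	int ret = pthread_rwlock_init(&dynamic_lock, NULL);

	if (ret != 0)
		return (ret);

	/* lazy_lock needs no call; the first rdlock/wrlock initializes it */
	return (0);
}
#endif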

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			ret = _pthread_cond_wait(&prwlock->read_signal,
			    &prwlock->lock);

			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				return(ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++;	/* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
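
/*
 * Sketch of the recursive-rdlock case described above; the function name
 * recursive_read() is hypothetical and the block is compiled out for
 * exposition only.  A thread that already holds a read lock may take
 * another even while a writer is queued; otherwise the second rdlock
 * could wait on a writer that is itself waiting for this thread's first
 * read lock, i.e. a deadlock.
 */
#if 0
static void
recursive_read(pthread_rwlock_t *lk)
{
	pthread_rwlock_rdlock(lk);	/* first read lock */

	/*
	 * Even if another thread is now blocked in pthread_rwlock_wrlock(),
	 * this succeeds because curthread->rdlock_count is already > 0.
	 */
	pthread_rwlock_rdlock(lk);

	pthread_rwlock_unlock(lk);
	pthread_rwlock_unlock(lk);
}
#endif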

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	curthread = _get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;	/* too many read locks acquired */
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		prwlock->state++;	/* indicate we are locked for reading */
		curthread->rdlock_count++;
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
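
/*
 * Sketch only; poll_for_read() is a hypothetical helper and the block is
 * compiled out for exposition.  Because writers get priority, tryrdlock
 * reports EBUSY not only while a writer holds the lock (state < 0) but
 * also while writers are merely queued (blocked_writers != 0).
 */
#if 0
static int
poll_for_read(pthread_rwlock_t *lk)
{
	int ret = pthread_rwlock_tryrdlock(lk);

	if (ret == EBUSY)
		return (0);	/* a writer holds or awaits the lock; retry later */
	if (ret != 0)
		return (-1);	/* EINVAL, EAGAIN, ... */

	/* ... read shared state here ... */
	pthread_rwlock_unlock(lk);
	return (1);
}
#endif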

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return(EINVAL);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return(EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return(ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return(ret);

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		ret = _pthread_cond_wait(&prwlock->write_signal,
		    &prwlock->lock);

		if (ret != 0) {
			prwlock->blocked_writers--;
			_pthread_mutex_unlock(&prwlock->lock);
			return(ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}
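
/*
 * Writer-side sketch; update_shared() and shared_value are hypothetical
 * and the block is compiled out for exposition only.  A writer blocks in
 * pthread_rwlock_wrlock() until the lock is idle; the blocked_writers
 * count it bumps above is what makes new readers defer to it.
 */
#if 0
static int shared_value;

static void
update_shared(pthread_rwlock_t *lk, int v)
{
	if (pthread_rwlock_wrlock(lk) == 0) {	/* waits for readers to drain */
		shared_value = v;
		pthread_rwlock_unlock(lk);	/* wakes a writer or all readers */
	}
}
#endif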