/*	$NetBSD: locks.c,v 1.36 2009/12/01 09:50:51 pooka Exp $	*/

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.36 2009/12/01 09:50:51 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * easily stay compatible with the kernel ABI: all kernel lock
 * objects, regardless of machine architecture, are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
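
/*
 * Illustrative sketch (not part of the original file): the kernel-side
 * lock object is used only as storage for a pointer to the heap-allocated
 * rumpuser lock, so callers keep using the normal kernel locking API with
 * statically allocated objects.  The names "example_lock" and "example"
 * below are hypothetical.
 *
 *	static kmutex_t example_lock;		// kernel-ABI sized storage
 *
 *	static void
 *	example(void)
 *	{
 *
 *		mutex_init(&example_lock, MUTEX_DEFAULT, IPL_NONE);
 *		mutex_enter(&example_lock);	// pthread mutex lock underneath
 *		mutex_exit(&example_lock);	// pthread mutex unlock underneath
 *		mutex_destroy(&example_lock);
 *	}
 */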

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (rump_threads == 0)
		panic("cv_wait without threads");
	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;

	nanotime(&ts);
	tick.tv_sec = ticks / hz;
	tick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tick, &ts);

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts.tv_sec, ts.tv_nsec))
			return EWOULDBLOCK;
		else
			return 0;
	}
}
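
/*
 * Worked example (illustrative, not part of the original file): with
 * hz = 100, a timeout of ticks = 150 gives tick.tv_sec = 150 / 100 = 1
 * and tick.tv_nsec = (150 % 100) * (1000000000 / 100) = 500000000,
 * i.e. 1.5 seconds.  That relative amount is added to the current time
 * from nanotime(), since rumpuser_cv_timedwait() above is handed an
 * absolute wakeup time rather than a relative timeout.
 */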

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}

/*
 * giant lock
 */

static volatile int lockcnt;

bool
kernel_biglocked()
{

	return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

void
kernel_unlock_allbutone(int *countp)
{
	int minusone = lockcnt-1;

	KASSERT(kernel_biglocked());
	if (minusone) {
		_kernel_unlock(minusone, countp);
	}
	KASSERT(lockcnt == 1);
	*countp = minusone;

	/*
	 * We drop lockcnt to 0 since rumpuser doesn't know that the
	 * kernel biglock is being used as the interlock for cv in
	 * tsleep.
	 */
	lockcnt = 0;
}
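
/*
 * Illustrative call pattern (hypothetical caller, not part of the
 * original file): a tsleep()-style path releases all recursive biglock
 * holds except one, sleeps using the giant lock itself as the condition
 * variable interlock, and restores the saved recursion count on wakeup.
 * "cv" and "nlocks" below are hypothetical local names.
 *
 *	int nlocks;
 *
 *	kernel_unlock_allbutone(&nlocks);
 *	rumpuser_cv_wait(RUMPCV(cv), rump_giantlock);
 *	kernel_ununlock_allbutone(nlocks);
 */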

void
kernel_ununlock_allbutone(int nlocks)
{

	KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
	lockcnt = 1;
	_kernel_lock(nlocks);
}

void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		if (!rumpuser_mutex_tryenter(rump_giantlock)) {
			struct lwp *l = curlwp;

			rump_unschedule_cpu1(l);
			rumpuser_mutex_enter_nowrap(rump_giantlock);
			rump_schedule_cpu(l);
		}
		lockcnt++;
	}
}

void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}
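
/*
 * Note on the arguments above (descriptive, not part of the original
 * file): nlocks == 0 releases every recursive hold, nlocks == -1 asserts
 * that exactly one hold exists and releases it, and any other value
 * releases that many holds.  If countp is non-NULL it receives the hold
 * count from before the release, suitable for passing back to
 * _kernel_lock() later.  "held" below is a hypothetical local name.
 *
 *	int held;
 *
 *	_kernel_unlock(0, &held);	// drop all holds, remember how many
 *	// ... run without the biglock ...
 *	_kernel_lock(held);		// reacquire the same number of holds
 */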

void
rump_user_unschedule(int nlocks, int *countp)
{

	_kernel_unlock(nlocks, countp);
	/*
	 * XXX: technically we should unschedule_cpu1() here, but that
	 * requires rump_intr_enter/exit to be implemented.
	 */
	rump_unschedule_cpu(curlwp);
}

void
rump_user_schedule(int nlocks)
{

	rump_schedule_cpu(curlwp);

	if (nlocks)
		_kernel_lock(nlocks);
}