1 /* $NetBSD: locks.c,v 1.36 2009/12/01 09:50:51 pooka Exp $ */
4 * Copyright (c) 2007, 2008 Antti Kantee. All Rights Reserved.
6 * Development of this software was supported by the
7 * Finnish Cultural Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
19 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.36 2009/12/01 09:50:51 pooka Exp $");
34 #include <sys/param.h>
36 #include <sys/mutex.h>
37 #include <sys/rwlock.h>
39 #include <rump/rumpuser.h>
41 #include "rump_private.h"
44 * We map locks to pthread routines. The difference between kernel
45 * and rumpuser routines is that while the kernel uses static
46 * storage, rumpuser allocates the object from the heap. This
47 * indirection is necessary because we don't know the size of
48 * pthread objects here. It is also benefitial, since we can
49 * be easily compatible with the kernel ABI because all kernel
50 * objects regardless of machine architecture are always at least
51 * the size of a pointer. The downside, of course, is a performance
/*
 * A kmutex_t's storage holds a single pointer to the heap-allocated
 * rumpuser mutex; this macro fetches that pointer.
 */
#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
58 mutex_init(kmutex_t
*mtx
, kmutex_type_t type
, int ipl
)
61 CTASSERT(sizeof(kmutex_t
) >= sizeof(void *));
63 rumpuser_mutex_init((struct rumpuser_mtx
**)mtx
);
67 mutex_destroy(kmutex_t
*mtx
)
70 rumpuser_mutex_destroy(RUMPMTX(mtx
));
74 mutex_enter(kmutex_t
*mtx
)
77 rumpuser_mutex_enter(RUMPMTX(mtx
));
81 mutex_spin_enter(kmutex_t
*mtx
)
88 mutex_tryenter(kmutex_t
*mtx
)
91 return rumpuser_mutex_tryenter(RUMPMTX(mtx
));
95 mutex_exit(kmutex_t
*mtx
)
98 rumpuser_mutex_exit(RUMPMTX(mtx
));
102 mutex_spin_exit(kmutex_t
*mtx
)
109 mutex_owned(kmutex_t
*mtx
)
112 return rumpuser_mutex_held(RUMPMTX(mtx
));
/* Fetch the rumpuser rwlock pointer stored inside a krwlock_t. */
#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */
120 rw_init(krwlock_t
*rw
)
123 CTASSERT(sizeof(krwlock_t
) >= sizeof(void *));
125 rumpuser_rw_init((struct rumpuser_rw
**)rw
);
129 rw_destroy(krwlock_t
*rw
)
132 rumpuser_rw_destroy(RUMPRW(rw
));
136 rw_enter(krwlock_t
*rw
, const krw_t op
)
139 rumpuser_rw_enter(RUMPRW(rw
), op
== RW_WRITER
);
143 rw_tryenter(krwlock_t
*rw
, const krw_t op
)
146 return rumpuser_rw_tryenter(RUMPRW(rw
), op
== RW_WRITER
);
150 rw_exit(krwlock_t
*rw
)
153 rumpuser_rw_exit(RUMPRW(rw
));
158 rw_tryupgrade(krwlock_t
*rw
)
165 rw_write_held(krwlock_t
*rw
)
168 return rumpuser_rw_wrheld(RUMPRW(rw
));
172 rw_read_held(krwlock_t
*rw
)
175 return rumpuser_rw_rdheld(RUMPRW(rw
));
179 rw_lock_held(krwlock_t
*rw
)
182 return rumpuser_rw_held(RUMPRW(rw
));
/* condition variables ("curriculum vitaes") */

/* Fetch the rumpuser cv pointer stored inside a kcondvar_t. */
#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))
190 cv_init(kcondvar_t
*cv
, const char *msg
)
193 CTASSERT(sizeof(kcondvar_t
) >= sizeof(void *));
195 rumpuser_cv_init((struct rumpuser_cv
**)cv
);
199 cv_destroy(kcondvar_t
*cv
)
202 rumpuser_cv_destroy(RUMPCV(cv
));
206 cv_wait(kcondvar_t
*cv
, kmutex_t
*mtx
)
209 if (rump_threads
== 0)
210 panic("cv_wait without threads");
211 rumpuser_cv_wait(RUMPCV(cv
), RUMPMTX(mtx
));
215 cv_wait_sig(kcondvar_t
*cv
, kmutex_t
*mtx
)
218 rumpuser_cv_wait(RUMPCV(cv
), RUMPMTX(mtx
));
223 cv_timedwait(kcondvar_t
*cv
, kmutex_t
*mtx
, int ticks
)
225 struct timespec ts
, tick
;
229 tick
.tv_sec
= ticks
/ hz
;
230 tick
.tv_nsec
= (ticks
% hz
) * (1000000000/hz
);
231 timespecadd(&ts
, &tick
, &ts
);
237 if (rumpuser_cv_timedwait(RUMPCV(cv
), RUMPMTX(mtx
),
238 ts
.tv_sec
, ts
.tv_nsec
))
246 cv_timedwait_sig(kcondvar_t
*cv
, kmutex_t
*mtx
, int ticks
)
249 return cv_timedwait(cv
, mtx
, ticks
);
253 cv_signal(kcondvar_t
*cv
)
256 rumpuser_cv_signal(RUMPCV(cv
));
260 cv_broadcast(kcondvar_t
*cv
)
263 rumpuser_cv_broadcast(RUMPCV(cv
));
267 cv_has_waiters(kcondvar_t
*cv
)
270 return rumpuser_cv_has_waiters(RUMPCV(cv
));
273 /* this is not much of an attempt, but ... */
275 cv_is_valid(kcondvar_t
*cv
)
278 return RUMPCV(cv
) != NULL
;
/* recursion depth of the giant kernel biglock (rump_giantlock) */
static volatile int lockcnt;
291 return rumpuser_mutex_held(rump_giantlock
) && lockcnt
> 0;
295 kernel_unlock_allbutone(int *countp
)
297 int minusone
= lockcnt
-1;
299 KASSERT(kernel_biglocked());
301 _kernel_unlock(minusone
, countp
);
303 KASSERT(lockcnt
== 1);
307 * We drop lockcnt to 0 since rumpuser doesn't know that the
308 * kernel biglock is being used as the interlock for cv in
315 kernel_ununlock_allbutone(int nlocks
)
318 KASSERT(rumpuser_mutex_held(rump_giantlock
) && lockcnt
== 0);
320 _kernel_lock(nlocks
);
324 _kernel_lock(int nlocks
)
328 if (!rumpuser_mutex_tryenter(rump_giantlock
)) {
329 struct lwp
*l
= curlwp
;
331 rump_unschedule_cpu1(l
);
332 rumpuser_mutex_enter_nowrap(rump_giantlock
);
333 rump_schedule_cpu(l
);
340 _kernel_unlock(int nlocks
, int *countp
)
343 if (!rumpuser_mutex_held(rump_giantlock
)) {
344 KASSERT(nlocks
== 0);
355 KASSERT(lockcnt
== 1);
358 KASSERT(nlocks
<= lockcnt
);
361 rumpuser_mutex_exit(rump_giantlock
);
366 rump_user_unschedule(int nlocks
, int *countp
)
369 _kernel_unlock(nlocks
, countp
);
371 * XXX: technically we should unschedule_cpu1() here, but that
372 * requires rump_intr_enter/exit to be implemented.
374 rump_unschedule_cpu(curlwp
);
378 rump_user_schedule(int nlocks
)
381 rump_schedule_cpu(curlwp
);
384 _kernel_lock(nlocks
);