/*	$NetBSD: sleepq.c,v 1.5 2009/10/21 23:13:53 rmind Exp $	*/
/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.5 2009/10/21 23:13:53 rmind Exp $");
#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>

#include "rump_private.h"
/*
 * Flimsy and minimalistic sleepq implementation.  This is implemented
 * only for the use of callouts in kern_timeout.c.  Locking etc. is
 * completely incorrect, horrible, etc. etc. etc.
 */
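
/*
 * All sleepers block on the single global condvar sq_cv and every
 * wakeup is a broadcast; each woken thread re-checks its own wait
 * channel.  Crude, but sufficient for the callout code.
 */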
syncobj_t sleep_syncobj;
static kcondvar_t sq_cv;

static int
sqinit1(void)
{

	cv_init(&sq_cv, "sleepq");

	return 0;
}
void
sleepq_init(sleepq_t *sq)
{
	ONCE_DECL(sqctl);

	RUN_ONCE(&sqctl, sqinit1);

	TAILQ_INIT(sq);
}
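
/*
 * Mark the current lwp as sleeping on wait channel wc and put it
 * on the sleep queue.  The syncobj argument is ignored here.
 */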
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_sleepq = sq;
	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}
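
/*
 * Block until someone clears our wait channel.  If the timeout
 * expires (cv_timedwait() returns EWOULDBLOCK), remove ourselves
 * from the sleep queue.  The "catch" (signals) argument is ignored.
 */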
int
sleepq_block(int timo, bool catch)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;

	while (l->l_wchan) {
		if ((error = cv_timedwait(&sq_cv, mp, timo)) == EWOULDBLOCK) {
			TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
			l->l_wchan = NULL;
		}
	}
	mutex_spin_exit(mp);

	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}
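
/*
 * Wake all lwps sleeping on wchan: clear their wait channels, unlink
 * them from the queue and broadcast on the global condvar so that
 * sleepq_block() notices.  The expected-count argument is unsupported.
 */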
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l, *l_next;
	bool found = false;

	if (__predict_false(expected != -1))
		panic("sleepq_wake: \"expected\" not supported");

	for (l = TAILQ_FIRST(sq); l; l = l_next) {
		l_next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
			TAILQ_REMOVE(sq, l, l_sleepchain);
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}
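
/*
 * Remove an lwp from its sleep queue regardless of wait channel and
 * let it run again via the broadcast.  With "cleanup" set the lwp
 * lock is dropped as well.
 */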
void
sleepq_unsleep(struct lwp *l, bool cleanup)
{

	l->l_wchan = NULL;
	TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
	cv_broadcast(&sq_cv);

	if (cleanup) {
		mutex_spin_exit(l->l_mutex);
	}
}
/*
 * Thread scheduler handles priorities.  Therefore no action here.
 * (maybe do something if we're desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}
void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}
/*
 * XXX: used only by callout, therefore here.  Should try to use
 * the one in kern_lwp directly.
 */
void
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{

	/* Chase the lwp lock until it stops changing under us. */
	while (l->l_mutex != old) {
		mutex_spin_exit(old);
		old = l->l_mutex;
		mutex_spin_enter(old);
	}
}