/*	$NetBSD: scheduler.c,v 1.7 2009/11/09 19:16:18 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.7 2009/11/09 19:16:18 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/* should go for MAXCPUS at some point */
static struct cpu_info rump_cpus[MAXCPUS];
static struct rumpcpu {
	struct cpu_info *rcpu_ci;
	int rcpu_flags;
	struct rumpuser_cv *rcpu_cv;
	LIST_ENTRY(rumpcpu) rcpu_entries;
} rcpu_storage[MAXCPUS];
struct cpu_info *rump_cpu = &rump_cpus[0];

#define RCPU_WANTED	0x01	/* someone wants this specific CPU */
#define RCPU_BUSY	0x02	/* CPU is busy */
#define RCPU_FREELIST	0x04	/* CPU is on freelist */
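
/*
 * A virtual CPU is always in one of three states, with rcpu_flags
 * manipulated under schedmtx: on the freelist (RCPU_FREELIST), held
 * by an lwp (RCPU_BUSY), or busy with a bound lwp sleeping on rcpu_cv
 * for that specific CPU (RCPU_BUSY|RCPU_WANTED).
 */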

static LIST_HEAD(,rumpcpu) cpu_freelist = LIST_HEAD_INITIALIZER(cpu_freelist);
static struct rumpuser_mtx *schedmtx;
static struct rumpuser_cv *schedcv, *lwp0cv;

/* serializes temporary use of the lwp0 context (see rump_schedule()) */
static bool lwp0busy = false;

struct cpu_info *
cpu_lookup(u_int index)
{

	return &rump_cpus[index];
}

void
rump_scheduler_init()
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&schedmtx);
	rumpuser_cv_init(&schedcv);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rump_cpu_bootstrap(ci);
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
		rcpu->rcpu_ci = ci;
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags = RCPU_FREELIST;
		rumpuser_cv_init(&rcpu->rcpu_cv);
	}
}

void
rump_schedule()
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temp one and
	 * set it to be free'd upon unschedule().  Use lwp0 context
	 * for reserving the necessary resources.
	 */
	l = rumpuser_get_curlwp();
	if (l == NULL) {
		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		l = rump_lwp_alloc(0, rump_nextlid());

		/* release lwp0 */
		rump_lwp_switch(l);
		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);

		/* mark new lwp as dead-on-exit */
		rump_lwp_release(l);
	} else {
		rump_schedule_cpu(l);
	}
}
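
/*
 * Entry to the rump kernel is bracketed by rump_schedule() and
 * rump_unschedule().  A hypothetical caller (illustrative sketch only,
 * not part of this file; rump_sys_example() and do_kernel_work() are
 * made-up names) would look like:
 *
 *	int
 *	rump_sys_example(void)
 *	{
 *		int rv;
 *
 *		rump_schedule();	// obtain a virtual CPU (and lwp)
 *		rv = do_kernel_work();	// run code inside the rump kernel
 *		rump_unschedule();	// release the virtual CPU
 *		return rv;
 *	}
 */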

void
rump_schedule_cpu(struct lwp *l)
{
	struct rumpcpu *rcpu;

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (l->l_pflag & LP_BOUND) {
		KASSERT(l->l_cpu != NULL);
		rcpu = &rcpu_storage[l->l_cpu-&rump_cpus[0]];
		if (rcpu->rcpu_flags & RCPU_BUSY) {
			KASSERT((rcpu->rcpu_flags & RCPU_FREELIST) == 0);
			while (rcpu->rcpu_flags & RCPU_BUSY) {
				rcpu->rcpu_flags |= RCPU_WANTED;
				rumpuser_cv_wait_nowrap(rcpu->rcpu_cv,
				    schedmtx);
			}
			rcpu->rcpu_flags &= ~RCPU_WANTED;
		} else {
			KASSERT(rcpu->rcpu_flags & (RCPU_FREELIST|RCPU_WANTED));
		}
		if (rcpu->rcpu_flags & RCPU_FREELIST) {
			LIST_REMOVE(rcpu, rcpu_entries);
			rcpu->rcpu_flags &= ~RCPU_FREELIST;
		}
	} else {
		while ((rcpu = LIST_FIRST(&cpu_freelist)) == NULL) {
			rumpuser_cv_wait_nowrap(schedcv, schedmtx);
		}
		KASSERT(rcpu->rcpu_flags & RCPU_FREELIST);
		LIST_REMOVE(rcpu, rcpu_entries);
		rcpu->rcpu_flags &= ~RCPU_FREELIST;
		KASSERT(l->l_cpu == NULL);
		l->l_cpu = rcpu->rcpu_ci;
	}
	rcpu->rcpu_flags |= RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
}
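
/*
 * rump_schedule_cpu() above has two paths: a bound lwp (LP_BOUND)
 * waits for its own CPU, raising RCPU_WANTED while the CPU is busy,
 * whereas an unbound lwp takes the first free CPU, sleeping on
 * schedcv if the freelist is empty.  rump_unschedule() below undoes
 * the pairing.
 */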

void
rump_unschedule()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = NULL;

	/*
	 * If we're using a temp lwp, need to take lwp0 for rump_lwp_free().
	 * (we could maybe cache idle lwp's to avoid constant bouncing)
	 */
	if (l->l_flag & LW_WEXIT) {
		rumpuser_set_curlwp(NULL);

		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		rump_lwp_free(l);
		rump_unschedule_cpu(&lwp0);
		rumpuser_set_curlwp(NULL);

		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);
	}
}

void
rump_unschedule_cpu(struct lwp *l)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l);
}
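
/*
 * Release the virtual CPU proper.  A wanted CPU is handed directly to
 * the waiter via rcpu_cv instead of visiting the freelist; otherwise
 * it goes back on the freelist and one schedcv sleeper is woken.
 */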
void
rump_unschedule_cpu1(struct lwp *l)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;

	ci = l->l_cpu;
	if ((l->l_pflag & LP_BOUND) == 0) {
		l->l_cpu = NULL;
	}
	rcpu = &rcpu_storage[ci-&rump_cpus[0]];
	KASSERT(rcpu->rcpu_ci == ci);
	KASSERT(rcpu->rcpu_flags & RCPU_BUSY);

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (rcpu->rcpu_flags & RCPU_WANTED) {
		/*
		 * The assumption is that there will usually be max 1
		 * thread waiting on the rcpu_cv, so broadcast is fine.
		 * (and the current structure requires it because of
		 * only a bitmask being used for wanting).
		 */
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
	} else {
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags |= RCPU_FREELIST;
		rumpuser_cv_signal(schedcv);
	}
	rcpu->rcpu_flags &= ~RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
}

/* Give up and retake CPU (perhaps a different one) */
void
yield()
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}