/*	$NetBSD: scheduler.c,v 1.7 2009/11/09 19:16:18 pooka Exp $	*/

/*
 * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.7 2009/11/09 19:16:18 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/select.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/* should go for MAXCPUS at some point */
static struct cpu_info rump_cpus[MAXCPUS];
static struct rumpcpu {
	struct cpu_info *rcpu_ci;
	int rcpu_flags;
	struct rumpuser_cv *rcpu_cv;
	LIST_ENTRY(rumpcpu) rcpu_entries;
} rcpu_storage[MAXCPUS];
struct cpu_info *rump_cpu = &rump_cpus[0];
int ncpu = 1;

#define RCPU_WANTED	0x01	/* someone wants this specific CPU */
#define RCPU_BUSY	0x02	/* CPU is busy */
#define RCPU_FREELIST	0x04	/* CPU is on freelist */

static LIST_HEAD(,rumpcpu) cpu_freelist = LIST_HEAD_INITIALIZER(cpu_freelist);
static struct rumpuser_mtx *schedmtx;
static struct rumpuser_cv *schedcv, *lwp0cv;

static bool lwp0busy = false;
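
/*
 * Summary of the per-CPU state handling below: each virtual CPU starts
 * on cpu_freelist with RCPU_FREELIST set.  rump_schedule_cpu() takes a
 * CPU off the freelist and marks it RCPU_BUSY.  A bound lwp that finds
 * its CPU busy sets RCPU_WANTED and sleeps on rcpu_cv, while
 * rump_unschedule_cpu1() clears RCPU_BUSY and either wakes such a waiter
 * or returns the CPU to the freelist and signals schedcv.  All of this
 * state is protected by schedmtx.
 */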

struct cpu_info *
cpu_lookup(u_int index)
{

	return &rump_cpus[index];
}

void
rump_scheduler_init()
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&schedmtx);
	rumpuser_cv_init(&schedcv);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < ncpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rump_cpu_bootstrap(ci);
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
		rcpu->rcpu_ci = ci;
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags = RCPU_FREELIST;
		rumpuser_cv_init(&rcpu->rcpu_cv);
	}
}

void
rump_schedule()
{
	struct lwp *l;

	/*
	 * If there is no dedicated lwp, allocate a temp one and
	 * set it to be free'd upon unschedule().  Use lwp0 context
	 * for reserving the necessary resources.
	 */
	l = rumpuser_get_curlwp();
	if (l == NULL) {
		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		/* schedule cpu and use lwp0 */
		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		l = rump_lwp_alloc(0, rump_nextlid());

		/* release lwp0 */
		rump_lwp_switch(l);
		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);

		/* mark new lwp as dead-on-exit */
		rump_lwp_release(l);
	} else {
		rump_schedule_cpu(l);
	}
}
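
/*
 * Acquire a virtual CPU for lwp l.  A bound lwp (LP_BOUND) waits until
 * its designated CPU is free; any other lwp takes the first CPU off the
 * freelist, sleeping on schedcv until one becomes available.
 */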
void
rump_schedule_cpu(struct lwp *l)
{
	struct rumpcpu *rcpu;

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (l->l_pflag & LP_BOUND) {
		KASSERT(l->l_cpu != NULL);
		rcpu = &rcpu_storage[l->l_cpu-&rump_cpus[0]];
		if (rcpu->rcpu_flags & RCPU_BUSY) {
			KASSERT((rcpu->rcpu_flags & RCPU_FREELIST) == 0);
			while (rcpu->rcpu_flags & RCPU_BUSY) {
				rcpu->rcpu_flags |= RCPU_WANTED;
				rumpuser_cv_wait_nowrap(rcpu->rcpu_cv,
				    schedmtx);
			}
			rcpu->rcpu_flags &= ~RCPU_WANTED;
		} else {
			KASSERT(rcpu->rcpu_flags & (RCPU_FREELIST|RCPU_WANTED));
		}
		if (rcpu->rcpu_flags & RCPU_FREELIST) {
			LIST_REMOVE(rcpu, rcpu_entries);
			rcpu->rcpu_flags &= ~RCPU_FREELIST;
		}
	} else {
		while ((rcpu = LIST_FIRST(&cpu_freelist)) == NULL) {
			rumpuser_cv_wait_nowrap(schedcv, schedmtx);
		}
		KASSERT(rcpu->rcpu_flags & RCPU_FREELIST);
		LIST_REMOVE(rcpu, rcpu_entries);
		rcpu->rcpu_flags &= ~RCPU_FREELIST;
		KASSERT(l->l_cpu == NULL);
		l->l_cpu = rcpu->rcpu_ci;
	}
	rcpu->rcpu_flags |= RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
	l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
}

void
rump_unschedule()
{
	struct lwp *l;

	l = rumpuser_get_curlwp();
	KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
	rump_unschedule_cpu(l);
	l->l_mutex = NULL;

	/*
	 * If we're using a temp lwp, need to take lwp0 for rump_lwp_free().
	 * (we could maybe cache idle lwp's to avoid constant bouncing)
	 */
	if (l->l_flag & LW_WEXIT) {
		rumpuser_set_curlwp(NULL);

		/* busy lwp0 */
		rumpuser_mutex_enter_nowrap(schedmtx);
		while (lwp0busy)
			rumpuser_cv_wait_nowrap(lwp0cv, schedmtx);
		lwp0busy = true;
		rumpuser_mutex_exit(schedmtx);

		rump_schedule_cpu(&lwp0);
		rumpuser_set_curlwp(&lwp0);
		rump_lwp_free(l);
		rump_unschedule_cpu(&lwp0);
		rumpuser_set_curlwp(NULL);

		rumpuser_mutex_enter_nowrap(schedmtx);
		lwp0busy = false;
		rumpuser_cv_signal(lwp0cv);
		rumpuser_mutex_exit(schedmtx);
	}
}

void
rump_unschedule_cpu(struct lwp *l)
{

	if ((l->l_pflag & LP_INTR) == 0)
		rump_softint_run(l->l_cpu);
	rump_unschedule_cpu1(l);
}
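
/*
 * Release the virtual CPU held by lwp l: wake a waiter bound to this
 * CPU if one exists, otherwise put the CPU back on the freelist.
 */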
void
rump_unschedule_cpu1(struct lwp *l)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;

	ci = l->l_cpu;
	if ((l->l_pflag & LP_BOUND) == 0) {
		l->l_cpu = NULL;
	}
	rcpu = &rcpu_storage[ci-&rump_cpus[0]];
	KASSERT(rcpu->rcpu_ci == ci);
	KASSERT(rcpu->rcpu_flags & RCPU_BUSY);

	rumpuser_mutex_enter_nowrap(schedmtx);
	if (rcpu->rcpu_flags & RCPU_WANTED) {
		/*
		 * The assumption is that there will usually be max 1
		 * thread waiting on the rcpu_cv, so broadcast is fine.
		 * (and the current structure requires it because of
		 * only a bitmask being used for wanting).
		 */
		rumpuser_cv_broadcast(rcpu->rcpu_cv);
	} else {
		LIST_INSERT_HEAD(&cpu_freelist, rcpu, rcpu_entries);
		rcpu->rcpu_flags |= RCPU_FREELIST;
		rumpuser_cv_signal(schedcv);
	}
	rcpu->rcpu_flags &= ~RCPU_BUSY;
	rumpuser_mutex_exit(schedmtx);
}

/* Give up and retake CPU (perhaps a different one) */
void
yield()
{
	struct lwp *l = curlwp;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);
	rump_unschedule_cpu(l);
	rump_schedule_cpu(l);
	KERNEL_LOCK(nlocks, l);
}

void
preempt()
{

	yield();
}
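
/*
 * Illustrative sketch: how an entry point into the rump kernel is
 * expected to bracket its work with the scheduler interfaces above.
 * some_kernel_operation() is a hypothetical placeholder; only
 * rump_schedule() and rump_unschedule() are interfaces defined here.
 */
#if 0
static int
example_rump_entrypoint(void)
{
	int error;

	rump_schedule();			/* bind to a virtual CPU */
	error = some_kernel_operation();	/* runs as curlwp */
	rump_unschedule();			/* release CPU and any temp lwp */

	return error;
}
#endif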