/*	$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
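/*
 * Machine-independent CPU management: attach CPUs at boot (mi_cpu_attach),
 * provide the cpuctl control device, and set CPUs online/offline or
 * interrupt-enabled/disabled via cross calls.
 */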
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>
void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
        nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
        nullstop, notty, nopoll, nommap, nokqfilter,
        D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock;
int		ncpu;
int		ncpuonline;
bool		mp_online;
struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];
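/*
 * mi_cpu_attach: attach a CPU to the system.  Assigns the next CPU index,
 * records the CPU in cpu_queue/cpu_infos, creates its idle LWP and
 * initializes the per-CPU state of the scheduler, softint, callout, xcall,
 * pool cache, select and namei cache subsystems.
 */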
int
mi_cpu_attach(struct cpu_info *ci)
{
        int error;

        ci->ci_index = ncpu;
        cpu_infos[cpu_index(ci)] = ci;
        CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
        TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
        __cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

        sched_cpuattach(ci);

        error = create_idle_lwp(ci);
        if (error != 0) {
                /* XXX revert sched_cpuattach */
                return error;
        }

        if (ci == curcpu())
                ci->ci_data.cpu_onproc = curlwp;
        else
                ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

        percpu_init_cpu(ci);
        softint_init(ci);
        callout_init_cpu(ci);
        xc_init_cpu(ci);
        pool_cache_cpu_init(ci);
        selsysinit(ci);
        cache_cpu_init(ci);
        TAILQ_INIT(&ci->ci_data.cpu_biodone);
        ncpu++;
        ncpuonline++;

        return 0;
}
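/*
 * cpuctlattach: attach hook for the cpuctl pseudo-device; nothing to do.
 * cpuctl_ioctl: handle ioctls on the cpuctl device, serialized by cpu_lock:
 * set or query a CPU's online/interrupt state, map an iterator index to a
 * CPU index, and report the CPU count.
 */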
void
cpuctlattach(int dummy)
{

}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        CPU_INFO_ITERATOR cii;
        cpustate_t *cs;
        struct cpu_info *ci;
        int error, i;
        u_int id;

        error = 0;

        mutex_enter(&cpu_lock);
        switch (cmd) {
        case IOC_CPU_SETSTATE:
                if (error == 0)
                        cs = data;
                error = kauth_authorize_system(l->l_cred,
                    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
                    NULL);
                if (error != 0)
                        break;
                if (cs->cs_id >= __arraycount(cpu_infos) ||
                    (ci = cpu_lookup(cs->cs_id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                error = cpu_setintr(ci, cs->cs_intr);
                error = cpu_setstate(ci, cs->cs_online);
                break;

        case IOC_CPU_GETSTATE:
                if (error == 0)
                        cs = data;
                id = cs->cs_id;
                memset(cs, 0, sizeof(*cs));
                cs->cs_id = id;
                if (cs->cs_id >= __arraycount(cpu_infos) ||
                    (ci = cpu_lookup(id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                        cs->cs_online = false;
                else
                        cs->cs_online = true;
                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                        cs->cs_intr = false;
                else
                        cs->cs_intr = true;
                cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
                cs->cs_lastmodhi = (int32_t)
                    (ci->ci_schedstate.spc_lastmod >> 32);
                cs->cs_intrcnt = cpu_intr_count(ci) + 1;
                break;

        case IOC_CPU_MAPID:
                i = 0;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if (i++ == *(int *)data)
                                break;
                }
                if (ci == NULL)
                        error = ESRCH;
                else
                        *(int *)data = cpu_index(ci);
                break;

        case IOC_CPU_GETCOUNT:
                *(int *)data = ncpu;
                break;

        default:
                error = ENOTTY;
                break;
        }
        mutex_exit(&cpu_lock);

        return error;
}
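/*
 * cpu_lookup: map a CPU index to its cpu_info structure, or NULL if no
 * CPU is attached at that index.  The index must be within range.
 */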
struct cpu_info *
cpu_lookup(u_int idx)
{
        struct cpu_info *ci = cpu_infos[idx];

        KASSERT(idx < __arraycount(cpu_infos));
        KASSERT(ci == NULL || cpu_index(ci) == idx);

        return ci;
}
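/*
 * cpu_xc_offline: cross-call handler that marks a CPU offline and migrates
 * all unbound, non-interrupt LWPs away from it.  Fails (by clearing
 * SPCF_OFFLINE again) if an LWP's affinity set contains no online CPU.
 */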
static void
cpu_xc_offline(struct cpu_info *ci)
{
        struct schedstate_percpu *spc, *mspc = NULL;
        struct cpu_info *target_ci;
        struct lwp *l;
        CPU_INFO_ITERATOR cii;
        int s;

        /*
         * Thread that made the cross call (separate context) holds
         * cpu_lock on our behalf.
         */
        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_OFFLINE;
        splx(s);

        /* Take the first available CPU for the migration. */
        for (CPU_INFO_FOREACH(cii, target_ci)) {
                mspc = &target_ci->ci_schedstate;
                if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
                        break;
        }
        KASSERT(target_ci != NULL);

        /*
         * Migrate all non-bound threads to the other CPU.  Note that this
         * runs from the xcall thread, thus handling of LSONPROC is not needed.
         */
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                struct cpu_info *mci;

                lwp_lock(l);
                if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
                        lwp_unlock(l);
                        continue;
                }
                /* Normal case - no affinity */
                if ((l->l_flag & LW_AFFINITY) == 0) {
                        lwp_migrate(l, target_ci);
                        continue;
                }
                /* Affinity is set, find an online CPU in the set */
                KASSERT(l->l_affinity != NULL);
                for (CPU_INFO_FOREACH(cii, mci)) {
                        mspc = &mci->ci_schedstate;
                        if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
                            kcpuset_isset(cpu_index(mci), l->l_affinity))
                                break;
                }
                if (mci == NULL) {
                        lwp_unlock(l);
                        mutex_exit(proc_lock);
                        goto fail;
                }
                lwp_migrate(l, mci);
        }
        mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
        cpu_offline_md();
#endif
        return;
fail:
        /* Just unset the SPCF_OFFLINE flag, caller will check */
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}
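/*
 * cpu_xc_online: cross-call handler that marks a CPU online again by
 * clearing SPCF_OFFLINE.
 */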
static void
cpu_xc_online(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}
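/*
 * cpu_setstate: set a CPU online or offline.  Called with cpu_lock held;
 * refuses to take the last online CPU in a processor set offline, and
 * performs the state change via a cross call to the target CPU.
 */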
int
cpu_setstate(struct cpu_info *ci, bool online)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nonline;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (online) {
                if ((spc->spc_flags & SPCF_OFFLINE) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_online;
                ncpuonline++;
        } else {
                if ((spc->spc_flags & SPCF_OFFLINE) != 0)
                        return 0;
                nonline = 0;
                /*
                 * Ensure that at least one CPU within the processor set
                 * stays online.  Revisit this later.
                 */
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                                continue;
                        if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
                                continue;
                        nonline++;
                }
                if (nonline == 1)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_offline;
                ncpuonline--;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (online) {
                KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
        } else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
                /* If was not set offline, then it is busy */
                return EBUSY;
        }

        spc->spc_lastmod = time_second;
        return 0;
}
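/*
 * The following routines control whether a CPU participates in device
 * interrupt handling.  They are only provided on ports that define
 * __HAVE_INTR_CONTROL; otherwise the stubs after the #else below are used.
 */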
#ifdef	__HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_NOINTR;
        splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_NOINTR;
        splx(s);
}
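/*
 * cpu_setintr: enable or disable device interrupt handling on a CPU.
 * Called with cpu_lock held; refuses to disable interrupts on the last
 * CPU still handling them, and applies the change via a cross call.
 */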
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nintr;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (intr) {
                if ((spc->spc_flags & SPCF_NOINTR) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_intr;
        } else {
                if ((spc->spc_flags & SPCF_NOINTR) != 0)
                        return 0;
                /*
                 * Ensure that at least one CPU within the system
                 * is handling device interrupts.
                 */
                nintr = 0;
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                                continue;
                        if (ci2 == ci)
                                continue;
                        nintr++;
                }
                if (nintr == 0)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_nointr;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (intr) {
                KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
        } else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
                /* If the no-intr state was not set, then the CPU is busy */
                return EBUSY;
        }

        /* Direct interrupts away from the CPU and record the change. */
        cpu_intr_redistribute();
        spc->spc_lastmod = time_second;
        return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
        return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{
        return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */
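/*
 * cpu_softintr_p: return true if the calling LWP is an interrupt
 * (soft interrupt) LWP.
 */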
bool
cpu_softintr_p(void)
{
        return (curlwp->l_pflag & LP_INTR) != 0;
}