/*	$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $	*/
/*-
 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.41 2009/01/19 23:04:26 njoly Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/cpuio.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>
void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
};

struct cpuqueue	cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];
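
/*
 * The cpuctl character device is driven entirely through cpuctl_ioctl()
 * below; every other entry point in cpuctl_cdevsw is a null/no-op stub.
 * (Userland access is presumably via the cpuctl(8) utility and a
 * /dev/cpuctl node; neither is defined in this file.)
 *
 * cpu_queue is a circular queue of all attached CPUs, and cpu_infos[]
 * gives constant-time lookup by CPU index; both are filled in by
 * mi_cpu_attach().
 */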
mi_cpu_attach(struct cpu_info *ci)

	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	error = create_idle_lwp(ci);
		/* XXX revert sched_cpuattach */

		ci->ci_data.cpu_onproc = curlwp;

		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	callout_init_cpu(ci);
	pool_cache_cpu_init(ci);

	TAILQ_INIT(&ci->ci_data.cpu_biodone);
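
/*
 * Summary of mi_cpu_attach(): the machine-independent part of CPU
 * attachment.  It registers the CPU in cpu_infos[] and on cpu_queue,
 * initializes the per-CPU lockdebug list and lock, creates the idle LWP,
 * sets ci_data.cpu_onproc to either curlwp or the CPU's idle LWP (the
 * selecting condition is not shown in this excerpt), and initializes the
 * per-CPU callout, pool-cache and biodone state.
 */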
cpuctlattach(int dummy)
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)

	CPU_INFO_ITERATOR cii;

	mutex_enter(&cpu_lock);

	case IOC_CPU_SETSTATE:

		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,

		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {

		error = cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);

	case IOC_CPU_GETSTATE:

		memset(cs, 0, sizeof(*cs));

		if (cs->cs_id >= __arraycount(cpu_infos) ||
		    (ci = cpu_lookup(id)) == NULL) {

		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)

		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;

		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)

		*(int *)data = cpu_index(ci);

	case IOC_CPU_GETCOUNT:

	mutex_exit(&cpu_lock);
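
/*
 * cpuctl_ioctl() is the control path for CPUs: IOC_CPU_SETSTATE checks
 * KAUTH_SYSTEM_CPU / KAUTH_REQ_SYSTEM_CPU_SETSTATE authorization and then
 * applies the requested interrupt and online state via cpu_setintr() and
 * cpu_setstate(); IOC_CPU_GETSTATE fills in the cpustate record (online,
 * interrupt, last-modified time, interrupt count); IOC_CPU_GETCOUNT
 * returns the number of CPUs.  Everything runs under cpu_lock.
 *
 * A minimal userland sketch (an assumption, not part of this file; it
 * relies only on the ioctl names, the /dev/cpuctl node, and the cpustate
 * fields used above, with cpustate_t assumed to come from <sys/cpuio.h>):
 *
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	cpustate_t cs;
 *	memset(&cs, 0, sizeof(cs));
 *	cs.cs_id = 1;
 *	cs.cs_online = false;			// request CPU 1 offline
 *	cs.cs_intr = true;
 *	if (ioctl(fd, IOC_CPU_SETSTATE, &cs) == -1)
 *		err(EXIT_FAILURE, "IOC_CPU_SETSTATE");
 */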
cpu_lookup(u_int idx)

	struct cpu_info *ci = cpu_infos[idx];

	KASSERT(idx < __arraycount(cpu_infos));
	KASSERT(ci == NULL || cpu_index(ci) == idx);
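
/*
 * cpu_lookup() is a straight array lookup: given a CPU index it returns
 * the matching cpu_info pointer, or NULL if no CPU is attached at that
 * index.  The KASSERTs catch out-of-range indices and a cpu_infos[] slot
 * that disagrees with cpu_index().
 */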
cpu_xc_offline(struct cpu_info *ci)

	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	CPU_INFO_ITERATOR cii;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	spc->spc_flags |= SPCF_OFFLINE;

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)

	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {

		/* Normal case - no affinity */
		if ((l->l_flag & LW_AFFINITY) == 0) {
			lwp_migrate(l, target_ci);

		/* Affinity is set, find an online CPU in the set */
		KASSERT(l->l_affinity != NULL);
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(cpu_index(mci), l->l_affinity))

		mutex_exit(proc_lock);

	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE

	/* Just unset the SPCF_OFFLINE flag, caller will check */
	spc->spc_flags &= ~SPCF_OFFLINE;
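
/*
 * cpu_xc_offline() runs on the xcall (cross-call) thread while the
 * requesting thread holds cpu_lock on its behalf.  It marks the CPU
 * SPCF_OFFLINE, picks the first still-online CPU as the migration target,
 * and walks alllwp migrating every LWP that was running on the CPU going
 * offline and is neither bound (LP_BOUND) nor a soft-interrupt thread
 * (LP_INTR).  LWPs with an affinity mask are only moved to an online CPU
 * inside that mask; if no such CPU exists, the SPCF_OFFLINE flag is
 * cleared again so the caller can detect the failure.
 */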
cpu_xc_online(struct cpu_info *ci)

	struct schedstate_percpu *spc;

	spc = &ci->ci_schedstate;

	spc->spc_flags &= ~SPCF_OFFLINE;
cpu_setstate(struct cpu_info *ci, bool online)

	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

		if ((spc->spc_flags & SPCF_OFFLINE) == 0)

		func = (xcfunc_t)cpu_xc_online;

		if ((spc->spc_flags & SPCF_OFFLINE) != 0)

		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)

		func = (xcfunc_t)cpu_xc_offline;

	where = xc_unicast(0, func, ci, NULL, ci);

		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If it was not set offline, then it is busy */

	spc->spc_lastmod = time_second;
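
/*
 * cpu_setstate() flips a CPU online or offline.  The caller must hold
 * cpu_lock; the actual state change is performed on the target CPU by
 * cross-calling cpu_xc_online() or cpu_xc_offline() via xc_unicast(),
 * and when offlining, the loop above refuses to take down the last
 * online CPU of a processor set.  A sketch of the expected calling
 * pattern (an assumption, mirroring the cpuctl_ioctl() path above):
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);	// take ci offline
 *	mutex_exit(&cpu_lock);
 */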
#ifdef __HAVE_INTR_CONTROL

cpu_xc_intr(struct cpu_info *ci)

	struct schedstate_percpu *spc;

	spc = &ci->ci_schedstate;

	spc->spc_flags &= ~SPCF_NOINTR;

cpu_xc_nointr(struct cpu_info *ci)

	struct schedstate_percpu *spc;

	spc = &ci->ci_schedstate;

	spc->spc_flags |= SPCF_NOINTR;

cpu_setintr(struct cpu_info *ci, bool intr)

	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

		if ((spc->spc_flags & SPCF_NOINTR) == 0)

		func = (xcfunc_t)cpu_xc_intr;

		if ((spc->spc_flags & SPCF_NOINTR) != 0)

		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)

		func = (xcfunc_t)cpu_xc_nointr;

	where = xc_unicast(0, func, ci, NULL, ci);

		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If it was not marked no-intr, then it is busy */

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;

#else	/* __HAVE_INTR_CONTROL */

cpu_setintr(struct cpu_info *ci, bool intr)

cpu_intr_count(struct cpu_info *ci)

	return 0;		/* 0 == "don't know" */

#endif	/* __HAVE_INTR_CONTROL */
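
/*
 * cpu_setintr() mirrors cpu_setstate() but controls interrupt shielding:
 * under cpu_lock it cross-calls cpu_xc_intr() or cpu_xc_nointr() to clear
 * or set SPCF_NOINTR on the target CPU, refusing to shield the last CPU
 * still handling device interrupts, and then calls cpu_intr_redistribute()
 * to steer interrupts away.  Without __HAVE_INTR_CONTROL the stub versions
 * take over: cpu_intr_count() reports 0 ("don't know"), and cpu_setintr()'s
 * body is not shown in this excerpt.
 */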
	return (curlwp->l_pflag & LP_INTR) != 0;
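
/*
 * This trailing fragment tests whether the current LWP has LP_INTR set,
 * i.e. whether we are running in a soft-interrupt thread; it is most
 * likely the body of the soft-interrupt predicate (the enclosing function
 * is not shown in this excerpt).
 */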