/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

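/*
 * Worked example of the assertion above (assuming the usual parameters of
 * 32-bit status words): with RQ_NQS == 64 queues and RQB_BPW == 32 bits
 * per word, RQB_LEN must be 2 so the status words cover exactly one bit
 * per run queue, no more and no less.
 */
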
/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
long sched_switch_stats[SWT_COUNT];	/* Switch reasons from mi_switch(). */

SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SCHED_STAT_DEFINE_VAR(uncategorized, &sched_switch_stats[SWT_NONE], "");
SCHED_STAT_DEFINE_VAR(preempt, &sched_switch_stats[SWT_PREEMPT], "");
SCHED_STAT_DEFINE_VAR(owepreempt, &sched_switch_stats[SWT_OWEPREEMPT], "");
SCHED_STAT_DEFINE_VAR(turnstile, &sched_switch_stats[SWT_TURNSTILE], "");
SCHED_STAT_DEFINE_VAR(sleepq, &sched_switch_stats[SWT_SLEEPQ], "");
SCHED_STAT_DEFINE_VAR(sleepqtimo, &sched_switch_stats[SWT_SLEEPQTIMO], "");
SCHED_STAT_DEFINE_VAR(relinquish, &sched_switch_stats[SWT_RELINQUISH], "");
SCHED_STAT_DEFINE_VAR(needresched, &sched_switch_stats[SWT_NEEDRESCHED], "");
SCHED_STAT_DEFINE_VAR(idle, &sched_switch_stats[SWT_IDLE], "");
SCHED_STAT_DEFINE_VAR(iwait, &sched_switch_stats[SWT_IWAIT], "");
SCHED_STAT_DEFINE_VAR(suspend, &sched_switch_stats[SWT_SUSPEND], "");
SCHED_STAT_DEFINE_VAR(remotepreempt, &sched_switch_stats[SWT_REMOTEPREEMPT],
    "");
SCHED_STAT_DEFINE_VAR(remotewakeidle, &sched_switch_stats[SWT_REMOTEWAKEIDLE],
    "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		*(long *)p->oid_arg1 = 0;
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

retry:
	td = sched_choose();

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;
	int flags;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));

	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			flags = SW_INVOL | SW_PREEMPT;
			if (TD_IS_IDLETHREAD(td))
				flags |= SWT_IDLE;
			else
				flags |= SWT_OWEPREEMPT;
			mi_switch(flags, NULL);
			thread_unlock(td);
		}
	} else
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

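/*
 * Worked example of the index math above (assuming 32-bit status words,
 * i.e. RQB_L2BPW == 5): if rqb_bits[1] holds 0x10, RQB_FFS() yields bit
 * index 4, so pri = 4 + (1 << 5) = 36; queue 36 is the lowest-numbered
 * non-empty queue.
 */
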
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

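/*
 * Mask example (assuming 32-bit status words): for pri == 36 the scan
 * starts at word RQB_WORD(36) == 1 with mask ~0 << (36 & 31), that is
 * 0xfffffff0, so bits 0-3 of that word (queues 32-35, already behind the
 * starting point) are ignored on the first word only.
 */
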
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

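/*
 * Example of the word/bit split used by setbit and clrbit (assuming
 * 32-bit words): queue 36 lives in word RQB_WORD(36) == 36 >> 5 == 1,
 * at bit RQB_BIT(36) == 1 << (36 & 31) == 1 << 4.
 */
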
/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

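/*
 * Example of the priority-to-queue mapping (assuming RQ_PPQ == 4):
 * td_priority values 0-3 share queue 0 and 100-103 share queue 25, so
 * the 0-255 priority space folds onto RQ_NQS == 64 queues.  Threads
 * added with SRQ_PREEMPTED go to the head of their queue so a preempted
 * thread runs again before later arrivals at the same priority.
 */
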
void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}

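/*
 * Example: with fuzz == 2, the first two threads on the highest non-empty
 * queue are inspected and one whose td_lastcpu matches the current CPU is
 * preferred, trading strict FIFO order within the queue for a little
 * cache affinity.
 */
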
/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

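/*
 * The *idx adjustment above keeps a caller-maintained search hint valid:
 * if the caller's saved index pointed at the queue that just became
 * empty, it is advanced (with wraparound) so that a subsequent
 * runq_findbit_from() does not begin its scan at the drained queue.
 */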