/*	$NetBSD: kern_softint.c,v 1.29 2009/07/19 10:11:55 yamt Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Generic software interrupt framework.
 *
 * Overview
 *
 *	The soft interrupt framework provides a mechanism to schedule a
 *	low priority callback that runs with thread context.  It allows
 *	for dynamic registration of software interrupts, and for fair
 *	queueing and prioritization of those interrupts.  The callbacks
 *	can be scheduled to run from nearly any point in the kernel: by
 *	code running with thread context, by code running from a
 *	hardware interrupt handler, and at any interrupt priority
 *	level.
 *
 * Priority levels
 *
 *	Since soft interrupt dispatch can be tied to the underlying
 *	architecture's interrupt dispatch code, it can be limited
 *	both by the capabilities of the hardware and the capabilities
 *	of the interrupt dispatch code itself.  The number of priority
 *	levels is restricted to four.  In order of priority (lowest to
 *	highest) the levels are: clock, bio, net, serial.
 *
 *	The names are symbolic and in isolation do not have any direct
 *	connection with a particular kind of device activity: they are
 *	only meant as a guide.
 *
 *	The four priority levels map directly to scheduler priority
 *	levels, and where the architecture implements 'fast' software
 *	interrupts, they also map onto interrupt priorities.  The
 *	interrupt priorities are intended to be hidden from machine
 *	independent code, which should use thread-safe mechanisms to
 *	synchronize with software interrupts (for example: mutexes).
 *
 * Capabilities
 *
 *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
 *	should not try to operate on user space addresses, or to use
 *	virtual memory facilities other than those noted as interrupt
 *	safe.
 *
 *	Unlike hardware interrupts, software interrupts do have thread
 *	context.  They may block on synchronization objects, sleep, and
 *	resume execution at a later time.
 *
 *	Since software interrupts are a limited resource and run with
 *	higher priority than most other LWPs in the system, all
 *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
 *	extension, code running with process context must take care to
 *	ensure that any lock that may be taken from a software interrupt
 *	can not be held for more than a short period of time.
 *
 *	The kernel does not allow software interrupts to use facilities
 *	or perform actions that may block for a significant amount of
 *	time.  This means that it's not valid for a software interrupt
 *	to sleep on condition variables or wait for resources to become
 *	available (for example, memory).
 *
 * Per-CPU operation
 *
 *	If a soft interrupt is triggered on a CPU, it can only be
 *	dispatched on the same CPU.  Each LWP dedicated to handling a
 *	soft interrupt is bound to its home CPU, so if the LWP blocks
 *	and needs to run again, it can only run there.  Nearly all data
 *	structures used to manage software interrupts are per-CPU.
 *
 *	The per-CPU requirement is intended to reduce "ping-pong" of
 *	cache lines between CPUs: lines occupied by data structures
 *	used to manage the soft interrupts, and lines occupied by data
 *	items being passed down to the soft interrupt.  As a positive
 *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
 *
 * Generic implementation
 *
 *	A generic, low performance implementation is provided that
 *	works across all architectures, with no machine-dependent
 *	modifications needed.  This implementation uses the scheduler,
 *	and so has a number of restrictions:
 *
 *	1) The software interrupts are not currently preemptive, so
 *	must wait for the currently executing LWP to yield the CPU.
 *	This can introduce latency.
 *
 *	2) An expensive context switch is required for a software
 *	interrupt to be handled.
 *
 * 'Fast' software interrupts
 *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
 *	the fast mechanism.  Threads running either in the kernel or in
 *	userspace will be interrupted, but will not be preempted.  When
 *	the soft interrupt completes execution, the interrupted LWP
 *	is resumed.  Interrupt dispatch code must provide the minimum
 *	level of context necessary for the soft interrupt to block and
 *	be resumed at a later time.  The machine-dependent dispatch
 *	path looks something like the following:
 *
 *	softintr()
 *	{
 *		go to IPL_HIGH if necessary for switch;
 *		save any necessary registers in a format that can be
 *		restored by cpu_switchto if the softint blocks;
 *		arrange for cpu_switchto() to restore into the
 *		trampoline function;
 *		identify LWP to handle this interrupt;
 *		switch to the LWP's stack;
 *		switch register stacks, if necessary;
 *		assign new value of curlwp;
 *		call MI softint_dispatch, passing old curlwp and IPL
 *		to execute interrupt at;
 *		switch back to old stack;
 *		switch back to old register stack, if necessary;
 *		restore IPL;
 *		return to interrupted LWP;
 *	}
 *
 *	If the soft interrupt blocks, a trampoline function is returned
 *	to in the context of the interrupted LWP, as arranged for by
 *	softint_dispatch():
 *
 *	softint_ret()
 *	{
 *		unlock soft interrupt LWP;
 *		resume interrupt processing, likely returning to
 *		interrupted LWP or dispatching another, different
 *		interrupt;
 *	}
 *
 *	Once the soft interrupt has fired (and even if it has blocked),
 *	no further soft interrupts at that level will be triggered by
 *	MI code until the soft interrupt handler has ceased execution.
 *	If a soft interrupt handler blocks and is resumed, it resumes
 *	execution as a normal LWP (kthread) and gains VM context.  Only
 *	when it has completed and is ready to fire again will it
 *	interrupt other threads.
 *
 * Future directions
 *
 *	Provide a cheap way to direct software interrupts to remote
 *	CPUs.  Provide a way to enqueue work items into the handler
 *	record, removing additional spl calls (see subr_workqueue.c).
 */
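
/*
 * Example usage
 *
 *	The sketch below is illustrative only and is not part of this
 *	file: it shows how a hypothetical driver ("foo", with a made-up
 *	softc layout) might register, trigger and tear down a handler
 *	using the interfaces defined here.
 *
 *	static void
 *	foo_softintr(void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *
 *		mutex_enter(&sc->sc_lock);
 *		while (!TAILQ_EMPTY(&sc->sc_workq)) {
 *			dequeue and process one work item;
 *		}
 *		mutex_exit(&sc->sc_lock);
 *	}
 *
 *	In the driver's attach routine:
 *
 *		sc->sc_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *		    foo_softintr, sc);
 *
 *	From the hardware interrupt handler, or from thread context with
 *	preemption disabled (kpreempt_disable()), after queueing work:
 *
 *		softint_schedule(sc->sc_sih);
 *
 *	On detach, once nothing can trigger the handler any more and with
 *	no locks held that the handler itself might take:
 *
 *		softint_disestablish(sc->sc_sih);
 */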
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.29 2009/07/19 10:11:55 yamt Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>
/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand)	si_q;
	struct lwp			*si_lwp;
	struct cpu_info			*si_cpu;
	uintptr_t			si_machdep;
	struct evcnt			si_evcnt;
	struct evcnt			si_evcnt_block;
	int				si_active;
	char				si_name[8];
	char				si_name_block[8+6];
} softint_t;

typedef struct softhand {
	SIMPLEQ_ENTRY(softhand)		sh_q;
	void				(*sh_func)(void *);
	void				*sh_arg;
	softint_t			*sh_isr;
	u_int				sh_flags;
} softhand_t;

typedef struct softcpu {
	struct cpu_info			*sc_cpu;
	softint_t			sc_int[SOFTINT_COUNT];
	softhand_t			sc_hand[1];
} softcpu_t;
static void	softint_thread(void *);

u_int		softint_bytes = 8192;
u_int		softint_timing;
static u_int	softint_max;
static kmutex_t	softint_lock;
static void	*softint_netisrs[NETISR_MAX];
/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%u", desc, ci->ci_index);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
	    ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_MISC, NULL,
	    "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
	    desc, ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_MISC, NULL,
	    "softint", si->si_name_block);

	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}
/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init_cpu: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	ci->ci_data.cpu_softints = 0;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		mutex_enter(&softint_lock);
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
		mutex_exit(&softint_lock);
	} else {
		/*
		 * Establish handlers for legacy net interrupts.
		 * XXX Needs to go away.
		 */
#define DONETISR(n, f)							\
    softint_netisrs[(n)] = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,\
        (void (*)(void *))(f), NULL)
#include <net/netisr_dispatch.h>
#undef DONETISR
	}
}
/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);
	KASSERT((flags & SOFTINT_IMPMASK) == 0);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++)
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
	}

	mutex_exit(&softint_lock);

	return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
}
/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.  The soft interrupt could
 *	still be active at this point, but the caller commits not to try
 *	and trigger it again once this call is made.  The caller must not
 *	hold any locks that could be taken from soft interrupt context,
 *	because we will wait for the softint to complete if it's still
 *	active.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;
	uint64_t where;
	u_int flags;

	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);

	/*
	 * Run a cross call so we see up to date values of sh_flags from
	 * all CPUs.  Once softint_disestablish() is called, the caller
	 * commits to not trigger the interrupt and set SOFTINT_ACTIVE on
	 * it again.  So, we are only looking for handler records with
	 * SOFTINT_ACTIVE already set.
	 */
	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);

	for (;;) {
		/* Collect flag values from each CPU. */
		flags = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sc = ci->ci_data.cpu_softcpu;
			sh = (softhand_t *)((uint8_t *)sc + offset);
			KASSERT(sh->sh_func != NULL);
			flags |= sh->sh_flags;
		}

		/* Inactive on all CPUs? */
		if ((flags & SOFTINT_ACTIVE) == 0) {
			break;
		}

		/* Oops, still active.  Wait for it to clear. */
		(void)kpause("softdis", false, 1, NULL);
	}

	/* Clear the handler on each CPU. */
	mutex_enter(&softint_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		sh->sh_func = NULL;
	}
	mutex_exit(&softint_lock);
}
/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	KASSERT(kpreempt_disabled());

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/* If it's already pending there's nothing to do. */
	if ((sh->sh_flags & SOFTINT_PENDING) != 0)
		return;

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 */
	s = splhigh();
	if ((sh->sh_flags & SOFTINT_PENDING) == 0) {
		si = sh->sh_isr;
		sh->sh_flags |= SOFTINT_PENDING;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns back at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;
	bool havelock;

#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(si->si_lwp == curlwp);
#else
	/* May be running in user context. */
#endif
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	havelock = false;

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		KASSERT((sh->sh_flags & SOFTINT_PENDING) != 0);
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) == 0);
		sh->sh_flags ^= (SOFTINT_PENDING | SOFTINT_ACTIVE);
		splx(s);

		/* Run the handler. */
		if (sh->sh_flags & SOFTINT_MPSAFE) {
			if (havelock) {
				KERNEL_UNLOCK_ONE(l);
				havelock = false;
			}
		} else if (!havelock) {
			KERNEL_LOCK(1, l);
			havelock = true;
		}
		(*sh->sh_func)(sh->sh_arg);

		(void)splhigh();
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) != 0);
		sh->sh_flags ^= SOFTINT_ACTIVE;
	}

	if (havelock) {
		KERNEL_UNLOCK_ONE(l);
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	uvmexp.softs++;

	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);
	si->si_evcnt.ev_count++;
	si->si_active = 0;
}
/*
 * softint_block:
 *
 *	Update statistics when the soft interrupt blocks.
 */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	KASSERT((l->l_pflag & LP_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}
/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{

	softint_schedule(softint_netisrs[isr]);
}
#ifndef __HAVE_FAST_SOFTINTS

#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires __HAVE_FAST_SOFTINTS
#endif
/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	softint_t *si;

	*machdep = (1 << level);
	si = l->l_private;

	lwp_lock(l);
	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}
/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;
	ci->ci_data.cpu_softints |= machdep;
	if (l == ci->ci_data.cpu_idlelwp) {
		cpu_need_resched(ci, 0);
	} else {
		/* MI equivalent of aston() */
		lwp_need_userret(l);
	}
}
/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.
 */
static void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/*
		 * Clear pending status and run it.  We must drop the
		 * spl before mi_switch(), since IPL_HIGH may be higher
		 * than IPL_SCHED (and it is not safe to switch at a
		 * higher level).
		 */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		lwp_lock(l);
		l->l_stat = LSIDL;
		mi_switch(l);
	}
}
/*
 * softint_picklwp:
 *
 *	Slow path: called from mi_switch() to pick the highest priority
 *	soft interrupt LWP that needs to run.
 */
lwp_t *
softint_picklwp(void)
{
	struct cpu_info *ci;
	u_int mask;
	softint_t *si;
	lwp_t *l;

	ci = curcpu();
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
	mask = ci->ci_data.cpu_softints;

	if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
		l = si[SOFTINT_SERIAL].si_lwp;
	} else if ((mask & (1 << SOFTINT_NET)) != 0) {
		l = si[SOFTINT_NET].si_lwp;
	} else if ((mask & (1 << SOFTINT_BIO)) != 0) {
		l = si[SOFTINT_BIO].si_lwp;
	} else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
		l = si[SOFTINT_CLOCK].si_lwp;
	} else {
		panic("softint_picklwp");
	}

	return l;
}
/*
 * softint_overlay:
 *
 *	Slow path: called from lwp_userret() to run a soft interrupt
 *	within the context of a user thread.
 */
void
softint_overlay(void)
{
	struct cpu_info *ci;
	u_int softints, oflag;
	softint_t *si;
	pri_t obase;
	lwp_t *l;
	int s;

	l = curlwp;
	ci = l->l_cpu;
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	s = splhigh();
	/* Arrange to elevate priority if the LWP blocks. */
	obase = l->l_kpribase;
	l->l_kpribase = PRI_KERNEL_RT;
	oflag = l->l_pflag;
	l->l_pflag = oflag | LP_INTR | LP_BOUND;
	while ((softints = ci->ci_data.cpu_softints) != 0) {
		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
			softint_execute(&si[SOFTINT_SERIAL], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_NET)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
			softint_execute(&si[SOFTINT_NET], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_BIO)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
			softint_execute(&si[SOFTINT_BIO], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
			softint_execute(&si[SOFTINT_CLOCK], l, s);
			continue;
		}
	}
	l->l_pflag = oflag;
	l->l_kpribase = obase;
	splx(s);
}
#else	/* !__HAVE_FAST_SOFTINTS */
/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
static void
softint_thread(void *cookie)
{

	panic("softint_thread");
}
/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct bintime now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

	KASSERT((pinned->l_pflag & LP_RUNNING) != 0);
	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = (softint_timing ? LP_TIMEINTR : 0);
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;
	l->l_pflag |= (LP_RUNNING | timing);

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing)
		binuptime(&l->l_stime);
	softint_execute(si, l, s);
	if (timing) {
		binuptime(&now);
		updatertime(l, &now);
		l->l_pflag &= ~LP_TIMEINTR;
	}

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's' which will
	 * prevent softint_dispatch() from being reentered at level 's',
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the LWP chosen by lwp_exit_switchaway().
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		splx(s);
		lwp_exit_switchaway(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_pflag &= ~LP_RUNNING;
}
#endif	/* !__HAVE_FAST_SOFTINTS */