/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "opt_witness.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm_extern.h>

#include <sys/eventhandler.h>
/*
 * thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

static struct unrhdr *tid_unrhdr;
/*
 * Prepare a thread for use.
 */
thread_ctor(void *mem, int size, void *arg, int flags)

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	EVENTHANDLER_INVOKE(thread_ctor, td);
	audit_thread_alloc(td);
	umtx_thread_alloc(td);
/*
 * Reclaim a thread after use.
 */
thread_dtor(void *mem, int size, void *arg)

	td = (struct thread *)mem;

	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		panic("bad thread state");

	audit_thread_free(td);
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
thread_init(void *mem, int size, int flags)

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
thread_fini(void *mem, int size)

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 * {arch}/{arch}/machdep.c  ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 */
proc_linkup0(struct proc *p, struct thread *td)

	TAILQ_INIT(&p->p_threads);	/* all threads in proc */

proc_linkup(struct proc *p, struct thread *td)

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	LIST_INIT(&p->p_mqnotifier);
/*
 * Initialize global thread allocation resources.
 */
	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
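/*
 * A minimal sketch (illustration only, not code from this file) of how the
 * four hooks handed to uma_zcreate() above are used: ctor/dtor run on every
 * uma_zalloc()/uma_zfree(), while init/fini run only when an item first
 * enters or finally leaves the zone, which is what keeps td_sleepqueue and
 * td_turnstile "type stable" across thread reuse.  The "EXAMPLE" zone and
 * the example_* hooks below are hypothetical.
 */
#if 0
static uma_zone_t example_zone;

static void
example_zone_setup(void)
{
	example_zone = uma_zcreate("EXAMPLE", sizeof(struct example),
	    example_ctor, example_dtor,		/* every alloc/free */
	    example_init, example_fini,		/* only on zone entry/exit */
	    UMA_ALIGN_CACHE, 0);
}
#endif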
/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
thread_zombie(struct thread *td)

	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
/*
 * Release a thread that has exited after cpu_throw().
 */
thread_stash(struct thread *td)

	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
/*
 * Reap zombie resources.
 */
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
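/*
 * Illustration only: the general pattern used by thread_zombie() and
 * thread_reap() above, written out for a hypothetical item type -- stash
 * under a spin lock, detach the whole list while holding it, then walk and
 * free with no lock held.
 */
#if 0
struct item {
	TAILQ_ENTRY(item) links;
};
static TAILQ_HEAD(, item) item_zombies = TAILQ_HEAD_INITIALIZER(item_zombies);
static struct mtx item_zombie_lock;

static void
item_reap_example(void)
{
	TAILQ_HEAD(, item) tmp = TAILQ_HEAD_INITIALIZER(tmp);
	struct item *it, *next;

	mtx_lock_spin(&item_zombie_lock);
	TAILQ_CONCAT(&tmp, &item_zombies, links);	/* detach everything */
	mtx_unlock_spin(&item_zombie_lock);
	TAILQ_FOREACH_SAFE(it, &tmp, links, next)	/* free unlocked */
		free(it, M_TEMP);
}
#endif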
	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
	cpu_thread_alloc(td);
/*
 * Deallocate a thread.
 */
thread_free(struct thread *td)

	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
	uint64_t new_switchtime;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

	AUDIT_SYSCALL_EXIT(0, td);
	umtx_thread_exit(td);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);

	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SNGL is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	td->td_state = TDS_INACTIVE;
	witness_thread_exit(td);
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	panic("I'm a teapot!");
/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
thread_wait(struct proc *p)

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
thread_link(struct thread *td, struct proc *p)

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
/*
 * Convert a process with one thread to an unthreaded process.
 */
thread_unthread(struct thread *td)

	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	p->p_flag &= ~P_HADTHREADS;
thread_unlink(struct thread *td)

	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
thread_single(int mode)

	int remaining, wakeup_swapper;

	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
		FOREACH_THREAD_IN_PROC(p, td2) {
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				if (td->td_flags & TDF_DBSUSPEND)
					td->td_flags &= ~TDF_DBSUSPEND;
				if (TD_IS_SUSPENDED(td2))
					thread_unsuspend_one(td2);
				if (TD_ON_SLEEPQ(td2) &&
				    (td2->td_flags & TDF_SINTR))
					sleepq_abort(td2, EINTR);
			case SINGLE_BOUNDARY:
				if (TD_IS_SUSPENDED(td2)) {
				/*
				 * maybe other inhibited states too?
				 */
				if ((td2->td_flags & TDF_SINTR) &&
				    (td2->td_inhibitors &
				    (TDI_SLEEPING | TDI_SWAPPED)))
					thread_suspend_one(td2);
			else if (TD_IS_RUNNING(td2) && td != td2) {
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
thread_suspend_check(int return_instead)

	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		if (return_instead == 0)
			p->p_boundary_count--;
thread_suspend_switch(struct thread *td)

	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
thread_suspend_one(struct thread *td)

	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
thread_unsuspend_one(struct thread *td)

	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	return (setrunnable(td));
/*
 * Allow all threads blocked by single threading to continue running.
 */
thread_unsuspend(struct proc *p)

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
/*
 * End the single threading mode..
 */
thread_single_end(void)

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
thread_find(struct proc *p, lwpid_t tid)

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)