/*	$NetBSD: kern_exit.c,v 1.224 2009/11/01 21:05:30 rmind Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.10 (Berkeley) 2/23/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.224 2009/11/01 21:05:30 rmind Exp $");

#include "opt_ktrace.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/resource.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/uidinfo.h>
#include <sys/ptrace.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/savar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/ktrace.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>
#ifdef DEBUG_EXIT
int debug_exit = 0;
#define	DPRINTF(x) if (debug_exit) printf x
#else
#define	DPRINTF(x)
#endif
static int find_stopped_child(struct proc *, pid_t, int, struct proc **,
    int *);
static void proc_free(struct proc *, struct rusage *);
/*
 * Fill in the appropriate signal information, and signal the parent.
 */
static void
exit_psignal(struct proc *p, struct proc *pp, ksiginfo_t *ksi)
{

	KSI_INIT(ksi);
	if ((ksi->ksi_signo = P_EXITSIG(p)) == SIGCHLD) {
		if (WIFSIGNALED(p->p_xstat)) {
			if (WCOREDUMP(p->p_xstat))
				ksi->ksi_code = CLD_DUMPED;
			else
				ksi->ksi_code = CLD_KILLED;
		} else
			ksi->ksi_code = CLD_EXITED;
	}
	/*
	 * We fill those in, even for non-SIGCHLD.
	 * It's safe to access p->p_cred unlocked here.
	 */
	ksi->ksi_pid = p->p_pid;
	ksi->ksi_uid = kauth_cred_geteuid(p->p_cred);
	ksi->ksi_status = p->p_xstat;
	/* XXX: is this still valid? */
	ksi->ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
	ksi->ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
}
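
/*
 * The ksi_* fields set above become the siginfo the parent sees for the
 * exit signal (si_pid, si_uid, si_status and the CLD_* code), so p_xstat
 * and p_cred must already be in their final state when exit_psignal()
 * is called.
 */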
/*
 * exit --
 *	Death of process.
 */
int
sys_exit(struct lwp *l, const struct sys_exit_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)	rval;
	} */
	struct proc *p = l->l_proc;

	/* Don't call exit1() multiple times in the same process. */
	mutex_enter(p->p_lock);
	if (p->p_sflag & PS_WEXIT) {
		mutex_exit(p->p_lock);
		lwp_exit(l);
	}

	/* exit1() will release the mutex. */
	exit1(l, W_EXITCODE(SCARG(uap, rval), 0));
	/* NOTREACHED */
	return (0);
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 *
 * Must be called with p->p_lock held.  Does not return.
 */
void
exit1(struct lwp *l, int rv)
{
	struct proc	*p, *q, *nq;
	struct pgrp	*pgrp;
	ksiginfo_t	ksi;
	ksiginfo_queue_t kq;
	int		wakeinit, sa;

	p = l->l_proc;

	KASSERT(mutex_owned(p->p_lock));

	if (__predict_false(p == initproc))
		panic("init died (signal %d, exit %d)",
		    WTERMSIG(rv), WEXITSTATUS(rv));

	/*
	 * Disable scheduler activation upcalls.  We're trying to get out of
	 * the kernel.
	 */
	sa = 0;
	if ((p->p_sa != NULL)) {
		l->l_pflag |= LP_SA_NOBLOCK;
		sa = 1;
	}

	p->p_sflag |= PS_WEXIT;

	/*
	 * Force all other LWPs to exit before we do.  Only then can we
	 * begin to tear down the rest of the process state.
	 */
	if (sa || p->p_nlwps > 1)
		exit_lwps(l);

	ksiginfo_queue_init(&kq);

	/*
	 * If we have been asked to stop on exit, do so now.
	 */
	if (__predict_false(p->p_sflag & PS_STOPEXIT)) {
		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
		sigclearall(p, &contsigmask, &kq);
		p->p_waited = 0;
		membar_producer();
		p->p_stat = SSTOP;
		lwp_lock(l);
		p->p_nrlwps--;
		l->l_stat = LSSTOP;
		mutex_exit(p->p_lock);
		mi_switch(l);
		KERNEL_LOCK(l->l_biglocks, l);
		mutex_enter(p->p_lock);
	}
	/*
	 * Bin any remaining signals and mark the process as dying so it will
	 * not be found for, e.g. signals.
	 */
	sigfillset(&p->p_sigctx.ps_sigignore);
	sigclearall(p, NULL, &kq);
	p->p_stat = SDYING;
	mutex_exit(p->p_lock);
	ksiginfo_queue_drain(&kq);

	/* Destroy any lwpctl info. */
	if (p->p_lwpctl != NULL)
		lwp_ctl_exit();

	/*
	 * Drain all remaining references that procfs, ptrace and others may
	 * have on the process.
	 */
	rw_enter(&p->p_reflock, RW_WRITER);

	DPRINTF(("exit1: %d.%d exiting.\n", p->p_pid, l->l_lid));

	timers_free(p, TIMERS_ALL);
#if defined(__HAVE_RAS)
	ras_purgeall();
#endif

	/*
	 * Close open files, release open-file table and free signal
	 * actions.  This may block!
	 */
	fd_free();
	cwdfree(p->p_cwd);
	p->p_cwd = NULL;
	doexithooks(p);
	sigactsfree(p->p_sigacts);

	/*
	 * Write out accounting data.
	 */
	(void)acct_process(l);

#ifdef KTRACE
	/*
	 * Release trace file.
	 */
	if (p->p_tracep != NULL) {
		mutex_enter(&ktrace_lock);
		ktrderef(p);
		mutex_exit(&ktrace_lock);
	}
#endif
	/*
	 * If emulation has process exit hook, call it now.
	 * Set the exit status now so that the exit hook has
	 * an opportunity to tweak it (COMPAT_LINUX requires
	 * this for thread group emulation)
	 */
	p->p_xstat = rv;
	if (p->p_emul->e_proc_exit)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Free the VM resources we're still holding on to.
	 * We must do this from a valid thread because doing
	 * so may block. This frees vmspace, which we don't
	 * need anymore. The only remaining lwp is the one
	 * we run at this moment, nothing runs in userland
	 * anymore.
	 */
	uvm_proc_exit(p);

	/*
	 * Stop profiling.
	 */
	if (__predict_false((p->p_stflag & PST_PROFIL) != 0)) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}

	/*
	 * If parent is waiting for us to exit or exec, PL_PPWAIT is set; we
	 * wake up the parent early to avoid deadlock.  We can do this once
	 * the VM resources are released.
	 */
	mutex_enter(proc_lock);
	if (p->p_lflag & PL_PPWAIT) {
		p->p_lflag &= ~PL_PPWAIT;
		cv_broadcast(&p->p_pptr->p_waitcv);
	}
	if (SESS_LEADER(p)) {
		struct vnode *vprele = NULL, *vprevoke = NULL;
		struct session *sp = p->p_session;
		struct tty *tp;

		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			tp = sp->s_ttyp;
			mutex_spin_enter(&tty_lock);
			if (tp->t_session == sp) {
				/* we can't guarantee the revoke will do this */
				pgrp = tp->t_pgrp;
				tp->t_session = NULL;
				mutex_spin_exit(&tty_lock);
				if (pgrp != NULL)
					pgsignal(pgrp, SIGHUP, 1);
				mutex_exit(proc_lock);
				(void)ttywait(tp);
				mutex_enter(proc_lock);

				/* The tty could have been revoked. */
				vprevoke = sp->s_ttyvp;
			} else
				mutex_spin_exit(&tty_lock);
			vprele = sp->s_ttyvp;
			sp->s_ttyvp = NULL;
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;

		if (vprevoke != NULL || vprele != NULL) {
			if (vprevoke != NULL) {
				/* Releases proc_lock. */
				proc_sessrele(sp);
				VOP_REVOKE(vprevoke, REVOKEALL);
			} else
				mutex_exit(proc_lock);
			vrele(vprele);
			mutex_enter(proc_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	/*
	 * Finalize the last LWP's specificdata, as well as the
	 * specificdata for the proc itself.
	 */
	lwp_finispecific(l);
	proc_finispecific(p);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

#if defined(PERFCTRS)
	/*
	 * Save final PMC information in parent process & clean up.
	 */
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
		pmc_accumulate(p->p_pptr, p);
		pmc_process_exit(p);
	}
#endif
	/*
	 * Reset p_opptr pointer of all former children which got
	 * traced by another process and were reparented. We reset
	 * it to NULL here; the trace detach code then reparents
	 * the child to initproc. We only check allproc list, since
	 * eventual former children on zombproc list won't reference
	 * p_opptr anymore.
	 */
	if (__predict_false(p->p_slflag & PSL_CHTRACED)) {
		PROCLIST_FOREACH(q, &allproc) {
			if ((q->p_flag & PK_MARKER) != 0)
				continue;
			if (q->p_opptr == p)
				q->p_opptr = NULL;
		}
	}

	/*
	 * Give orphaned children to init(8).
	 */
	q = LIST_FIRST(&p->p_children);
	wakeinit = (q != NULL);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);

		/*
		 * Traced processes are killed since their existence
		 * means someone is screwing up. Since we reset the
		 * trace flags, the logic in sys_wait4() would not be
		 * triggered to reparent the process to its
		 * original parent, so we must do this here.
		 */
		if (__predict_false(q->p_slflag & PSL_TRACED)) {
			mutex_enter(p->p_lock);
			q->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL);
			mutex_exit(p->p_lock);
			if (q->p_opptr != q->p_pptr) {
				struct proc *t = q->p_opptr;
				proc_reparent(q, t ? t : initproc);
				q->p_opptr = NULL;
			} else
				proc_reparent(q, initproc);
			killproc(q, "orphaned traced process");
		} else
			proc_reparent(q, initproc);
	}
	/*
	 * Move proc from allproc to zombproc, it's now nearly ready to be
	 * collected by parent.
	 */
	LIST_REMOVE(l, l_list);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);

	/*
	 * Mark the process as dead.  We must do this before we signal
	 * the parent.
	 */
	p->p_stat = SDEAD;

	/* Put in front of parent's sibling list for parent to collect it */
	q = p->p_pptr;
	q->p_nstopchild++;
	if (LIST_FIRST(&q->p_children) != p) {
		/* Put child where it can be found quickly */
		LIST_REMOVE(p, p_sibling);
		LIST_INSERT_HEAD(&q->p_children, p, p_sibling);
	}

	/*
	 * Notify parent that we're gone.  If parent has the P_NOCLDWAIT
	 * flag set, notify init instead (and hope it will handle this
	 * situation).
	 */
	if (q->p_flag & (PK_NOCLDWAIT|PK_CLDSIGIGN)) {
		proc_reparent(p, initproc);
		wakeinit = 1;

		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_FIRST(&q->p_children) == NULL)
			cv_broadcast(&q->p_waitcv);
	}
	/* Reload parent pointer, since p may have been reparented above */
	q = p->p_pptr;

	if (__predict_false((p->p_slflag & PSL_FSTRACE) == 0 &&
	    p->p_exitsig != 0)) {
		exit_psignal(p, q, &ksi);
		kpsignal(q, &ksi, NULL);
	}

	/* Calculate the final rusage info.  */
	calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime,
	    NULL, NULL);

	if (wakeinit)
		cv_broadcast(&initproc->p_waitcv);

	callout_destroy(&l->l_timeout_ch);
	/*
	 * Remaining lwp resources will be freed in lwp_exit2() once we've
	 * switched to idle context; at that point, we will be marked as a
	 * zombie.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l);
	l->l_prflag &= ~LPR_DETACHED;
	lwp_unlock(l);

	KASSERT(curlwp == l);
	KASSERT(p->p_nrlwps == 1);
	KASSERT(p->p_nlwps == 1);

	p->p_nrlwps--;
	p->p_nzlwps++;
	mutex_exit(p->p_lock);

	/*
	 * Signal the parent to collect us, and drop the proclist lock.
	 * Drop debugger/procfs lock; no new references can be gained.
	 */
	cv_broadcast(&p->p_pptr->p_waitcv);
	rw_exit(&p->p_reflock);
	mutex_exit(proc_lock);

	/* Verify that we hold no locks other than the kernel lock. */
	LOCKDEBUG_BARRIER(&kernel_lock, 0);

	/*
	 * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP!
	 */

	/*
	 * Give machine-dependent code a chance to free any MD LWP
	 * resources.  This must be done before uvm_lwp_exit(), in
	 * case these resources are in the PCB.
	 */
	cpu_lwp_free(l, 1);

	/* This process no longer needs to hold the kernel lock. */
#ifdef notyet
	/* XXXSMP hold in lwp_userret() */
	KERNEL_UNLOCK_LAST(l);
#else
	KERNEL_UNLOCK_ALL(l, NULL);
#endif

	lwp_exit_switchaway(l);
}
void
exit_lwps(struct lwp *l)
{
	struct proc *p;
	struct lwp *l2;
	int error;
	lwpid_t waited;
	int nlocks;

	KERNEL_UNLOCK_ALL(l, &nlocks);

	p = l->l_proc;
	KASSERT(mutex_owned(p->p_lock));

	if (p->p_sa != NULL) {
		struct sadata_vp *vp;

		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			/*
			 * Make SA-cached LWPs normal process interruptible
			 * so that the exit code can wake them. Locking
			 * savp_mutex locks all the lwps on this vp that
			 * we need to adjust.
			 */
			mutex_enter(&vp->savp_mutex);
			DPRINTF(("exit_lwps: Making cached LWPs of %d on "
			    "VP %d interruptable: ", p->p_pid, vp->savp_id));
			TAILQ_FOREACH(l2, &vp->savp_lwpcache, l_sleepchain) {
				l2->l_flag |= LW_SINTR;
				DPRINTF(("%d ", l2->l_lid));
			}
			DPRINTF(("\n"));

			DPRINTF(("exit_lwps: Making unblocking LWPs of %d on "
			    "VP %d interruptable: ", p->p_pid, vp->savp_id));
			TAILQ_FOREACH(l2, &vp->savp_woken, l_sleepchain) {
				vp->savp_woken_count--;
				l2->l_flag |= LW_SINTR;
				DPRINTF(("%d ", l2->l_lid));
			}
			DPRINTF(("\n"));
			mutex_exit(&vp->savp_mutex);
		}
	}

 retry:
	/*
	 * Interrupt LWPs in interruptible sleep, unsuspend suspended
	 * LWPs and then wait for everyone else to finish.
	 */
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		lwp_lock(l2);
		l2->l_flag &= ~LW_SA;
		l2->l_flag |= LW_WEXIT;
		if ((l2->l_stat == LSSLEEP && (l2->l_flag & LW_SINTR)) ||
		    l2->l_stat == LSSUSPENDED || l2->l_stat == LSSTOP) {
			/* setrunnable() will release the lock. */
			setrunnable(l2);
			DPRINTF(("exit_lwps: Made %d.%d runnable\n",
			    p->p_pid, l2->l_lid));
		} else
			lwp_unlock(l2);
	}

	while (p->p_nlwps > 1) {
		DPRINTF(("exit_lwps: waiting for %d LWPs (%d zombies)\n",
		    p->p_nlwps, p->p_nzlwps));
		error = lwp_wait1(l, 0, &waited, LWPWAIT_EXITCONTROL);
		if (error == EDEADLK) {
			/*
			 * LWPs can get suspended/slept behind us.
			 * Kick them again and retry.
			 */
			goto retry;
		}
		if (error)
			panic("exit_lwps: lwp_wait1 failed with error %d",
			    error);
		DPRINTF(("exit_lwps: Got LWP %d from lwp_wait1()\n", waited));
	}

	KERNEL_LOCK(nlocks, l);
	KASSERT(p->p_nlwps == 1);
}
int
do_sys_wait(int *pid, int *status, int options, struct rusage *ru)
{
	struct proc	*child;
	int		error;

	if (ru != NULL)
		memset(ru, 0, sizeof(*ru));

	mutex_enter(proc_lock);
	error = find_stopped_child(curproc, *pid, options, &child, status);
	if (child == NULL) {
		mutex_exit(proc_lock);
		*pid = 0;
		return error;
	}
	*pid = child->p_pid;

	if (child->p_stat == SZOMB) {
		/* proc_free() will release the proc_lock. */
		if (options & WNOWAIT) {
			mutex_exit(proc_lock);
		} else {
			proc_free(child, ru);
		}
	} else {
		/* Child state must have been SSTOP. */
		mutex_exit(proc_lock);
		*status = W_STOPCODE(*status);
	}
	return 0;
}
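
/*
 * For a stopped (rather than exited) child, the status handed back above is
 * W_STOPCODE(sig), i.e. (sig << 8) | 0177, which is what makes WIFSTOPPED()
 * true and lets WSTOPSIG() recover the stopping signal in the caller.
 */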
int
sys___wait450(struct lwp *l, const struct sys___wait450_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			pid;
		syscallarg(int *)		status;
		syscallarg(int)			options;
		syscallarg(struct rusage *)	rusage;
	} */
	int		error, status, pid = SCARG(uap, pid);
	struct rusage	ru;

	error = do_sys_wait(&pid, &status, SCARG(uap, options),
	    SCARG(uap, rusage) != NULL ? &ru : NULL);

	retval[0] = pid;
	if (pid == 0)
		return error;

	if (SCARG(uap, status)) {
		error = copyout(&status, SCARG(uap, status), sizeof(status));
	}
	if (SCARG(uap, rusage) && error == 0) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
	}

	return error;
}
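
/*
 * Overview of the wait path implemented in this file: the wait(2) family of
 * calls enters through sys___wait450() (or a compat wrapper), which calls
 * do_sys_wait(); that scans for a candidate child via find_stopped_child()
 * and, when reaping a zombie without WNOWAIT, completes the reclaim in
 * proc_free() below.
 */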
/*
 * Scan list of child processes for a child process that has stopped or
 * exited.  Used by sys_wait4 and 'compat' equivalents.
 *
 * Must be called with the proc_lock held, and may release while waiting.
 */
static int
find_stopped_child(struct proc *parent, pid_t pid, int options,
    struct proc **child_p, int *status_p)
{
	struct proc *child, *dead;
	int error;

	KASSERT(mutex_owned(proc_lock));

	if (options & ~(WUNTRACED|WNOHANG|WALTSIG|WALLSIG)
	    && !(options & WOPTSCHECKED)) {
		*child_p = NULL;
		return EINVAL;
	}

	if (pid == 0 && !(options & WOPTSCHECKED))
		pid = -parent->p_pgid;

	for (;;) {
		error = ECHILD;
		dead = NULL;

		LIST_FOREACH(child, &parent->p_children, p_sibling) {
			if (pid >= 0) {
				if (child->p_pid != pid) {
					child = p_find(pid, PFIND_ZOMBIE |
					    PFIND_LOCKED);
					if (child == NULL ||
					    child->p_pptr != parent) {
						child = NULL;
						break;
					}
				}
			} else if (pid != WAIT_ANY && child->p_pgid != -pid) {
				/* Child not in correct pgrp */
				continue;
			}

			/*
			 * Wait for processes with p_exitsig != SIGCHLD
			 * processes only if WALTSIG is set; wait for
			 * processes with p_exitsig == SIGCHLD only
			 * if WALTSIG is clear.
			 */
			if (((options & WALLSIG) == 0) &&
			    (options & WALTSIG ? child->p_exitsig == SIGCHLD
			    : P_EXITSIG(child) != SIGCHLD)) {
				if (child->p_pid == pid) {
					child = NULL;
					break;
				}
				continue;
			}

			error = 0;
			if ((options & WNOZOMBIE) == 0) {
				if (child->p_stat == SZOMB)
					break;
				if (child->p_stat == SDEAD) {
					/*
					 * We may occasionally arrive here
					 * after receiving a signal, but
					 * immediately before the child
					 * process is zombified.  The wait
					 * will be short, so avoid returning
					 * to userspace.
					 */
					dead = child;
				}
			}

			if (child->p_stat == SSTOP &&
			    child->p_waited == 0 &&
			    (child->p_slflag & PSL_TRACED ||
			    options & WUNTRACED)) {
				if ((options & WNOWAIT) == 0) {
					child->p_waited = 1;
					parent->p_nstopchild--;
				}
				break;
			}
			if (parent->p_nstopchild == 0 || child->p_pid == pid) {
				child = NULL;
				break;
			}
		}

		if (child != NULL || error != 0 ||
		    ((options & WNOHANG) != 0 && dead == NULL)) {
			if (child != NULL)
				*status_p = child->p_xstat;
			*child_p = child;
			return error;
		}

		/*
		 * Wait for another child process to stop.
		 */
		error = cv_wait_sig(&parent->p_waitcv, proc_lock);

		if (error != 0) {
			*child_p = NULL;
			return error;
		}
	}
}
/*
 * Free a process after parent has taken all the state info.  Must be called
 * with the proclist lock held, and will release before returning.
 *
 * *ru is returned to the caller, and must be freed by the caller.
 */
static void
proc_free(struct proc *p, struct rusage *ru)
{
	struct proc *parent = p->p_pptr;
	struct lwp *l;
	ksiginfo_t ksi;
	kauth_cred_t cred1, cred2;
	uid_t uid;

	KASSERT(mutex_owned(proc_lock));
	KASSERT(p->p_nlwps == 1);
	KASSERT(p->p_nzlwps == 1);
	KASSERT(p->p_nrlwps == 0);
	KASSERT(p->p_stat == SZOMB);

	/*
	 * If we got the child via ptrace(2) or procfs, and
	 * the parent is different (meaning the process was
	 * attached, rather than run as a child), then we need
	 * to give it back to the old parent, and send the
	 * parent the exit signal.  The rest of the cleanup
	 * will be done when the old parent waits on the child.
	 */
	if ((p->p_slflag & PSL_TRACED) != 0 && p->p_opptr != parent) {
		mutex_enter(p->p_lock);
		p->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL);
		mutex_exit(p->p_lock);
		parent = (p->p_opptr == NULL) ? initproc : p->p_opptr;
		proc_reparent(p, parent);
		p->p_opptr = NULL;
		if (p->p_exitsig != 0) {
			exit_psignal(p, parent, &ksi);
			kpsignal(parent, &ksi, NULL);
		}
		cv_broadcast(&parent->p_waitcv);
		mutex_exit(proc_lock);
		return;
	}

	sched_proc_exit(parent, p);

	/*
	 * Add child times of exiting process onto its own times.
	 * This cannot be done any earlier else it might get done twice.
	 */
	l = LIST_FIRST(&p->p_lwps);
	p->p_stats->p_ru.ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
	p->p_stats->p_ru.ru_nivcsw += l->l_nivcsw;
	ruadd(&p->p_stats->p_ru, &l->l_ru);
	ruadd(&p->p_stats->p_ru, &p->p_stats->p_cru);
	ruadd(&parent->p_stats->p_cru, &p->p_stats->p_ru);
	if (ru != NULL)
		*ru = p->p_stats->p_ru;

	/* Release any SA state. */
	if (p->p_sa != NULL)
		sa_release(p);

	/*
	 * At this point we are going to start freeing the final resources.
	 * If anyone tries to access the proc structure after here they will
	 * get a shock - bits are missing.  Attempt to make it hard!  We
	 * don't bother with any further locking past this point.
	 */
	p->p_stat = SIDL;		/* not even a zombie any more */
	LIST_REMOVE(p, p_list);		/* off zombproc */
	parent->p_nstopchild--;
	LIST_REMOVE(p, p_sibling);

	/*
	 * Let pid be reallocated.
	 */
	proc_free_pid(p);

	/*
	 * Unlink process from its process group.
	 * Releases the proc_lock.
	 */
	proc_leavepgrp(p);

	/*
	 * Delay release until after lwp_free.
	 */
	cred2 = l->l_cred;

	/*
	 * Free the last LWP's resources.
	 *
	 * lwp_free ensures the LWP is no longer running on another CPU.
	 */
	lwp_free(l, false, true);

	/*
	 * Now no one except us can reach the process p.
	 */

	/*
	 * Decrement the count of procs running with this uid.
	 */
	cred1 = p->p_cred;
	uid = kauth_cred_getuid(cred1);
	(void)chgproccnt(uid, -1);

	/*
	 * Release substructures.
	 */
	pstatsfree(p->p_stats);
	kauth_cred_free(cred1);
	kauth_cred_free(cred2);

	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp)
		vrele(p->p_textvp);

	mutex_destroy(&p->p_auxlock);
	mutex_obj_free(p->p_lock);
	mutex_destroy(&p->p_stmutex);
	cv_destroy(&p->p_waitcv);
	cv_destroy(&p->p_lwpcv);
	rw_destroy(&p->p_reflock);

	proc_free_mem(p);
}
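
/*
 * proc_reparent() below is used from two places in this file: exit1() hands
 * orphaned and traced children over to init(8) with it, and proc_free() uses
 * it to give a ptrace(2)-attached child back to its original parent before
 * signalling that parent.
 */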
/*
 * Make process 'parent' the new parent of process 'child'.
 *
 * Must be called with proc_lock held.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	KASSERT(mutex_owned(proc_lock));

	if (child->p_pptr == parent)
		return;

	if (child->p_stat == SZOMB ||
	    (child->p_stat == SSTOP && !child->p_waited)) {
		child->p_pptr->p_nstopchild--;
		parent->p_nstopchild++;
	}
	if (parent == initproc)
		child->p_exitsig = SIGCHLD;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
	child->p_ppid = parent->p_pid;
}