/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/inline.h>
#include <sys/brand.h>
#include <sys/sobject.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/session.h>
#include <sys/signal.h>
#include <sys/class.h>
#include <sys/bitmap.h>
#include <sys/shm_impl.h>
#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/copyops.h>
#include <sys/msacct.h>
#include <vm/seg_vn.h>
#include <vm/seg_dev.h>
#include <vm/seg_spt.h>
#include <sys/vmparam.h>
#include <sys/proc/prdata.h>
#include <sys/project.h>
#include <sys/contract_impl.h>
#include <sys/contract/process.h>
#include <sys/contract/process_impl.h>
#include <sys/schedctl.h>
#include <sys/atomic.h>
#define	MAX_ITERS_SPIN	5

typedef struct prpagev {
	uint_t *pg_protv;	/* vector of page permissions */
	char *pg_incore;	/* vector of incore flags */
	size_t pg_npages;	/* number of pages in protv and incore */
	ulong_t pg_pnbase;	/* pn within segment of first protv element */
} prpagev_t;

size_t pagev_lim = 256 * 1024;	/* limit on number of pages in prpagev_t */
extern const struct seg_ops segdev_ops;		/* needs a header file */
extern const struct seg_ops segspt_shmops;	/* needs a header file */

static int set_watched_page(proc_t *, caddr_t, caddr_t, ulong_t, ulong_t);
static void clear_watched_page(proc_t *, caddr_t, caddr_t, ulong_t);
/*
 * Choose an lwp from the complete set of lwps for the process.
 * This is called for any operation applied to the process
 * file descriptor that requires an lwp to operate upon.
 *
 * Returns a pointer to the thread for the selected LWP,
 * and with the dispatcher lock held for the thread.
 *
 * The algorithm for choosing an lwp is critical for /proc semantics;
 * don't touch this code unless you know all of the implications.
 */
kthread_t *
prchoose(proc_t *p)
{
	kthread_t *t;
	kthread_t *t_onproc = NULL;	/* running on processor */
	kthread_t *t_run = NULL;	/* runnable, on disp queue */
	kthread_t *t_sleep = NULL;	/* sleeping */
	kthread_t *t_hold = NULL;	/* sleeping, performing hold */
	kthread_t *t_susp = NULL;	/* suspended stop */
	kthread_t *t_jstop = NULL;	/* jobcontrol stop, w/o directed stop */
	kthread_t *t_jdstop = NULL;	/* jobcontrol stop with directed stop */
	kthread_t *t_req = NULL;	/* requested stop */
	kthread_t *t_istop = NULL;	/* event-of-interest stop */
	kthread_t *t_dtrace = NULL;	/* DTrace stop */

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * If the agent lwp exists, it takes precedence over all others.
	 */
	if ((t = p->p_agenttp) != NULL) {
		thread_lock(t);
		return (t);
	}

	if ((t = p->p_tlist) == NULL)	/* start at the head of the list */
		return (t);
	do {		/* for each lwp in the process */
		if (VSTOPPED(t)) {	/* virtually stopped */
			if (t_req == NULL)
				t_req = t;
			continue;
		}

		thread_lock(t);		/* make sure thread is in good state */

		switch (t->t_state) {
		default:
			panic("prchoose: bad thread state %d, thread 0x%p",
			    t->t_state, (void *)t);
		case TS_SLEEP:
			if (t->t_wchan == (caddr_t)&p->p_holdlwps &&
			    t->t_wchan0 == NULL) {
				if (t_hold == NULL)
					t_hold = t;
			} else {
				if (t_sleep == NULL)
					t_sleep = t;
			}
			break;
		case TS_RUN:
		case TS_WAIT:
			if (t_run == NULL)
				t_run = t;
			break;
		case TS_ONPROC:
			if (t_onproc == NULL)
				t_onproc = t;
			break;
		case TS_ZOMB:		/* last possible choice */
			break;
		case TS_STOPPED:
			switch (t->t_whystop) {
			case PR_SUSPENDED:
				if (t_susp == NULL)
					t_susp = t;
				break;
			case PR_JOBCONTROL:
				if (t->t_proc_flag & TP_PRSTOP) {
					if (t_jdstop == NULL)
						t_jdstop = t;
				} else {
					if (t_jstop == NULL)
						t_jstop = t;
				}
				break;
			case PR_REQUESTED:
				if (t->t_dtrace_stop && t_dtrace == NULL)
					t_dtrace = t;
				else if (t_req == NULL)
					t_req = t;
				break;
			case PR_SYSENTRY:
			case PR_SYSEXIT:
			case PR_SIGNALLED:
			case PR_FAULTED:
				/*
				 * Make an lwp calling exit() be the
				 * last lwp seen in the process.
				 */
				if (t_istop == NULL ||
				    (t_istop->t_whystop == PR_SYSENTRY &&
				    t_istop->t_whatstop == SYS_exit))
					t_istop = t;
				break;
			case PR_CHECKPOINT:	/* can't happen? */
				break;
			default:
				panic("prchoose: bad t_whystop %d, thread 0x%p",
				    t->t_whystop, (void *)t);
			}
			break;
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	if (t_onproc)
		t = t_onproc;
	else if (t_run)
		t = t_run;
	else if (t_sleep)
		t = t_sleep;
	else if (t_jstop)
		t = t_jstop;
	else if (t_jdstop)
		t = t_jdstop;
	else if (t_istop)
		t = t_istop;
	else if (t_dtrace)
		t = t_dtrace;
	else if (t_req)
		t = t_req;
	else if (t_hold)
		t = t_hold;
	else if (t_susp)
		t = t_susp;
	else			/* TS_ZOMB */
		t = p->p_tlist;

	if (t != NULL)
		thread_lock(t);
	return (t);
}
/*
 * Wakeup anyone sleeping on the /proc vnode for the process/lwp to stop.
 * Also call pollwakeup() if any lwps are waiting in poll() for POLLPRI
 * on the /proc file descriptor.  Called from stop() when a traced
 * process stops on an event of interest.  Also called from exit()
 * and prinvalidate() to indicate POLLHUP and POLLERR respectively.
 */
void
prnotify(struct vnode *vp)
{
	prcommon_t *pcp = VTOP(vp)->pr_common;

	mutex_enter(&pcp->prc_mutex);
	cv_broadcast(&pcp->prc_wait);
	mutex_exit(&pcp->prc_mutex);
	if (pcp->prc_flags & PRC_POLL) {
		/*
		 * We call pollwakeup() with POLLHUP to ensure that
		 * the pollers are awakened even if they are polling
		 * for nothing (i.e., waiting for the process to exit).
		 * This enables the use of the PRC_POLL flag for optimization
		 * (we can turn off PRC_POLL only if we know no pollers remain).
		 */
		pcp->prc_flags &= ~PRC_POLL;
		pollwakeup(&pcp->prc_pollhead, POLLHUP);
	}
}
/* called immediately below, in prfree() */
static void
prfreenotify(vnode_t *vp)
{
	prnode_t *pnp;
	prcommon_t *pcp;

	while (vp != NULL) {
		pnp = VTOP(vp);
		pcp = pnp->pr_common;
		ASSERT(pcp->prc_thread == NULL);
		pcp->prc_proc = NULL;
		/*
		 * We can't call prnotify() here because we are holding
		 * pidlock.  We assert that there is no need to.
		 */
		mutex_enter(&pcp->prc_mutex);
		cv_broadcast(&pcp->prc_wait);
		mutex_exit(&pcp->prc_mutex);
		ASSERT(!(pcp->prc_flags & PRC_POLL));

		vp = pnp->pr_next;
		pnp->pr_next = NULL;
	}
}
/*
 * Called from a hook in freeproc() when a traced process is removed
 * from the process table.  The proc-table pointers of all associated
 * /proc vnodes are cleared to indicate that the process has gone away.
 */
void
prfree(proc_t *p)
{
	uint_t slot = p->p_slot;

	ASSERT(MUTEX_HELD(&pidlock));

	/*
	 * Block the process against /proc so it can be freed.
	 * It cannot be freed while locked by some controlling process.
	 * Lock ordering:
	 *	pidlock -> pr_pidlock -> p->p_lock -> pcp->prc_mutex
	 */
	mutex_enter(&pr_pidlock);	/* protects pcp->prc_proc */
	mutex_enter(&p->p_lock);
	while (p->p_proc_flag & P_PR_LOCK) {
		mutex_exit(&pr_pidlock);
		cv_wait(&pr_pid_cv[slot], &p->p_lock);
		mutex_exit(&p->p_lock);
		mutex_enter(&pr_pidlock);
		mutex_enter(&p->p_lock);
	}

	ASSERT(p->p_tlist == NULL);

	prfreenotify(p->p_plist);
	p->p_plist = NULL;

	prfreenotify(p->p_trace);
	p->p_trace = NULL;

	/*
	 * We broadcast to wake up everyone waiting for this process.
	 * No one can reach this process from this point on.
	 */
	cv_broadcast(&pr_pid_cv[slot]);

	mutex_exit(&p->p_lock);
	mutex_exit(&pr_pidlock);
}
/*
 * Called from a hook in exit() when a traced process is becoming a zombie.
 */
void
prexit(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (pr_watch_active(p)) {
		pr_free_watchpoints(p);
		watch_disable(curthread);
	}
	/* pr_free_watched_pages() is called in exit(), after dropping p_lock */
	if (p->p_trace) {
		VTOP(p->p_trace)->pr_common->prc_flags |= PRC_DESTROY;
		prnotify(p->p_trace);
	}
	cv_broadcast(&pr_pid_cv[p->p_slot]);	/* pauselwps() */
}
/*
 * Called when a thread calls lwp_exit().
 */
void
prlwpexit(kthread_t *t)
{
	vnode_t *vp;
	prnode_t *pnp;
	prcommon_t *pcp;
	proc_t *p = ttoproc(t);
	lwpent_t *lep = p->p_lwpdir[t->t_dslot].ld_entry;

	ASSERT(t == curthread);
	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * The process must be blocked against /proc to do this safely.
	 * The lwp must not disappear while the process is marked P_PR_LOCK.
	 * It is the caller's responsibility to have called prbarrier(p).
	 */
	ASSERT(!(p->p_proc_flag & P_PR_LOCK));

	for (vp = p->p_plist; vp != NULL; vp = pnp->pr_next) {
		pnp = VTOP(vp);
		pcp = pnp->pr_common;
		if (pcp->prc_thread == t) {
			pcp->prc_thread = NULL;
			pcp->prc_flags |= PRC_DESTROY;
		}
	}

	for (vp = lep->le_trace; vp != NULL; vp = pnp->pr_next) {
		pnp = VTOP(vp);
		pcp = pnp->pr_common;
		pcp->prc_thread = NULL;
		pcp->prc_flags |= PRC_DESTROY;
		prnotify(vp);
	}

	if (p->p_trace)
		prnotify(p->p_trace);
}
/*
 * Called when a zombie thread is joined or when a
 * detached lwp exits.  Called from lwp_hash_out().
 */
void
prlwpfree(proc_t *p, lwpent_t *lep)
{
	vnode_t *vp;
	prnode_t *pnp;
	prcommon_t *pcp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * The process must be blocked against /proc to do this safely.
	 * The lwp must not disappear while the process is marked P_PR_LOCK.
	 * It is the caller's responsibility to have called prbarrier(p).
	 */
	ASSERT(!(p->p_proc_flag & P_PR_LOCK));

	vp = lep->le_trace;
	lep->le_trace = NULL;
	while (vp) {
		prnotify(vp);
		pnp = VTOP(vp);
		pcp = pnp->pr_common;
		ASSERT(pcp->prc_thread == NULL &&
		    (pcp->prc_flags & PRC_DESTROY));
		pcp->prc_tslot = -1;
		vp = pnp->pr_next;
		pnp->pr_next = NULL;
	}

	if (p->p_trace)
		prnotify(p->p_trace);
}
/*
 * Called from a hook in exec() when a thread starts exec().
 */
void
prexecstart(void)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * The P_PR_EXEC flag blocks /proc operations for
	 * the duration of the exec().
	 * We can't start exec() while the process is
	 * locked by /proc, so we call prbarrier().
	 * lwp_nostop keeps the process from being stopped
	 * via job control for the duration of the exec().
	 */

	ASSERT(MUTEX_HELD(&p->p_lock));
	prbarrier(p);
	lwp->lwp_nostop++;
	p->p_proc_flag |= P_PR_EXEC;
}
/*
 * Called from a hook in exec() when a thread finishes exec().
 * The thread may or may not have succeeded.  Some other thread
 * may have beat it to the punch.
 */
void
prexecend(void)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	vnode_t *vp;
	prnode_t *pnp;
	prcommon_t *pcp;
	model_t model = p->p_model;
	id_t tid = curthread->t_tid;
	int tslot = curthread->t_dslot;

	ASSERT(MUTEX_HELD(&p->p_lock));

	lwp->lwp_nostop--;
	if (p->p_flag & SEXITLWPS) {
		/*
		 * We are on our way to exiting because some
		 * other thread beat us in the race to exec().
		 * Don't clear the P_PR_EXEC flag in this case.
		 */
		return;
	}

	/*
	 * Wake up anyone waiting in /proc for the process to complete exec().
	 */
	p->p_proc_flag &= ~P_PR_EXEC;
	if ((vp = p->p_trace) != NULL) {
		pcp = VTOP(vp)->pr_common;
		mutex_enter(&pcp->prc_mutex);
		cv_broadcast(&pcp->prc_wait);
		mutex_exit(&pcp->prc_mutex);
		for (; vp != NULL; vp = pnp->pr_next) {
			pnp = VTOP(vp);
			pnp->pr_common->prc_datamodel = model;
		}
	}
	if ((vp = p->p_lwpdir[tslot].ld_entry->le_trace) != NULL) {
		/*
		 * We dealt with the process common above.
		 */
		ASSERT(p->p_trace != NULL);
		pcp = VTOP(vp)->pr_common;
		mutex_enter(&pcp->prc_mutex);
		cv_broadcast(&pcp->prc_wait);
		mutex_exit(&pcp->prc_mutex);
		for (; vp != NULL; vp = pnp->pr_next) {
			pnp = VTOP(vp);
			pcp = pnp->pr_common;
			pcp->prc_datamodel = model;
			pcp->prc_tid = tid;
			pcp->prc_tslot = tslot;
		}
	}
}
/*
 * Called from a hook in relvm() just before freeing the address space.
 * We free all the watched areas now.
 */
void
prrelvm(void)
{
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	prbarrier(p);	/* block all other /proc operations */
	if (pr_watch_active(p)) {
		pr_free_watchpoints(p);
		watch_disable(curthread);
	}
	mutex_exit(&p->p_lock);
	pr_free_watched_pages(p);
}
/*
 * Called from hooks in exec-related code when a traced process
 * attempts to exec(2) a setuid/setgid program or an unreadable
 * file.  Rather than fail the exec we invalidate the associated
 * /proc vnodes so that subsequent attempts to use them will fail.
 *
 * All /proc vnodes, except directory vnodes, are retained on a linked
 * list (rooted at p_plist in the process structure) until last close.
 *
 * A controlling process must re-open the /proc files in order to
 * regain control.
 */
void
prinvalidate(struct user *up)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	vnode_t *vp;
	prnode_t *pnp;
	int writers = 0;

	mutex_enter(&p->p_lock);
	prbarrier(p);	/* block all other /proc operations */

	/*
	 * At this moment, there can be only one lwp in the process.
	 */
	ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);

	/*
	 * Invalidate any currently active /proc vnodes.
	 */
	for (vp = p->p_plist; vp != NULL; vp = pnp->pr_next) {
		pnp = VTOP(vp);
		switch (pnp->pr_type) {
		case PR_PSINFO:		/* these files can be read by anyone */
		case PR_LPSINFO:
		case PR_LWPSINFO:
		case PR_LWPDIR:
		case PR_LWPIDDIR:
		case PR_USAGE:
		case PR_LUSAGE:
		case PR_LWPUSAGE:
			break;
		default:
			pnp->pr_flags |= PR_INVAL;
			break;
		}
	}
	/*
	 * Wake up anyone waiting for the process or lwp.
	 * p->p_trace is guaranteed to be non-NULL if there
	 * are any open /proc files for this process.
	 */
	if ((vp = p->p_trace) != NULL) {
		prcommon_t *pcp = VTOP(vp)->pr_pcommon;

		prnotify(vp);
		/*
		 * Are there any writers?
		 */
		if ((writers = pcp->prc_writers) != 0) {
			/*
			 * Clear the exclusive open flag (old /proc interface).
			 * Set prc_selfopens equal to prc_writers so that
			 * the next O_EXCL|O_WRITE open will succeed
			 * even with existing (though invalid) writers.
			 * prclose() must decrement prc_selfopens when
			 * the invalid files are closed.
			 */
			pcp->prc_flags &= ~PRC_EXCL;
			ASSERT(pcp->prc_selfopens <= writers);
			pcp->prc_selfopens = writers;
		}
	}
	vp = p->p_lwpdir[t->t_dslot].ld_entry->le_trace;
	while (vp != NULL) {
		/*
		 * We should not invalidate the lwpiddir vnodes,
		 * but the necessities of maintaining the old
		 * ioctl()-based version of /proc require it.
		 */
		pnp = VTOP(vp);
		pnp->pr_flags |= PR_INVAL;
		prnotify(vp);
		vp = pnp->pr_next;
	}

	/*
	 * If any tracing flags are in effect and any vnodes are open for
	 * writing then set the requested-stop and run-on-last-close flags.
	 * Otherwise, clear all tracing flags.
	 */
	t->t_proc_flag &= ~TP_PAUSE;
	if ((p->p_proc_flag & P_PR_TRACE) && writers) {
		t->t_proc_flag |= TP_PRSTOP;
		aston(t);	/* so ISSIG will see the flag */
		p->p_proc_flag |= P_PR_RUNLCL;
	} else {
		premptyset(&up->u_entrymask);		/* syscalls */
		premptyset(&up->u_exitmask);
		premptyset(&p->p_sigmask);		/* signals */
		premptyset(&p->p_fltmask);		/* faults */
		t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
		p->p_proc_flag &= ~(P_PR_RUNLCL|P_PR_KILLCL|P_PR_TRACE);
	}

	mutex_exit(&p->p_lock);
}
/*
 * Acquire the controlled process's p_lock and mark it P_PR_LOCK.
 * Return with pr_pidlock held in all cases.
 * Return with p_lock held if the process still exists.
 * Return value is the process pointer if the process still exists, else NULL.
 * If we lock the process, give ourself kernel priority to avoid deadlocks;
 * this is undone in prunlock().
 */
static proc_t *
pr_p_lock(prnode_t *pnp)
{
	proc_t *p;
	prcommon_t *pcp;

	mutex_enter(&pr_pidlock);
	if ((pcp = pnp->pr_pcommon) == NULL || (p = pcp->prc_proc) == NULL)
		return (NULL);
	mutex_enter(&p->p_lock);
	while (p->p_proc_flag & P_PR_LOCK) {
		/*
		 * This cv/mutex pair is persistent even if
		 * the process disappears while we sleep.
		 */
		kcondvar_t *cv = &pr_pid_cv[p->p_slot];
		kmutex_t *mp = &p->p_lock;

		mutex_exit(&pr_pidlock);
		cv_wait(cv, mp);
		mutex_exit(mp);
		mutex_enter(&pr_pidlock);
		if (pcp->prc_proc == NULL)
			return (NULL);
		ASSERT(p == pcp->prc_proc);
		mutex_enter(&p->p_lock);
	}
	p->p_proc_flag |= P_PR_LOCK;
	THREAD_KPRI_REQUEST();
	return (p);
}
/*
 * Lock the target process by setting P_PR_LOCK and grabbing p->p_lock.
 * This prevents any lwp of the process from disappearing and
 * blocks most operations that a process can perform on itself.
 * Returns 0 on success, a non-zero error number on failure.
 *
 * 'zdisp' is ZYES or ZNO to indicate whether prlock() should succeed when
 * the subject process is a zombie (ZYES) or fail for zombies (ZNO).
 *
 * error returns:
 *	ENOENT: process or lwp has disappeared or process is exiting
 *		(or has become a zombie and zdisp == ZNO).
 *	EAGAIN: procfs vnode has become invalid.
 *	EINTR:  signal arrived while waiting for exec to complete.
 */
int
prlock(prnode_t *pnp, int zdisp)
{
	prcommon_t *pcp;
	proc_t *p;

again:
	pcp = pnp->pr_common;
	p = pr_p_lock(pnp);
	mutex_exit(&pr_pidlock);

	/*
	 * Return ENOENT immediately if there is no process.
	 */
	if (p == NULL)
		return (ENOENT);

	ASSERT(p == pcp->prc_proc && p->p_stat != 0 && p->p_stat != SIDL);

	/*
	 * Return ENOENT if process entered zombie state or is exiting
	 * and the 'zdisp' flag is set to ZNO indicating not to lock zombies.
	 */
	if (zdisp == ZNO &&
	    ((pcp->prc_flags & PRC_DESTROY) || (p->p_flag & SEXITING))) {
		prunlock(pnp);
		return (ENOENT);
	}

	/*
	 * If lwp-specific, check to see if lwp has disappeared.
	 */
	if (pcp->prc_flags & PRC_LWP) {
		if ((zdisp == ZNO && (pcp->prc_flags & PRC_DESTROY)) ||
		    pcp->prc_tslot == -1) {
			prunlock(pnp);
			return (ENOENT);
		}
	}

	/*
	 * Return EAGAIN if we have encountered a security violation.
	 * (The process exec'd a set-id or unreadable executable file.)
	 */
	if (pnp->pr_flags & PR_INVAL) {
		prunlock(pnp);
		return (EAGAIN);
	}

	/*
	 * If process is undergoing an exec(), wait for
	 * completion and then start all over again.
	 */
	if (p->p_proc_flag & P_PR_EXEC) {
		pcp = pnp->pr_pcommon;	/* Put on the correct sleep queue */
		mutex_enter(&pcp->prc_mutex);
		prunlock(pnp);
		if (!cv_wait_sig(&pcp->prc_wait, &pcp->prc_mutex)) {
			mutex_exit(&pcp->prc_mutex);
			return (EINTR);
		}
		mutex_exit(&pcp->prc_mutex);
		goto again;
	}

	/*
	 * We return holding p->p_lock.
	 */
	return (0);
}
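/*
 * Typical caller pattern (an illustrative sketch, not a quotation of any
 * particular caller in this file):
 *
 *	if ((error = prlock(pnp, ZNO)) != 0)
 *		return (error);	// gone, zombie, invalid, or interrupted
 *	... operate on the process; it can neither exit nor exec ...
 *	prunlock(pnp);
 */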
/*
 * Undo prlock() and pr_p_lock().
 * p->p_lock is still held; pr_pidlock is no longer held.
 *
 * prunmark() drops the P_PR_LOCK flag and wakes up another thread,
 * if any, waiting for the flag to be dropped; it retains p->p_lock.
 *
 * prunlock() calls prunmark() and then drops p->p_lock.
 */
void
prunmark(proc_t *p)
{
	ASSERT(p->p_proc_flag & P_PR_LOCK);
	ASSERT(MUTEX_HELD(&p->p_lock));

	cv_signal(&pr_pid_cv[p->p_slot]);
	p->p_proc_flag &= ~P_PR_LOCK;
	THREAD_KPRI_RELEASE();
}

void
prunlock(prnode_t *pnp)
{
	prcommon_t *pcp = pnp->pr_common;
	proc_t *p = pcp->prc_proc;

	/*
	 * If we (or someone) gave it a SIGKILL, and it is not
	 * already a zombie, set it running unconditionally.
	 */
	if ((p->p_flag & SKILLED) &&
	    !(p->p_flag & SEXITING) &&
	    !(pcp->prc_flags & PRC_DESTROY) &&
	    !((pcp->prc_flags & PRC_LWP) && pcp->prc_tslot == -1))
		(void) pr_setrun(pnp, 0);
	prunmark(p);
	mutex_exit(&p->p_lock);
}
/*
 * Called while holding p->p_lock to delay until the process is unlocked.
 * We enter holding p->p_lock; p->p_lock is dropped and reacquired.
 * The process cannot become locked again until p->p_lock is dropped.
 */
void
prbarrier(proc_t *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	if (p->p_proc_flag & P_PR_LOCK) {
		/* The process is locked; delay until not locked */
		uint_t slot = p->p_slot;

		while (p->p_proc_flag & P_PR_LOCK)
			cv_wait(&pr_pid_cv[slot], &p->p_lock);
		cv_signal(&pr_pid_cv[slot]);
	}
}
/*
 * Return process/lwp status.
 * The u-block is mapped in by this routine and unmapped at the end.
 */
void
prgetstatus(proc_t *p, pstatus_t *sp, zone_t *zp)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = prchoose(p);	/* returns locked thread */
	ASSERT(t != NULL);
	thread_unlock(t);

	/* just bzero the process part, prgetlwpstatus() does the rest */
	bzero(sp, sizeof (pstatus_t) - sizeof (lwpstatus_t));
	sp->pr_nlwp = p->p_lwpcnt;
	sp->pr_nzomb = p->p_zombcnt;
	prassignset(&sp->pr_sigpend, &p->p_sig);
	sp->pr_brkbase = (uintptr_t)p->p_brkbase;
	sp->pr_brksize = p->p_brksize;
	sp->pr_stkbase = (uintptr_t)prgetstackbase(p);
	sp->pr_stksize = p->p_stksize;
	sp->pr_pid = p->p_pid;
	if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
	    (p->p_flag & SZONETOP)) {
		ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
		/*
		 * Inside local zones, fake zsched's pid as parent pids for
		 * processes which reference processes outside of the zone.
		 */
		sp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
	} else {
		sp->pr_ppid = p->p_ppid;
	}
	sp->pr_pgid = p->p_pgrp;
	sp->pr_sid = p->p_sessp->s_sid;
	sp->pr_taskid = p->p_task->tk_tkid;
	sp->pr_projid = p->p_task->tk_proj->kpj_id;
	sp->pr_zoneid = p->p_zone->zone_id;
	hrt2ts(mstate_aggr_state(p, LMS_USER), &sp->pr_utime);
	hrt2ts(mstate_aggr_state(p, LMS_SYSTEM), &sp->pr_stime);
	TICK_TO_TIMESTRUC(p->p_cutime, &sp->pr_cutime);
	TICK_TO_TIMESTRUC(p->p_cstime, &sp->pr_cstime);
	prassignset(&sp->pr_sigtrace, &p->p_sigmask);
	prassignset(&sp->pr_flttrace, &p->p_fltmask);
	prassignset(&sp->pr_sysentry, &PTOU(p)->u_entrymask);
	prassignset(&sp->pr_sysexit, &PTOU(p)->u_exitmask);
	switch (p->p_model) {
	case DATAMODEL_ILP32:
		sp->pr_dmodel = PR_MODEL_ILP32;
		break;
	case DATAMODEL_LP64:
		sp->pr_dmodel = PR_MODEL_LP64;
		break;
	}
	if (p->p_agenttp)
		sp->pr_agentid = p->p_agenttp->t_tid;

	/* get the chosen lwp's status */
	prgetlwpstatus(t, &sp->pr_lwp, zp);

	/* replicate the flags */
	sp->pr_flags = sp->pr_lwp.pr_flags;
}
#ifdef _SYSCALL32_IMPL
static void
prgetlwpstatus32(kthread_t *t, lwpstatus32_t *sp, zone_t *zp)
{
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	struct mstate *ms = &lwp->lwp_mstate;
	hrtime_t usr, sys;
	int flags;
	ulong_t instr;

	ASSERT(MUTEX_HELD(&p->p_lock));

	bzero(sp, sizeof (*sp));
	flags = 0;
	if (t->t_state == TS_STOPPED) {
		flags |= PR_STOPPED;
		if ((t->t_schedflag & TS_PSTART) == 0)
			flags |= PR_ISTOP;
	} else if (VSTOPPED(t)) {
		flags |= PR_STOPPED|PR_ISTOP;
	}
	if (!(flags & PR_ISTOP) && (t->t_proc_flag & TP_PRSTOP))
		flags |= PR_DSTOP;
	if (lwp->lwp_asleep)
		flags |= PR_ASLEEP;
	if (t == p->p_agenttp)
		flags |= PR_AGENT;
	if (!(t->t_proc_flag & TP_TWAIT))
		flags |= PR_DETACH;
	if (t->t_proc_flag & TP_DAEMON)
		flags |= PR_DAEMON;
	if (p->p_proc_flag & P_PR_FORK)
		flags |= PR_FORK;
	if (p->p_proc_flag & P_PR_RUNLCL)
		flags |= PR_RLC;
	if (p->p_proc_flag & P_PR_KILLCL)
		flags |= PR_KLC;
	if (p->p_proc_flag & P_PR_ASYNC)
		flags |= PR_ASYNC;
	if (p->p_proc_flag & P_PR_BPTADJ)
		flags |= PR_BPTADJ;
	if (p->p_proc_flag & P_PR_PTRACE)
		flags |= PR_PTRACE;
	if (p->p_flag & SMSACCT)
		flags |= PR_MSACCT;
	if (p->p_flag & SMSFORK)
		flags |= PR_MSFORK;
	if (p->p_flag & SVFWAIT)
		flags |= PR_VFORKP;
	sp->pr_flags = flags;
	if (VSTOPPED(t)) {
		sp->pr_why = PR_REQUESTED;
		sp->pr_what = 0;
	} else {
		sp->pr_why = t->t_whystop;
		sp->pr_what = t->t_whatstop;
	}
	sp->pr_lwpid = t->t_tid;
	sp->pr_cursig = lwp->lwp_cursig;
	prassignset(&sp->pr_lwppend, &t->t_sig);
	schedctl_finish_sigblock(t);
	prassignset(&sp->pr_lwphold, &t->t_hold);
	if (t->t_whystop == PR_FAULTED) {
		siginfo_kto32(&lwp->lwp_siginfo, &sp->pr_info);
		if (t->t_whatstop == FLTPAGE)
			sp->pr_info.si_addr =
			    (caddr32_t)(uintptr_t)lwp->lwp_siginfo.si_addr;
	} else if (lwp->lwp_curinfo)
		siginfo_kto32(&lwp->lwp_curinfo->sq_info, &sp->pr_info);
	if (SI_FROMUSER(&lwp->lwp_siginfo) && zp->zone_id != GLOBAL_ZONEID &&
	    sp->pr_info.si_zoneid != zp->zone_id) {
		sp->pr_info.si_pid = zp->zone_zsched->p_pid;
		sp->pr_info.si_uid = 0;
		sp->pr_info.si_ctid = -1;
		sp->pr_info.si_zoneid = zp->zone_id;
	}
	sp->pr_altstack.ss_sp =
	    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
	sp->pr_altstack.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
	sp->pr_altstack.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;
	prgetaction32(p, PTOU(p), lwp->lwp_cursig, &sp->pr_action);
	sp->pr_oldcontext = (caddr32_t)lwp->lwp_oldcontext;
	sp->pr_ustack = (caddr32_t)lwp->lwp_ustack;
	(void) strncpy(sp->pr_clname, sclass[t->t_cid].cl_name,
	    sizeof (sp->pr_clname) - 1);
	if (flags & PR_STOPPED)
		hrt2ts32(t->t_stoptime, &sp->pr_tstamp);
	usr = ms->ms_acct[LMS_USER];
	sys = ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];
	scalehrtime(&usr);
	scalehrtime(&sys);
	hrt2ts32(usr, &sp->pr_utime);
	hrt2ts32(sys, &sp->pr_stime);

	/*
	 * Fetch the current instruction, if not a system process.
	 * We don't attempt this unless the lwp is stopped.
	 */
	if ((p->p_flag & SSYS) || p->p_as == &kas)
		sp->pr_flags |= (PR_ISSYS|PR_PCINVAL);
	else if (!(flags & PR_STOPPED))
		sp->pr_flags |= PR_PCINVAL;
	else if (!prfetchinstr(lwp, &instr))
		sp->pr_flags |= PR_PCINVAL;
	else
		sp->pr_instr = (uint32_t)instr;

	/*
	 * Drop p_lock while touching the lwp's stack.
	 */
	mutex_exit(&p->p_lock);
	if (prisstep(lwp))
		sp->pr_flags |= PR_STEP;
	if ((flags & (PR_STOPPED|PR_ASLEEP)) && t->t_sysnum) {
		int i;

		sp->pr_syscall = get_syscall32_args(lwp,
		    (int *)sp->pr_sysarg, &i);
		sp->pr_nsysarg = (ushort_t)i;
	}
	if ((flags & PR_STOPPED) || t == curthread)
		prgetprregs32(lwp, sp->pr_reg);
	if ((t->t_state == TS_STOPPED && t->t_whystop == PR_SYSEXIT) ||
	    (flags & PR_VFORKP)) {
		long r1, r2;
		user_t *up;
		auxv_t *auxp;
		int i;

		sp->pr_errno = prgetrvals(lwp, &r1, &r2);
		if (sp->pr_errno == 0) {
			sp->pr_rval1 = (int32_t)r1;
			sp->pr_rval2 = (int32_t)r2;
			sp->pr_errpriv = PRIV_NONE;
		} else
			sp->pr_errpriv = lwp->lwp_badpriv;

		if (t->t_sysnum == SYS_execve) {
			up = PTOU(p);
			sp->pr_sysarg[0] = 0;
			sp->pr_sysarg[1] = (caddr32_t)up->u_argv;
			sp->pr_sysarg[2] = (caddr32_t)up->u_envp;
			for (i = 0, auxp = up->u_auxv;
			    i < sizeof (up->u_auxv) / sizeof (up->u_auxv[0]);
			    i++, auxp++) {
				if (auxp->a_type == AT_SUN_EXECNAME) {
					sp->pr_sysarg[0] =
					    (caddr32_t)
					    (uintptr_t)auxp->a_un.a_ptr;
					break;
				}
			}
		}
	}
	if (prhasfp())
		prgetprfpregs32(lwp, &sp->pr_fpreg);
	mutex_enter(&p->p_lock);
}
void
prgetstatus32(proc_t *p, pstatus32_t *sp, zone_t *zp)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = prchoose(p);	/* returns locked thread */
	ASSERT(t != NULL);
	thread_unlock(t);

	/* just bzero the process part, prgetlwpstatus32() does the rest */
	bzero(sp, sizeof (pstatus32_t) - sizeof (lwpstatus32_t));
	sp->pr_nlwp = p->p_lwpcnt;
	sp->pr_nzomb = p->p_zombcnt;
	prassignset(&sp->pr_sigpend, &p->p_sig);
	sp->pr_brkbase = (uint32_t)(uintptr_t)p->p_brkbase;
	sp->pr_brksize = (uint32_t)p->p_brksize;
	sp->pr_stkbase = (uint32_t)(uintptr_t)prgetstackbase(p);
	sp->pr_stksize = (uint32_t)p->p_stksize;
	sp->pr_pid = p->p_pid;
	if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
	    (p->p_flag & SZONETOP)) {
		ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
		/*
		 * Inside local zones, fake zsched's pid as parent pids for
		 * processes which reference processes outside of the zone.
		 */
		sp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
	} else {
		sp->pr_ppid = p->p_ppid;
	}
	sp->pr_pgid = p->p_pgrp;
	sp->pr_sid = p->p_sessp->s_sid;
	sp->pr_taskid = p->p_task->tk_tkid;
	sp->pr_projid = p->p_task->tk_proj->kpj_id;
	sp->pr_zoneid = p->p_zone->zone_id;
	hrt2ts32(mstate_aggr_state(p, LMS_USER), &sp->pr_utime);
	hrt2ts32(mstate_aggr_state(p, LMS_SYSTEM), &sp->pr_stime);
	TICK_TO_TIMESTRUC32(p->p_cutime, &sp->pr_cutime);
	TICK_TO_TIMESTRUC32(p->p_cstime, &sp->pr_cstime);
	prassignset(&sp->pr_sigtrace, &p->p_sigmask);
	prassignset(&sp->pr_flttrace, &p->p_fltmask);
	prassignset(&sp->pr_sysentry, &PTOU(p)->u_entrymask);
	prassignset(&sp->pr_sysexit, &PTOU(p)->u_exitmask);
	switch (p->p_model) {
	case DATAMODEL_ILP32:
		sp->pr_dmodel = PR_MODEL_ILP32;
		break;
	case DATAMODEL_LP64:
		sp->pr_dmodel = PR_MODEL_LP64;
		break;
	}
	if (p->p_agenttp)
		sp->pr_agentid = p->p_agenttp->t_tid;

	/* get the chosen lwp's status */
	prgetlwpstatus32(t, &sp->pr_lwp, zp);

	/* replicate the flags */
	sp->pr_flags = sp->pr_lwp.pr_flags;
}
#endif	/* _SYSCALL32_IMPL */
/*
 * Return lwp status.
 */
void
prgetlwpstatus(kthread_t *t, lwpstatus_t *sp, zone_t *zp)
{
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	struct mstate *ms = &lwp->lwp_mstate;
	hrtime_t usr, sys;
	int flags;
	ulong_t instr;

	ASSERT(MUTEX_HELD(&p->p_lock));

	bzero(sp, sizeof (*sp));
	flags = 0;
	if (t->t_state == TS_STOPPED) {
		flags |= PR_STOPPED;
		if ((t->t_schedflag & TS_PSTART) == 0)
			flags |= PR_ISTOP;
	} else if (VSTOPPED(t)) {
		flags |= PR_STOPPED|PR_ISTOP;
	}
	if (!(flags & PR_ISTOP) && (t->t_proc_flag & TP_PRSTOP))
		flags |= PR_DSTOP;
	if (lwp->lwp_asleep)
		flags |= PR_ASLEEP;
	if (t == p->p_agenttp)
		flags |= PR_AGENT;
	if (!(t->t_proc_flag & TP_TWAIT))
		flags |= PR_DETACH;
	if (t->t_proc_flag & TP_DAEMON)
		flags |= PR_DAEMON;
	if (p->p_proc_flag & P_PR_FORK)
		flags |= PR_FORK;
	if (p->p_proc_flag & P_PR_RUNLCL)
		flags |= PR_RLC;
	if (p->p_proc_flag & P_PR_KILLCL)
		flags |= PR_KLC;
	if (p->p_proc_flag & P_PR_ASYNC)
		flags |= PR_ASYNC;
	if (p->p_proc_flag & P_PR_BPTADJ)
		flags |= PR_BPTADJ;
	if (p->p_proc_flag & P_PR_PTRACE)
		flags |= PR_PTRACE;
	if (p->p_flag & SMSACCT)
		flags |= PR_MSACCT;
	if (p->p_flag & SMSFORK)
		flags |= PR_MSFORK;
	if (p->p_flag & SVFWAIT)
		flags |= PR_VFORKP;
	if (p->p_pgidp->pid_pgorphaned)
		flags |= PR_ORPHAN;
	if (p->p_pidflag & CLDNOSIGCHLD)
		flags |= PR_NOSIGCHLD;
	if (p->p_pidflag & CLDWAITPID)
		flags |= PR_WAITPID;
	sp->pr_flags = flags;
	if (VSTOPPED(t)) {
		sp->pr_why = PR_REQUESTED;
		sp->pr_what = 0;
	} else {
		sp->pr_why = t->t_whystop;
		sp->pr_what = t->t_whatstop;
	}
	sp->pr_lwpid = t->t_tid;
	sp->pr_cursig = lwp->lwp_cursig;
	prassignset(&sp->pr_lwppend, &t->t_sig);
	schedctl_finish_sigblock(t);
	prassignset(&sp->pr_lwphold, &t->t_hold);
	if (t->t_whystop == PR_FAULTED)
		bcopy(&lwp->lwp_siginfo,
		    &sp->pr_info, sizeof (k_siginfo_t));
	else if (lwp->lwp_curinfo)
		bcopy(&lwp->lwp_curinfo->sq_info,
		    &sp->pr_info, sizeof (k_siginfo_t));
	if (SI_FROMUSER(&lwp->lwp_siginfo) && zp->zone_id != GLOBAL_ZONEID &&
	    sp->pr_info.si_zoneid != zp->zone_id) {
		sp->pr_info.si_pid = zp->zone_zsched->p_pid;
		sp->pr_info.si_uid = 0;
		sp->pr_info.si_ctid = -1;
		sp->pr_info.si_zoneid = zp->zone_id;
	}
	sp->pr_altstack = lwp->lwp_sigaltstack;
	prgetaction(p, PTOU(p), lwp->lwp_cursig, &sp->pr_action);
	sp->pr_oldcontext = (uintptr_t)lwp->lwp_oldcontext;
	sp->pr_ustack = lwp->lwp_ustack;
	(void) strncpy(sp->pr_clname, sclass[t->t_cid].cl_name,
	    sizeof (sp->pr_clname) - 1);
	if (flags & PR_STOPPED)
		hrt2ts(t->t_stoptime, &sp->pr_tstamp);
	usr = ms->ms_acct[LMS_USER];
	sys = ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];
	scalehrtime(&usr);
	scalehrtime(&sys);
	hrt2ts(usr, &sp->pr_utime);
	hrt2ts(sys, &sp->pr_stime);

	/*
	 * Fetch the current instruction, if not a system process.
	 * We don't attempt this unless the lwp is stopped.
	 */
	if ((p->p_flag & SSYS) || p->p_as == &kas)
		sp->pr_flags |= (PR_ISSYS|PR_PCINVAL);
	else if (!(flags & PR_STOPPED))
		sp->pr_flags |= PR_PCINVAL;
	else if (!prfetchinstr(lwp, &instr))
		sp->pr_flags |= PR_PCINVAL;
	else
		sp->pr_instr = instr;

	/*
	 * Drop p_lock while touching the lwp's stack.
	 */
	mutex_exit(&p->p_lock);
	if (prisstep(lwp))
		sp->pr_flags |= PR_STEP;
	if ((flags & (PR_STOPPED|PR_ASLEEP)) && t->t_sysnum) {
		int i;

		sp->pr_syscall = get_syscall_args(lwp,
		    (long *)sp->pr_sysarg, &i);
		sp->pr_nsysarg = (ushort_t)i;
	}
	if ((flags & PR_STOPPED) || t == curthread)
		prgetprregs(lwp, sp->pr_reg);
	if ((t->t_state == TS_STOPPED && t->t_whystop == PR_SYSEXIT) ||
	    (flags & PR_VFORKP)) {
		user_t *up;
		auxv_t *auxp;
		int i;

		sp->pr_errno = prgetrvals(lwp, &sp->pr_rval1, &sp->pr_rval2);
		if (sp->pr_errno == 0)
			sp->pr_errpriv = PRIV_NONE;
		else
			sp->pr_errpriv = lwp->lwp_badpriv;

		if (t->t_sysnum == SYS_execve) {
			up = PTOU(p);
			sp->pr_sysarg[0] = 0;
			sp->pr_sysarg[1] = (uintptr_t)up->u_argv;
			sp->pr_sysarg[2] = (uintptr_t)up->u_envp;
			for (i = 0, auxp = up->u_auxv;
			    i < sizeof (up->u_auxv) / sizeof (up->u_auxv[0]);
			    i++, auxp++) {
				if (auxp->a_type == AT_SUN_EXECNAME) {
					sp->pr_sysarg[0] =
					    (uintptr_t)auxp->a_un.a_ptr;
					break;
				}
			}
		}
	}
	if (prhasfp())
		prgetprfpregs(lwp, &sp->pr_fpreg);
	mutex_enter(&p->p_lock);
}
/*
 * Get the sigaction structure for the specified signal.  The u-block
 * must already have been mapped in by the caller.
 */
void
prgetaction(proc_t *p, user_t *up, uint_t sig, struct sigaction *sp)
{
	int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;

	bzero(sp, sizeof (*sp));

	if (sig != 0 && (unsigned)sig < nsig) {
		sp->sa_handler = up->u_signal[sig-1];
		prassignset(&sp->sa_mask, &up->u_sigmask[sig-1]);
		if (sigismember(&up->u_sigonstack, sig))
			sp->sa_flags |= SA_ONSTACK;
		if (sigismember(&up->u_sigresethand, sig))
			sp->sa_flags |= SA_RESETHAND;
		if (sigismember(&up->u_sigrestart, sig))
			sp->sa_flags |= SA_RESTART;
		if (sigismember(&p->p_siginfo, sig))
			sp->sa_flags |= SA_SIGINFO;
		if (sigismember(&up->u_signodefer, sig))
			sp->sa_flags |= SA_NODEFER;
		if (sig == SIGCLD) {
			if (p->p_flag & SNOWAIT)
				sp->sa_flags |= SA_NOCLDWAIT;
			if ((p->p_flag & SJCTL) == 0)
				sp->sa_flags |= SA_NOCLDSTOP;
		}
	}
}
#ifdef _SYSCALL32_IMPL
void
prgetaction32(proc_t *p, user_t *up, uint_t sig, struct sigaction32 *sp)
{
	int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;

	bzero(sp, sizeof (*sp));

	if (sig != 0 && (unsigned)sig < nsig) {
		sp->sa_handler = (caddr32_t)(uintptr_t)up->u_signal[sig-1];
		prassignset(&sp->sa_mask, &up->u_sigmask[sig-1]);
		if (sigismember(&up->u_sigonstack, sig))
			sp->sa_flags |= SA_ONSTACK;
		if (sigismember(&up->u_sigresethand, sig))
			sp->sa_flags |= SA_RESETHAND;
		if (sigismember(&up->u_sigrestart, sig))
			sp->sa_flags |= SA_RESTART;
		if (sigismember(&p->p_siginfo, sig))
			sp->sa_flags |= SA_SIGINFO;
		if (sigismember(&up->u_signodefer, sig))
			sp->sa_flags |= SA_NODEFER;
		if (sig == SIGCLD) {
			if (p->p_flag & SNOWAIT)
				sp->sa_flags |= SA_NOCLDWAIT;
			if ((p->p_flag & SJCTL) == 0)
				sp->sa_flags |= SA_NOCLDSTOP;
		}
	}
}
#endif	/* _SYSCALL32_IMPL */
/*
 * Count the number of segments in this process's address space.
 */
int
prnsegs(struct as *as, int reserved)
{
	int n = 0;
	struct seg *seg;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
		caddr_t saddr, naddr;
		void *tmp = NULL;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			(void) pr_getprot(seg, reserved, &tmp,
			    &saddr, &naddr, eaddr);
			if (saddr != naddr)
				n++;
		}

		ASSERT(tmp == NULL);
	}

	return (n);
}
/*
 * Convert uint32_t to decimal string w/o leading zeros.
 * Add trailing null characters if 'len' is greater than string length.
 * Return the string length.
 */
static int
pr_u32tos(uint32_t n, char *s, int len)
{
	char cbuf[11];	/* 32-bit unsigned integer fits in 10 digits */
	char *cp = cbuf;
	char *end = s + len;

	do {
		*cp++ = (char)(n % 10 + '0');
	} while ((n /= 10) != 0);

	len = (int)(cp - cbuf);

	do {
		*s++ = *--cp;
	} while (cp > cbuf);

	while (s < end)		/* optional pad */
		*s++ = '\0';

	return (len);
}
/*
 * Convert uint64_t to decimal string w/o leading zeros.
 * Return the string length.
 */
static int
pr_u64tos(uint64_t n, char *s)
{
	char cbuf[21];	/* 64-bit unsigned integer fits in 20 digits */
	char *cp = cbuf;
	int len;

	do {
		*cp++ = (char)(n % 10 + '0');
	} while ((n /= 10) != 0);

	len = (int)(cp - cbuf);

	do {
		*s++ = *--cp;
	} while (cp > cbuf);

	return (len);
}
void
pr_object_name(char *name, vnode_t *vp, struct vattr *vattr)
{
	char *s = name;
	struct vfs *vfsp;
	struct vfssw *vfsswp;

	if ((vfsp = vp->v_vfsp) != NULL &&
	    ((vfsswp = vfssw + vfsp->vfs_fstype), vfsswp->vsw_name) &&
	    *vfsswp->vsw_name) {
		(void) strcpy(s, vfsswp->vsw_name);
		s += strlen(s);
		*s++ = '.';
	}
	s += pr_u32tos(getmajor(vattr->va_fsid), s, 0);
	*s++ = '.';
	s += pr_u32tos(getminor(vattr->va_fsid), s, 0);
	*s++ = '.';
	s += pr_u64tos(vattr->va_nodeid, s);
	*s = '\0';
}
static struct seg *
break_seg(proc_t *p)
{
	caddr_t addr = p->p_brkbase;
	struct seg *seg;
	vnode_t *vp;

	if (p->p_brksize != 0)
		addr += p->p_brksize - 1;
	seg = as_segat(p->p_as, addr);
	if (seg != NULL && seg->s_ops == &segvn_ops &&
	    (segop_getvp(seg, seg->s_base, &vp) != 0 || vp == NULL))
		return (seg);
	return (NULL);
}
/*
 * Implementation of service functions to handle procfs generic chained
 * copyout buffers.
 */
typedef struct pr_iobuf_list {
	list_node_t	piol_link;	/* buffer linkage */
	size_t		piol_size;	/* total size (header + data) */
	size_t		piol_usedsize;	/* amount to copy out from this buf */
} piol_t;

#define	MAPSIZE	(64 * 1024)
#define	PIOL_DATABUF(iol)	((void *)(&(iol)[1]))

void
pr_iol_initlist(list_t *iolhead, size_t itemsize, int n)
{
	piol_t	*iol;
	size_t	initial_size = MIN(1, n) * itemsize;

	list_create(iolhead, sizeof (piol_t), offsetof(piol_t, piol_link));

	ASSERT(list_head(iolhead) == NULL);
	ASSERT(itemsize < MAPSIZE - sizeof (*iol));
	ASSERT(initial_size > 0);

	/*
	 * Someone creating chained copyout buffers may ask for less than
	 * MAPSIZE if the amount of data to be buffered is known to be
	 * smaller than that.
	 * But in order to prevent involuntary self-denial of service,
	 * the requested input size is clamped at MAPSIZE.
	 */
	initial_size = MIN(MAPSIZE, initial_size + sizeof (*iol));
	iol = kmem_alloc(initial_size, KM_SLEEP);
	list_insert_head(iolhead, iol);
	iol->piol_usedsize = 0;
	iol->piol_size = initial_size;
}
void *
pr_iol_newbuf(list_t *iolhead, size_t itemsize)
{
	char	*new;
	piol_t	*iol;

	ASSERT(itemsize < MAPSIZE - sizeof (*iol));
	ASSERT(list_head(iolhead) != NULL);

	iol = (piol_t *)list_tail(iolhead);

	if (iol->piol_size <
	    iol->piol_usedsize + sizeof (*iol) + itemsize) {
		/*
		 * Out of space in the current buffer. Allocate more.
		 */
		piol_t *newiol;

		newiol = kmem_alloc(MAPSIZE, KM_SLEEP);
		newiol->piol_size = MAPSIZE;
		newiol->piol_usedsize = 0;

		list_insert_after(iolhead, iol, newiol);
		iol = list_next(iolhead, iol);
		ASSERT(iol == newiol);
	}
	new = (char *)PIOL_DATABUF(iol) + iol->piol_usedsize;
	iol->piol_usedsize += itemsize;
	bzero(new, itemsize);

	return (new);
}
int
pr_iol_copyout_and_free(list_t *iolhead, caddr_t *tgt, int errin)
{
	int error = errin;
	piol_t	*iol;

	while ((iol = list_head(iolhead)) != NULL) {
		list_remove(iolhead, iol);
		if (!error) {
			if (copyout(PIOL_DATABUF(iol), *tgt,
			    iol->piol_usedsize))
				error = EFAULT;
			*tgt += iol->piol_usedsize;
		}
		kmem_free(iol, iol->piol_size);
	}
	list_destroy(iolhead);

	return (error);
}
int
pr_iol_uiomove_and_free(list_t *iolhead, uio_t *uiop, int errin)
{
	offset_t	off = uiop->uio_offset;
	char		*base;
	size_t		size;
	piol_t		*iol;
	int		error = errin;

	while ((iol = list_head(iolhead)) != NULL) {
		list_remove(iolhead, iol);
		base = PIOL_DATABUF(iol);
		size = iol->piol_usedsize;
		if (off <= size && error == 0 && uiop->uio_resid > 0)
			error = uiomove(base + off, size - off,
			    UIO_READ, uiop);
		off = MAX(0, off - (offset_t)size);
		kmem_free(iol, iol->piol_size);
	}
	list_destroy(iolhead);

	return (error);
}
/*
 * Return an array of structures with memory map information.
 * We allocate here; the caller must deallocate.
 */
int
prgetmap(proc_t *p, int reserved, list_t *iolhead)
{
	struct as *as = p->p_as;
	prmap_t *mp;
	struct seg *seg;
	struct seg *brkseg, *stkseg;
	vnode_t *vp;
	struct vattr vattr;
	uint_t prot;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
		caddr_t saddr, naddr;
		void *tmp = NULL;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, reserved, &tmp,
			    &saddr, &naddr, eaddr);
			if (saddr == naddr)
				continue;

			mp = pr_iol_newbuf(iolhead, sizeof (*mp));

			mp->pr_vaddr = (uintptr_t)saddr;
			mp->pr_size = naddr - saddr;
			mp->pr_offset = segop_getoffset(seg, saddr);
			if (prot & PROT_READ)
				mp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				mp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				mp->pr_mflags |= MA_EXEC;
			if (segop_gettype(seg, saddr) & MAP_SHARED)
				mp->pr_mflags |= MA_SHARED;
			if (segop_gettype(seg, saddr) & MAP_NORESERVE)
				mp->pr_mflags |= MA_NORESERVE;
			if (seg->s_ops == &segspt_shmops ||
			    (seg->s_ops == &segvn_ops &&
			    (segop_getvp(seg, saddr, &vp) != 0 || vp == NULL)))
				mp->pr_mflags |= MA_ANON;
			if (seg == brkseg)
				mp->pr_mflags |= MA_BREAK;
			else if (seg == stkseg) {
				mp->pr_mflags |= MA_STACK;
				if (reserved) {
					size_t maxstack =
					    ((size_t)p->p_stk_ctl +
					    PAGEOFFSET) & PAGEMASK;
					mp->pr_vaddr =
					    (uintptr_t)prgetstackbase(p) +
					    p->p_stksize - maxstack;
					mp->pr_size = (uintptr_t)naddr -
					    mp->pr_vaddr;
				}
			}
			if (seg->s_ops == &segspt_shmops)
				mp->pr_mflags |= MA_ISM | MA_SHM;
			mp->pr_pagesize = PAGESIZE;

			/*
			 * Manufacture a filename for the "object" directory.
			 */
			vattr.va_mask = AT_FSID|AT_NODEID;
			if (seg->s_ops == &segvn_ops &&
			    segop_getvp(seg, saddr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG &&
			    fop_getattr(vp, &vattr, 0, CRED(), NULL) == 0) {
				if (vp == p->p_exec)
					(void) strcpy(mp->pr_mapname, "a.out");
				else
					pr_object_name(mp->pr_mapname,
					    vp, &vattr);
			}

			/*
			 * Get the SysV shared memory id, if any.
			 */
			if ((mp->pr_mflags & MA_SHARED) && p->p_segacct &&
			    (mp->pr_shmid = shmgetid(p, seg->s_base)) !=
			    SHMID_NONE) {
				if (mp->pr_shmid == SHMID_FREE)
					mp->pr_shmid = -1;

				mp->pr_mflags |= MA_SHM;
			} else {
				mp->pr_shmid = -1;
			}
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
#ifdef _SYSCALL32_IMPL
int
prgetmap32(proc_t *p, int reserved, list_t *iolhead)
{
	struct as *as = p->p_as;
	prmap32_t *mp;
	struct seg *seg;
	struct seg *brkseg, *stkseg;
	vnode_t *vp;
	struct vattr vattr;
	uint_t prot;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
		caddr_t saddr, naddr;
		void *tmp = NULL;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			prot = pr_getprot(seg, reserved, &tmp,
			    &saddr, &naddr, eaddr);
			if (saddr == naddr)
				continue;

			mp = pr_iol_newbuf(iolhead, sizeof (*mp));

			mp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
			mp->pr_size = (size32_t)(naddr - saddr);
			mp->pr_offset = segop_getoffset(seg, saddr);
			if (prot & PROT_READ)
				mp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				mp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				mp->pr_mflags |= MA_EXEC;
			if (segop_gettype(seg, saddr) & MAP_SHARED)
				mp->pr_mflags |= MA_SHARED;
			if (segop_gettype(seg, saddr) & MAP_NORESERVE)
				mp->pr_mflags |= MA_NORESERVE;
			if (seg->s_ops == &segspt_shmops ||
			    (seg->s_ops == &segvn_ops &&
			    (segop_getvp(seg, saddr, &vp) != 0 || vp == NULL)))
				mp->pr_mflags |= MA_ANON;
			if (seg == brkseg)
				mp->pr_mflags |= MA_BREAK;
			else if (seg == stkseg) {
				mp->pr_mflags |= MA_STACK;
				if (reserved) {
					size_t maxstack =
					    ((size_t)p->p_stk_ctl +
					    PAGEOFFSET) & PAGEMASK;
					uintptr_t vaddr =
					    (uintptr_t)prgetstackbase(p) +
					    p->p_stksize - maxstack;
					mp->pr_vaddr = (caddr32_t)vaddr;
					mp->pr_size = (size32_t)
					    ((uintptr_t)naddr - vaddr);
				}
			}
			if (seg->s_ops == &segspt_shmops)
				mp->pr_mflags |= MA_ISM | MA_SHM;
			mp->pr_pagesize = PAGESIZE;

			/*
			 * Manufacture a filename for the "object" directory.
			 */
			vattr.va_mask = AT_FSID|AT_NODEID;
			if (seg->s_ops == &segvn_ops &&
			    segop_getvp(seg, saddr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG &&
			    fop_getattr(vp, &vattr, 0, CRED(), NULL) == 0) {
				if (vp == p->p_exec)
					(void) strcpy(mp->pr_mapname, "a.out");
				else
					pr_object_name(mp->pr_mapname,
					    vp, &vattr);
			}

			/*
			 * Get the SysV shared memory id, if any.
			 */
			if ((mp->pr_mflags & MA_SHARED) && p->p_segacct &&
			    (mp->pr_shmid = shmgetid(p, seg->s_base)) !=
			    SHMID_NONE) {
				if (mp->pr_shmid == SHMID_FREE)
					mp->pr_shmid = -1;

				mp->pr_mflags |= MA_SHM;
			} else {
				mp->pr_shmid = -1;
			}
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
#endif	/* _SYSCALL32_IMPL */
/*
 * Return the size of the /proc page data file.
 */
size_t
prpdsize(struct as *as)
{
	struct seg *seg;
	size_t size;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	size = sizeof (prpageheader_t);
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;
		size_t npage;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			(void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			if ((npage = (naddr - saddr) / PAGESIZE) != 0)
				size += sizeof (prasmap_t) + round8(npage);
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (size);
}
#ifdef _SYSCALL32_IMPL
size_t
prpdsize32(struct as *as)
{
	struct seg *seg;
	size_t size;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	size = sizeof (prpageheader32_t);
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;
		size_t npage;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			(void) pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			if ((npage = (naddr - saddr) / PAGESIZE) != 0)
				size += sizeof (prasmap32_t) + round8(npage);
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (size);
}
#endif	/* _SYSCALL32_IMPL */
/*
 * Read page data information.
 */
int
prpdread(proc_t *p, uint_t hatid, struct uio *uiop)
{
	struct as *as = p->p_as;
	caddr_t buf;
	size_t size;
	prpageheader_t *php;
	prasmap_t *pmp;
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as);
		return (0);
	}
	size = prpdsize(as);
	if (uiop->uio_resid < size) {
		AS_LOCK_EXIT(as);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (prpageheader_t *)buf;
	pmp = (prasmap_t *)(buf + sizeof (prpageheader_t));

	hrt2ts(gethrtime(), &php->pr_tstamp);
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			vnode_t *vp;
			struct vattr vattr;
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			if ((len = (size_t)(naddr - saddr)) == 0)
				continue;
			npage = len / PAGESIZE;
			next = (uintptr_t)(pmp + 1) + round8(npage);
			/*
			 * It's possible that the address space can change
			 * subtly even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asynchronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a segop_incore() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = (uintptr_t)saddr;
			pmp->pr_npage = npage;
			pmp->pr_offset = segop_getoffset(seg, saddr);
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (segop_gettype(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			if (segop_gettype(seg, saddr) & MAP_NORESERVE)
				pmp->pr_mflags |= MA_NORESERVE;
			if (seg->s_ops == &segspt_shmops ||
			    (seg->s_ops == &segvn_ops &&
			    (segop_getvp(seg, saddr, &vp) != 0 || vp == NULL)))
				pmp->pr_mflags |= MA_ANON;
			if (seg->s_ops == &segspt_shmops)
				pmp->pr_mflags |= MA_ISM | MA_SHM;
			pmp->pr_pagesize = PAGESIZE;
			/*
			 * Manufacture a filename for the "object" directory.
			 */
			vattr.va_mask = AT_FSID|AT_NODEID;
			if (seg->s_ops == &segvn_ops &&
			    segop_getvp(seg, saddr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG &&
			    fop_getattr(vp, &vattr, 0, CRED(), NULL) == 0) {
				if (vp == p->p_exec)
					(void) strcpy(pmp->pr_mapname, "a.out");
				else
					pr_object_name(pmp->pr_mapname,
					    vp, &vattr);
			}

			/*
			 * Get the SysV shared memory id, if any.
			 */
			if ((pmp->pr_mflags & MA_SHARED) && p->p_segacct &&
			    (pmp->pr_shmid = shmgetid(p, seg->s_base)) !=
			    SHMID_NONE) {
				if (pmp->pr_shmid == SHMID_FREE)
					pmp->pr_shmid = -1;

				pmp->pr_mflags |= MA_SHM;
			} else {
				pmp->pr_shmid = -1;
			}

			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (prasmap_t *)next;
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as);

	ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);
	return (error);
}

#ifdef _SYSCALL32_IMPL
int
prpdread32(proc_t *p, uint_t hatid, struct uio *uiop)
{
	struct as *as = p->p_as;
	caddr_t buf;
	size_t size;
	prpageheader32_t *php;
	prasmap32_t *pmp;
	struct seg *seg;
	int error;

again:
	AS_LOCK_ENTER(as, RW_WRITER);

	if ((seg = AS_SEGFIRST(as)) == NULL) {
		AS_LOCK_EXIT(as);
		return (0);
	}
	size = prpdsize32(as);
	if (uiop->uio_resid < size) {
		AS_LOCK_EXIT(as);
		return (E2BIG);
	}

	buf = kmem_zalloc(size, KM_SLEEP);
	php = (prpageheader32_t *)buf;
	pmp = (prasmap32_t *)(buf + sizeof (prpageheader32_t));

	hrt2ts32(gethrtime(), &php->pr_tstamp);
	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr;
		void *tmp = NULL;

		for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
			vnode_t *vp;
			struct vattr vattr;
			size_t len;
			size_t npage;
			uint_t prot;
			uintptr_t next;

			prot = pr_getprot(seg, 0, &tmp, &saddr, &naddr, eaddr);
			if ((len = (size_t)(naddr - saddr)) == 0)
				continue;
			npage = len / PAGESIZE;
			next = (uintptr_t)(pmp + 1) + round8(npage);
			/*
			 * It's possible that the address space can change
			 * subtly even though we're holding as->a_lock
			 * due to the nondeterminism of page_exists() in
			 * the presence of asynchronously flushed pages or
			 * mapped files whose sizes are changing.
			 * page_exists() may be called indirectly from
			 * pr_getprot() by a segop_incore() routine.
			 * If this happens we need to make sure we don't
			 * overrun the buffer whose size we computed based
			 * on the initial iteration through the segments.
			 * Once we've detected an overflow, we need to clean
			 * up the temporary memory allocated in pr_getprot()
			 * and retry. If there's a pending signal, we return
			 * EINTR so that this thread can be dislodged if
			 * a latent bug causes us to spin indefinitely.
			 */
			if (next > (uintptr_t)buf + size) {
				pr_getprot_done(&tmp);
				AS_LOCK_EXIT(as);

				kmem_free(buf, size);

				if (ISSIG(curthread, JUSTLOOKING))
					return (EINTR);

				goto again;
			}

			php->pr_nmap++;
			php->pr_npage += npage;
			pmp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
			pmp->pr_npage = (size32_t)npage;
			pmp->pr_offset = segop_getoffset(seg, saddr);
			if (prot & PROT_READ)
				pmp->pr_mflags |= MA_READ;
			if (prot & PROT_WRITE)
				pmp->pr_mflags |= MA_WRITE;
			if (prot & PROT_EXEC)
				pmp->pr_mflags |= MA_EXEC;
			if (segop_gettype(seg, saddr) & MAP_SHARED)
				pmp->pr_mflags |= MA_SHARED;
			if (segop_gettype(seg, saddr) & MAP_NORESERVE)
				pmp->pr_mflags |= MA_NORESERVE;
			if (seg->s_ops == &segspt_shmops ||
			    (seg->s_ops == &segvn_ops &&
			    (segop_getvp(seg, saddr, &vp) != 0 || vp == NULL)))
				pmp->pr_mflags |= MA_ANON;
			if (seg->s_ops == &segspt_shmops)
				pmp->pr_mflags |= MA_ISM | MA_SHM;
			pmp->pr_pagesize = PAGESIZE;
			/*
			 * Manufacture a filename for the "object" directory.
			 */
			vattr.va_mask = AT_FSID|AT_NODEID;
			if (seg->s_ops == &segvn_ops &&
			    segop_getvp(seg, saddr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG &&
			    fop_getattr(vp, &vattr, 0, CRED(), NULL) == 0) {
				if (vp == p->p_exec)
					(void) strcpy(pmp->pr_mapname, "a.out");
				else
					pr_object_name(pmp->pr_mapname,
					    vp, &vattr);
			}

			/*
			 * Get the SysV shared memory id, if any.
			 */
			if ((pmp->pr_mflags & MA_SHARED) && p->p_segacct &&
			    (pmp->pr_shmid = shmgetid(p, seg->s_base)) !=
			    SHMID_NONE) {
				if (pmp->pr_shmid == SHMID_FREE)
					pmp->pr_shmid = -1;

				pmp->pr_mflags |= MA_SHM;
			} else {
				pmp->pr_shmid = -1;
			}

			hat_getstat(as, saddr, len, hatid,
			    (char *)(pmp + 1), HAT_SYNC_ZERORM);
			pmp = (prasmap32_t *)next;
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	AS_LOCK_EXIT(as);

	ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
	error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
	kmem_free(buf, size);

	return (error);
}
#endif	/* _SYSCALL32_IMPL */
static ushort_t
prgetpctcpu(uint64_t pct)
{
	/*
	 * The value returned will be relevant in the zone of the examiner,
	 * which may not be the same as the zone which performed the procfs
	 * mount.
	 */
	int nonline = zone_ncpus_online_get(curproc->p_zone);

	/*
	 * Prorate over online cpus so we don't exceed 100%
	 */
	if (nonline > 1)
		pct /= nonline;

	pct >>= 16;		/* convert to 16-bit scaled integer */
	if (pct > 0x8000)	/* might happen, due to rounding */
		pct = 0x8000;
	return ((ushort_t)pct);
}
/*
 * Return information used by ps(1).
 */
void
prgetpsinfo(proc_t *p, psinfo_t *psp)
{
	kthread_t *t;
	struct as *as;
	struct cred *cred;
	hrtime_t hrutime, hrstime;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = prchoose(p)) == NULL)	/* returns locked thread */
		bzero(psp, sizeof (*psp));
	else {
		thread_unlock(t);
		bzero(psp, sizeof (*psp) - sizeof (psp->pr_lwp));
	}

	/*
	 * only export SSYS and SMSACCT; everything else is off-limits to
	 * userland apps.
	 */
	psp->pr_flag = p->p_flag & (SSYS | SMSACCT);
	psp->pr_nlwp = p->p_lwpcnt;
	psp->pr_nzomb = p->p_zombcnt;
	mutex_enter(&p->p_crlock);
	cred = p->p_cred;
	psp->pr_uid = crgetruid(cred);
	psp->pr_euid = crgetuid(cred);
	psp->pr_gid = crgetrgid(cred);
	psp->pr_egid = crgetgid(cred);
	mutex_exit(&p->p_crlock);
	psp->pr_pid = p->p_pid;
	if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
	    (p->p_flag & SZONETOP)) {
		ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
		/*
		 * Inside local zones, fake zsched's pid as parent pids for
		 * processes which reference processes outside of the zone.
		 */
		psp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
	} else {
		psp->pr_ppid = p->p_ppid;
	}
	psp->pr_pgid = p->p_pgrp;
	psp->pr_sid = p->p_sessp->s_sid;
	psp->pr_taskid = p->p_task->tk_tkid;
	psp->pr_projid = p->p_task->tk_proj->kpj_id;
	psp->pr_poolid = p->p_pool->pool_id;
	psp->pr_zoneid = p->p_zone->zone_id;
	if ((psp->pr_contract = PRCTID(p)) == 0)
		psp->pr_contract = -1;
	psp->pr_addr = (uintptr_t)prgetpsaddr(p);
	switch (p->p_model) {
	case DATAMODEL_ILP32:
		psp->pr_dmodel = PR_MODEL_ILP32;
		break;
	case DATAMODEL_LP64:
		psp->pr_dmodel = PR_MODEL_LP64;
		break;
	}
	hrutime = mstate_aggr_state(p, LMS_USER);
	hrstime = mstate_aggr_state(p, LMS_SYSTEM);
	hrt2ts((hrutime + hrstime), &psp->pr_time);
	TICK_TO_TIMESTRUC(p->p_cutime + p->p_cstime, &psp->pr_ctime);

	if (t == NULL) {
		int wcode = p->p_wcode;		/* must be atomic read */

		if (wcode)
			psp->pr_wstat = wstat(wcode, p->p_wdata);
		psp->pr_ttydev = PRNODEV;
		psp->pr_lwp.pr_state = SZOMB;
		psp->pr_lwp.pr_sname = 'Z';
		psp->pr_lwp.pr_bindpro = PBIND_NONE;
		psp->pr_lwp.pr_bindpset = PS_NONE;
	} else {
		user_t *up = PTOU(p);
		dev_t d = cttydev(p);
		extern dev_t rwsconsdev, rconsdev, uconsdev;

		/*
		 * If the controlling terminal is the real
		 * or workstation console device, map to what the
		 * user thinks is the console device. Handle case when
		 * rwsconsdev or rconsdev is set to NODEV for Starfire.
		 */
		if ((d == rwsconsdev || d == rconsdev) && d != NODEV)
			d = uconsdev;
		psp->pr_ttydev = (d == NODEV) ? PRNODEV : d;
		psp->pr_start = up->u_start;
		bcopy(up->u_comm, psp->pr_fname,
		    MIN(sizeof (up->u_comm), sizeof (psp->pr_fname)-1));
		bcopy(up->u_psargs, psp->pr_psargs,
		    MIN(PRARGSZ-1, PSARGSZ));
		psp->pr_argc = up->u_argc;
		psp->pr_argv = up->u_argv;
		psp->pr_envp = up->u_envp;

		/* get the chosen lwp's lwpsinfo */
		prgetlwpsinfo(t, &psp->pr_lwp);

		/* compute %cpu for the process */
		if (p->p_lwpcnt == 1)
			psp->pr_pctcpu = psp->pr_lwp.pr_pctcpu;
		else {
			uint64_t pct = 0;
			hrtime_t cur_time = gethrtime_unscaled();

			t = p->p_tlist;
			do {
				pct += cpu_update_pct(t, cur_time);
			} while ((t = t->t_forw) != p->p_tlist);

			psp->pr_pctcpu = prgetpctcpu(pct);
		}
		if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
			psp->pr_size = 0;
			psp->pr_rssize = 0;
		} else {
			mutex_exit(&p->p_lock);
			AS_LOCK_ENTER(as, RW_READER);
			psp->pr_size = btopr(as->a_resvsize) *
			    (PAGESIZE / 1024);
			psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
			psp->pr_pctmem = rm_pctmemory(as);
			AS_LOCK_EXIT(as);
			mutex_enter(&p->p_lock);
		}
	}
}
#ifdef _SYSCALL32_IMPL
void
prgetpsinfo32(proc_t *p, psinfo32_t *psp)
{
	kthread_t *t;
	struct cred *cred;
	hrtime_t hrutime, hrstime;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((t = prchoose(p)) == NULL)	/* returns locked thread */
		bzero(psp, sizeof (*psp));
	else {
		thread_unlock(t);
		bzero(psp, sizeof (*psp) - sizeof (psp->pr_lwp));
	}

	/*
	 * only export SSYS and SMSACCT; everything else is off-limits to
	 * userland apps.
	 */
	psp->pr_flag = p->p_flag & (SSYS | SMSACCT);
	psp->pr_nlwp = p->p_lwpcnt;
	psp->pr_nzomb = p->p_zombcnt;
	mutex_enter(&p->p_crlock);
	cred = p->p_cred;
	psp->pr_uid = crgetruid(cred);
	psp->pr_euid = crgetuid(cred);
	psp->pr_gid = crgetrgid(cred);
	psp->pr_egid = crgetgid(cred);
	mutex_exit(&p->p_crlock);
	psp->pr_pid = p->p_pid;
	if (curproc->p_zone->zone_id != GLOBAL_ZONEID &&
	    (p->p_flag & SZONETOP)) {
		ASSERT(p->p_zone->zone_id != GLOBAL_ZONEID);
		/*
		 * Inside local zones, fake zsched's pid as parent pids for
		 * processes which reference processes outside of the zone.
		 */
		psp->pr_ppid = curproc->p_zone->zone_zsched->p_pid;
	} else {
		psp->pr_ppid = p->p_ppid;
	}
	psp->pr_pgid = p->p_pgrp;
	psp->pr_sid = p->p_sessp->s_sid;
	psp->pr_taskid = p->p_task->tk_tkid;
	psp->pr_projid = p->p_task->tk_proj->kpj_id;
	psp->pr_poolid = p->p_pool->pool_id;
	psp->pr_zoneid = p->p_zone->zone_id;
	if ((psp->pr_contract = PRCTID(p)) == 0)
		psp->pr_contract = -1;
	psp->pr_addr = 0;	/* cannot represent 64-bit addr in 32 bits */
	switch (p->p_model) {
	case DATAMODEL_ILP32:
		psp->pr_dmodel = PR_MODEL_ILP32;
		break;
	case DATAMODEL_LP64:
		psp->pr_dmodel = PR_MODEL_LP64;
		break;
	}
	hrutime = mstate_aggr_state(p, LMS_USER);
	hrstime = mstate_aggr_state(p, LMS_SYSTEM);
	hrt2ts32(hrutime + hrstime, &psp->pr_time);
	TICK_TO_TIMESTRUC32(p->p_cutime + p->p_cstime, &psp->pr_ctime);

	if (t == NULL) {
		extern int wstat(int, int);	/* needs a header file */
		int wcode = p->p_wcode;		/* must be atomic read */

		if (wcode)
			psp->pr_wstat = wstat(wcode, p->p_wdata);
		psp->pr_ttydev = PRNODEV32;
		psp->pr_lwp.pr_state = SZOMB;
		psp->pr_lwp.pr_sname = 'Z';
	} else {
		user_t *up = PTOU(p);
		struct as *as;
		dev_t d;
		extern dev_t rwsconsdev, rconsdev, uconsdev;

		d = cttydev(p);
		/*
		 * If the controlling terminal is the real
		 * or workstation console device, map to what the
		 * user thinks is the console device. Handle case when
		 * rwsconsdev or rconsdev is set to NODEV for Starfire.
		 */
		if ((d == rwsconsdev || d == rconsdev) && d != NODEV)
			d = uconsdev;
		(void) cmpldev(&psp->pr_ttydev, d);
		TIMESPEC_TO_TIMESPEC32(&psp->pr_start, &up->u_start);
		bcopy(up->u_comm, psp->pr_fname,
		    MIN(sizeof (up->u_comm), sizeof (psp->pr_fname)-1));
		bcopy(up->u_psargs, psp->pr_psargs,
		    MIN(PRARGSZ-1, PSARGSZ));
		psp->pr_argc = up->u_argc;
		psp->pr_argv = (caddr32_t)up->u_argv;
		psp->pr_envp = (caddr32_t)up->u_envp;

		/* get the chosen lwp's lwpsinfo */
		prgetlwpsinfo32(t, &psp->pr_lwp);

		/* compute %cpu for the process */
		if (p->p_lwpcnt == 1)
			psp->pr_pctcpu = psp->pr_lwp.pr_pctcpu;
		else {
			uint64_t pct = 0;
			hrtime_t cur_time;

			t = p->p_tlist;
			cur_time = gethrtime_unscaled();
			do {
				pct += cpu_update_pct(t, cur_time);
			} while ((t = t->t_forw) != p->p_tlist);

			psp->pr_pctcpu = prgetpctcpu(pct);
		}
		if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
			psp->pr_size = 0;
			psp->pr_rssize = 0;
		} else {
			mutex_exit(&p->p_lock);
			AS_LOCK_ENTER(as, RW_READER);
			psp->pr_size = (size32_t)
			    (btopr(as->a_resvsize) * (PAGESIZE / 1024));
			psp->pr_rssize = (size32_t)
			    (rm_asrss(as) * (PAGESIZE / 1024));
			psp->pr_pctmem = rm_pctmemory(as);
			AS_LOCK_EXIT(as);
			mutex_enter(&p->p_lock);
		}
	}

	/*
	 * If we are looking at an LP64 process, zero out
	 * the fields that cannot be represented in ILP32.
	 */
	if (p->p_model != DATAMODEL_ILP32) {
		psp->pr_size = 0;
		psp->pr_rssize = 0;
		psp->pr_argv = 0;
		psp->pr_envp = 0;
	}
}
#endif	/* _SYSCALL32_IMPL */
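/*
 * prgetlwpsinfo() fills the per-lwp portion of psinfo for the chosen lwp.
 * pr_pctcpu is a binary fraction of recent CPU usage (0x8000 presumably
 * representing 100%; see prgetpctcpu()), so the pr_cpu computation below
 * converts it to a rounded integer percentage clamped to [0..99].
 */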
void
prgetlwpsinfo(kthread_t *t, lwpsinfo_t *psp)
{
	klwp_t *lwp = ttolwp(t);
	sobj_ops_t *sobj;
	char c, state;
	uint64_t pct;
	int retval, niceval;
	hrtime_t hrutime, hrstime;

	ASSERT(MUTEX_HELD(&ttoproc(t)->p_lock));

	bzero(psp, sizeof (*psp));

	psp->pr_flag = 0;	/* lwpsinfo_t.pr_flag is deprecated */
	psp->pr_lwpid = t->t_tid;
	psp->pr_addr = (uintptr_t)t;
	psp->pr_wchan = (uintptr_t)t->t_wchan;

	/* map the thread state enum into a process state enum */
	state = VSTOPPED(t) ? TS_STOPPED : t->t_state;
	switch (state) {
	case TS_SLEEP:		state = SSLEEP;		c = 'S';	break;
	case TS_RUN:		state = SRUN;		c = 'R';	break;
	case TS_ONPROC:		state = SONPROC;	c = 'O';	break;
	case TS_ZOMB:		state = SZOMB;		c = 'Z';	break;
	case TS_STOPPED:	state = SSTOP;		c = 'T';	break;
	case TS_WAIT:		state = SWAIT;		c = 'W';	break;
	default:		state = 0;		c = '?';	break;
	}
	psp->pr_state = state;
	psp->pr_sname = c;
	if ((sobj = t->t_sobj_ops) != NULL)
		psp->pr_stype = SOBJ_TYPE(sobj);
	retval = CL_DONICE(t, NULL, 0, &niceval);
	if (retval == 0) {
		psp->pr_oldpri = v.v_maxsyspri - t->t_pri;
		psp->pr_nice = niceval + NZERO;
	}
	psp->pr_syscall = t->t_sysnum;
	psp->pr_pri = t->t_pri;
	psp->pr_start.tv_sec = t->t_start;
	psp->pr_start.tv_nsec = 0L;
	hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];
	scalehrtime(&hrutime);
	hrstime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
	    lwp->lwp_mstate.ms_acct[LMS_TRAP];
	scalehrtime(&hrstime);
	hrt2ts(hrutime + hrstime, &psp->pr_time);
	/* compute %cpu for the lwp */
	pct = cpu_update_pct(t, gethrtime_unscaled());
	psp->pr_pctcpu = prgetpctcpu(pct);
	psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15;	/* [0..99] */
	if (psp->pr_cpu > 99)
		psp->pr_cpu = 99;

	(void) strncpy(psp->pr_clname, sclass[t->t_cid].cl_name,
	    sizeof (psp->pr_clname) - 1);
	bzero(psp->pr_name, sizeof (psp->pr_name));	/* XXX ??? */
	psp->pr_onpro = t->t_cpu->cpu_id;
	psp->pr_bindpro = t->t_bind_cpu;
	psp->pr_bindpset = t->t_bind_pset;
	psp->pr_lgrp = t->t_lpl->lpl_lgrpid;
}
#ifdef _SYSCALL32_IMPL
void
prgetlwpsinfo32(kthread_t *t, lwpsinfo32_t *psp)
{
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	sobj_ops_t *sobj;
	char c, state;
	uint64_t pct;
	int retval, niceval;
	hrtime_t hrutime, hrstime;

	ASSERT(MUTEX_HELD(&p->p_lock));

	bzero(psp, sizeof (*psp));

	psp->pr_flag = 0;	/* lwpsinfo_t.pr_flag is deprecated */
	psp->pr_lwpid = t->t_tid;
	psp->pr_addr = 0;	/* cannot represent 64-bit addr in 32 bits */
	psp->pr_wchan = 0;	/* cannot represent 64-bit addr in 32 bits */

	/* map the thread state enum into a process state enum */
	state = VSTOPPED(t) ? TS_STOPPED : t->t_state;
	switch (state) {
	case TS_SLEEP:		state = SSLEEP;		c = 'S';	break;
	case TS_RUN:		state = SRUN;		c = 'R';	break;
	case TS_ONPROC:		state = SONPROC;	c = 'O';	break;
	case TS_ZOMB:		state = SZOMB;		c = 'Z';	break;
	case TS_STOPPED:	state = SSTOP;		c = 'T';	break;
	case TS_WAIT:		state = SWAIT;		c = 'W';	break;
	default:		state = 0;		c = '?';	break;
	}
	psp->pr_state = state;
	psp->pr_sname = c;
	if ((sobj = t->t_sobj_ops) != NULL)
		psp->pr_stype = SOBJ_TYPE(sobj);
	retval = CL_DONICE(t, NULL, 0, &niceval);
	if (retval == 0) {
		psp->pr_oldpri = v.v_maxsyspri - t->t_pri;
		psp->pr_nice = niceval + NZERO;
	} else {
		psp->pr_oldpri = 0;
		psp->pr_nice = 0;
	}
	psp->pr_syscall = t->t_sysnum;
	psp->pr_pri = t->t_pri;
	psp->pr_start.tv_sec = (time32_t)t->t_start;
	psp->pr_start.tv_nsec = 0L;
	hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];
	scalehrtime(&hrutime);
	hrstime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
	    lwp->lwp_mstate.ms_acct[LMS_TRAP];
	scalehrtime(&hrstime);
	hrt2ts32(hrutime + hrstime, &psp->pr_time);
	/* compute %cpu for the lwp */
	pct = cpu_update_pct(t, gethrtime_unscaled());
	psp->pr_pctcpu = prgetpctcpu(pct);
	psp->pr_cpu = (psp->pr_pctcpu*100 + 0x6000) >> 15;	/* [0..99] */
	if (psp->pr_cpu > 99)
		psp->pr_cpu = 99;

	(void) strncpy(psp->pr_clname, sclass[t->t_cid].cl_name,
	    sizeof (psp->pr_clname) - 1);
	bzero(psp->pr_name, sizeof (psp->pr_name));	/* XXX ??? */
	psp->pr_onpro = t->t_cpu->cpu_id;
	psp->pr_bindpro = t->t_bind_cpu;
	psp->pr_bindpset = t->t_bind_pset;
	psp->pr_lgrp = t->t_lpl->lpl_lgrpid;
}
#endif	/* _SYSCALL32_IMPL */
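/*
 * Helper macros for converting the native psinfo/lwpsinfo structures to
 * their 32-bit counterparts.  PR_COPY_FIELD_ILP32 copies a field only when
 * the source process is itself ILP32, since an LP64 value (e.g. a user
 * address) may not be representable in the narrower 32-bit field;
 * PR_IGNORE_FIELD documents fields that are deliberately left zeroed.
 */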
#ifdef _SYSCALL32_IMPL

#define	PR_COPY_FIELD(s, d, field)	d->field = s->field

#define	PR_COPY_FIELD_ILP32(s, d, field)			\
	if (s->pr_dmodel == PR_MODEL_ILP32) {			\
		d->field = s->field;				\
	}

#define	PR_COPY_TIMESPEC(s, d, field)				\
	TIMESPEC_TO_TIMESPEC32(&d->field, &s->field);

#define	PR_COPY_BUF(s, d, field)				\
	bcopy(s->field, d->field, sizeof (d->field));

#define	PR_IGNORE_FIELD(s, d, field)

void
lwpsinfo_kto32(const struct lwpsinfo *src, struct lwpsinfo32 *dest)
{
	bzero(dest, sizeof (*dest));

	PR_COPY_FIELD(src, dest, pr_flag);
	PR_COPY_FIELD(src, dest, pr_lwpid);
	PR_IGNORE_FIELD(src, dest, pr_addr);
	PR_IGNORE_FIELD(src, dest, pr_wchan);
	PR_COPY_FIELD(src, dest, pr_stype);
	PR_COPY_FIELD(src, dest, pr_state);
	PR_COPY_FIELD(src, dest, pr_sname);
	PR_COPY_FIELD(src, dest, pr_nice);
	PR_COPY_FIELD(src, dest, pr_syscall);
	PR_COPY_FIELD(src, dest, pr_oldpri);
	PR_COPY_FIELD(src, dest, pr_cpu);
	PR_COPY_FIELD(src, dest, pr_pri);
	PR_COPY_FIELD(src, dest, pr_pctcpu);
	PR_COPY_TIMESPEC(src, dest, pr_start);
	PR_COPY_BUF(src, dest, pr_clname);
	PR_COPY_BUF(src, dest, pr_name);
	PR_COPY_FIELD(src, dest, pr_onpro);
	PR_COPY_FIELD(src, dest, pr_bindpro);
	PR_COPY_FIELD(src, dest, pr_bindpset);
	PR_COPY_FIELD(src, dest, pr_lgrp);
}

void
psinfo_kto32(const struct psinfo *src, struct psinfo32 *dest)
{
	bzero(dest, sizeof (*dest));

	PR_COPY_FIELD(src, dest, pr_flag);
	PR_COPY_FIELD(src, dest, pr_nlwp);
	PR_COPY_FIELD(src, dest, pr_pid);
	PR_COPY_FIELD(src, dest, pr_ppid);
	PR_COPY_FIELD(src, dest, pr_pgid);
	PR_COPY_FIELD(src, dest, pr_sid);
	PR_COPY_FIELD(src, dest, pr_uid);
	PR_COPY_FIELD(src, dest, pr_euid);
	PR_COPY_FIELD(src, dest, pr_gid);
	PR_COPY_FIELD(src, dest, pr_egid);
	PR_IGNORE_FIELD(src, dest, pr_addr);
	PR_COPY_FIELD_ILP32(src, dest, pr_size);
	PR_COPY_FIELD_ILP32(src, dest, pr_rssize);
	PR_COPY_FIELD(src, dest, pr_ttydev);
	PR_COPY_FIELD(src, dest, pr_pctcpu);
	PR_COPY_FIELD(src, dest, pr_pctmem);
	PR_COPY_TIMESPEC(src, dest, pr_start);
	PR_COPY_TIMESPEC(src, dest, pr_time);
	PR_COPY_TIMESPEC(src, dest, pr_ctime);
	PR_COPY_BUF(src, dest, pr_fname);
	PR_COPY_BUF(src, dest, pr_psargs);
	PR_COPY_FIELD(src, dest, pr_wstat);
	PR_COPY_FIELD(src, dest, pr_argc);
	PR_COPY_FIELD_ILP32(src, dest, pr_argv);
	PR_COPY_FIELD_ILP32(src, dest, pr_envp);
	PR_COPY_FIELD(src, dest, pr_dmodel);
	PR_COPY_FIELD(src, dest, pr_taskid);
	PR_COPY_FIELD(src, dest, pr_projid);
	PR_COPY_FIELD(src, dest, pr_nzomb);
	PR_COPY_FIELD(src, dest, pr_poolid);
	PR_COPY_FIELD(src, dest, pr_contract);

	lwpsinfo_kto32(&src->pr_lwp, &dest->pr_lwp);
}

#undef	PR_COPY_FIELD
#undef	PR_COPY_FIELD_ILP32
#undef	PR_COPY_TIMESPEC
#undef	PR_COPY_BUF
#undef	PR_IGNORE_FIELD

#endif	/* _SYSCALL32_IMPL */
/*
 * This used to get called when microstate accounting was disabled but
 * microstate information was requested.  Since microstate accounting is
 * on regardless of the proc flags, this simply makes it appear to procfs
 * that microstate accounting is on.  This is relatively meaningless since
 * you can't turn it off, but this is here for the sake of appearances.
 */

/*ARGSUSED*/
void
estimate_msacct(kthread_t *t, hrtime_t curtime)
{
	proc_t *p;

	if (t == NULL)
		return;

	p = ttoproc(t);
	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * A system process (p0) could be referenced if the thread is
	 * in the process of exiting.  Don't turn on microstate accounting
	 * in that case.
	 */
	if (p->p_flag & SSYS)
		return;

	/*
	 * Loop through all the LWPs (kernel threads) in the process.
	 */
	t = p->p_tlist;
	do {
		t->t_proc_flag |= TP_MSACCT;
	} while ((t = t->t_forw) != p->p_tlist);

	p->p_flag |= SMSACCT;			/* set process-wide MSACCT */
}
/*
 * It's not really possible to disable microstate accounting anymore.
 * However, this routine simply turns off the ms accounting flags in a
 * process.  This way procfs can still pretend to turn microstate
 * accounting on and off for a process, but it actually doesn't do
 * anything.  This is a neutered form of preemptive idiot-proofing.
 */
void
disable_msacct(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	p->p_flag &= ~SMSACCT;		/* clear process-wide MSACCT */
	/*
	 * Loop through all the LWPs (kernel threads) in the process.
	 */
	if ((t = p->p_tlist) != NULL) {
		do {
			/* clear per-thread flag */
			t->t_proc_flag &= ~TP_MSACCT;
		} while ((t = t->t_forw) != p->p_tlist);
	}
}
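/*
 * The usage readers below sample t_waitrq and ms_state_start as unscaled
 * hrtime values that the dispatcher can update concurrently.  A stale or
 * racing read shows up as a negative time delta, so the microstate
 * conversion loops retry with a fresh gethrtime_unscaled() up to
 * MAX_ITERS_SPIN times before accepting the value.
 */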
/*
 * Return resource usage information.
 */
void
prgetusage(kthread_t *t, prhusage_t *pup)
{
	klwp_t *lwp = ttolwp(t);
	hrtime_t *mstimep;
	struct mstate *ms = &lwp->lwp_mstate;
	int state;
	int i;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t tmp1;

	curtime = gethrtime_unscaled();

	pup->pr_lwpid	= t->t_tid;
	pup->pr_count	= 1;
	pup->pr_create	= ms->ms_start;
	pup->pr_term	= ms->ms_term;
	scalehrtime(&pup->pr_create);
	scalehrtime(&pup->pr_term);
	if (ms->ms_term == 0) {
		pup->pr_rtime = curtime - ms->ms_start;
		scalehrtime(&pup->pr_rtime);
	} else {
		pup->pr_rtime = ms->ms_term - ms->ms_start;
		scalehrtime(&pup->pr_rtime);
	}

	pup->pr_utime	= ms->ms_acct[LMS_USER];
	pup->pr_stime	= ms->ms_acct[LMS_SYSTEM];
	pup->pr_ttime	= ms->ms_acct[LMS_TRAP];
	pup->pr_tftime	= ms->ms_acct[LMS_TFAULT];
	pup->pr_dftime	= ms->ms_acct[LMS_DFAULT];
	pup->pr_kftime	= ms->ms_acct[LMS_KFAULT];
	pup->pr_ltime	= ms->ms_acct[LMS_USER_LOCK];
	pup->pr_slptime	= ms->ms_acct[LMS_SLEEP];
	pup->pr_wtime	= ms->ms_acct[LMS_WAIT_CPU];
	pup->pr_stoptime = ms->ms_acct[LMS_STOPPED];

	prscaleusage(pup);

	/*
	 * Adjust for time waiting in the dispatcher queue.
	 */
	waitrq = t->t_waitrq;	/* hopefully atomic */
	if (waitrq != 0) {
		if (waitrq > curtime) {
			curtime = gethrtime_unscaled();
		}
		tmp1 = curtime - waitrq;
		scalehrtime(&tmp1);
		pup->pr_wtime += tmp1;
		curtime = waitrq;
	}

	/*
	 * Adjust for time spent in current microstate.
	 */
	if (ms->ms_state_start > curtime) {
		curtime = gethrtime_unscaled();
	}

	i = 0;
	do {
		switch (state = t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			switch (state = ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				break;
			default:
				state = LMS_SLEEP;
				break;
			}
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			state = LMS_SYSTEM;
			break;
		}
		switch (state) {
		case LMS_USER:		mstimep = &pup->pr_utime;	break;
		case LMS_SYSTEM:	mstimep = &pup->pr_stime;	break;
		case LMS_TRAP:		mstimep = &pup->pr_ttime;	break;
		case LMS_TFAULT:	mstimep = &pup->pr_tftime;	break;
		case LMS_DFAULT:	mstimep = &pup->pr_dftime;	break;
		case LMS_KFAULT:	mstimep = &pup->pr_kftime;	break;
		case LMS_USER_LOCK:	mstimep = &pup->pr_ltime;	break;
		case LMS_SLEEP:		mstimep = &pup->pr_slptime;	break;
		case LMS_WAIT_CPU:	mstimep = &pup->pr_wtime;	break;
		case LMS_STOPPED:	mstimep = &pup->pr_stoptime;	break;
		default:		panic("prgetusage: unknown microstate");
		}
		tmp1 = curtime - ms->ms_state_start;
		if (tmp1 < 0) {
			curtime = gethrtime_unscaled();
			i++;
		} else {
			scalehrtime(&tmp1);
		}
	} while (tmp1 < 0 && i < MAX_ITERS_SPIN);

	*mstimep += tmp1;

	/* update pup timestamp */
	pup->pr_tstamp = curtime;
	scalehrtime(&pup->pr_tstamp);

	/*
	 * Resource usage counters.
	 */
	pup->pr_minf  = lwp->lwp_ru.minflt;
	pup->pr_majf  = lwp->lwp_ru.majflt;
	pup->pr_nswap = lwp->lwp_ru.nswap;
	pup->pr_inblk = lwp->lwp_ru.inblock;
	pup->pr_oublk = lwp->lwp_ru.oublock;
	pup->pr_msnd  = lwp->lwp_ru.msgsnd;
	pup->pr_mrcv  = lwp->lwp_ru.msgrcv;
	pup->pr_sigs  = lwp->lwp_ru.nsignals;
	pup->pr_vctx  = lwp->lwp_ru.nvcsw;
	pup->pr_ictx  = lwp->lwp_ru.nivcsw;
	pup->pr_sysc  = lwp->lwp_ru.sysc;
	pup->pr_ioch  = lwp->lwp_ru.ioch;
}
/*
 * Convert ms_acct stats from unscaled high-res time to nanoseconds
 */
static void
prscaleusage(prhusage_t *usg)
{
	scalehrtime(&usg->pr_utime);
	scalehrtime(&usg->pr_stime);
	scalehrtime(&usg->pr_ttime);
	scalehrtime(&usg->pr_tftime);
	scalehrtime(&usg->pr_dftime);
	scalehrtime(&usg->pr_kftime);
	scalehrtime(&usg->pr_ltime);
	scalehrtime(&usg->pr_slptime);
	scalehrtime(&usg->pr_wtime);
	scalehrtime(&usg->pr_stoptime);
}
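/*
 * praddusage() accumulates into a prhusage_t that already holds scaled
 * (nanosecond) values, so each lwp's raw ms_acct[] snapshot is first
 * copied into a local prhusage_t (conv) and run through prscaleusage()
 * before being added in.
 */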
/*
 * Sum resource usage information.
 */
void
praddusage(kthread_t *t, prhusage_t *pup)
{
	klwp_t *lwp = ttolwp(t);
	hrtime_t *mstimep;
	struct mstate *ms = &lwp->lwp_mstate;
	int state;
	int i;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t tmp;
	prhusage_t conv;

	curtime = gethrtime_unscaled();

	if (ms->ms_term == 0) {
		tmp = curtime - ms->ms_start;
		scalehrtime(&tmp);
		pup->pr_rtime += tmp;
	} else {
		tmp = ms->ms_term - ms->ms_start;
		scalehrtime(&tmp);
		pup->pr_rtime += tmp;
	}

	conv.pr_utime = ms->ms_acct[LMS_USER];
	conv.pr_stime = ms->ms_acct[LMS_SYSTEM];
	conv.pr_ttime = ms->ms_acct[LMS_TRAP];
	conv.pr_tftime = ms->ms_acct[LMS_TFAULT];
	conv.pr_dftime = ms->ms_acct[LMS_DFAULT];
	conv.pr_kftime = ms->ms_acct[LMS_KFAULT];
	conv.pr_ltime = ms->ms_acct[LMS_USER_LOCK];
	conv.pr_slptime = ms->ms_acct[LMS_SLEEP];
	conv.pr_wtime = ms->ms_acct[LMS_WAIT_CPU];
	conv.pr_stoptime = ms->ms_acct[LMS_STOPPED];

	prscaleusage(&conv);

	pup->pr_utime	+= conv.pr_utime;
	pup->pr_stime	+= conv.pr_stime;
	pup->pr_ttime	+= conv.pr_ttime;
	pup->pr_tftime	+= conv.pr_tftime;
	pup->pr_dftime	+= conv.pr_dftime;
	pup->pr_kftime	+= conv.pr_kftime;
	pup->pr_ltime	+= conv.pr_ltime;
	pup->pr_slptime	+= conv.pr_slptime;
	pup->pr_wtime	+= conv.pr_wtime;
	pup->pr_stoptime += conv.pr_stoptime;

	/*
	 * Adjust for time waiting in the dispatcher queue.
	 */
	waitrq = t->t_waitrq;	/* hopefully atomic */
	if (waitrq != 0) {
		if (waitrq > curtime) {
			curtime = gethrtime_unscaled();
		}
		tmp = curtime - waitrq;
		scalehrtime(&tmp);
		pup->pr_wtime += tmp;
		curtime = waitrq;
	}

	/*
	 * Adjust for time spent in current microstate.
	 */
	if (ms->ms_state_start > curtime) {
		curtime = gethrtime_unscaled();
	}

	i = 0;
	do {
		switch (state = t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			switch (state = ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				break;
			default:
				state = LMS_SLEEP;
				break;
			}
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			state = LMS_SYSTEM;
			break;
		}
		switch (state) {
		case LMS_USER:		mstimep = &pup->pr_utime;	break;
		case LMS_SYSTEM:	mstimep = &pup->pr_stime;	break;
		case LMS_TRAP:		mstimep = &pup->pr_ttime;	break;
		case LMS_TFAULT:	mstimep = &pup->pr_tftime;	break;
		case LMS_DFAULT:	mstimep = &pup->pr_dftime;	break;
		case LMS_KFAULT:	mstimep = &pup->pr_kftime;	break;
		case LMS_USER_LOCK:	mstimep = &pup->pr_ltime;	break;
		case LMS_SLEEP:		mstimep = &pup->pr_slptime;	break;
		case LMS_WAIT_CPU:	mstimep = &pup->pr_wtime;	break;
		case LMS_STOPPED:	mstimep = &pup->pr_stoptime;	break;
		default:		panic("praddusage: unknown microstate");
		}
		tmp = curtime - ms->ms_state_start;
		if (tmp < 0) {
			curtime = gethrtime_unscaled();
			i++;
		} else {
			scalehrtime(&tmp);
		}
	} while (tmp < 0 && i < MAX_ITERS_SPIN);

	*mstimep += tmp;

	/* update pup timestamp */
	pup->pr_tstamp = curtime;
	scalehrtime(&pup->pr_tstamp);

	/*
	 * Resource usage counters.
	 */
	pup->pr_minf  += lwp->lwp_ru.minflt;
	pup->pr_majf  += lwp->lwp_ru.majflt;
	pup->pr_nswap += lwp->lwp_ru.nswap;
	pup->pr_inblk += lwp->lwp_ru.inblock;
	pup->pr_oublk += lwp->lwp_ru.oublock;
	pup->pr_msnd  += lwp->lwp_ru.msgsnd;
	pup->pr_mrcv  += lwp->lwp_ru.msgrcv;
	pup->pr_sigs  += lwp->lwp_ru.nsignals;
	pup->pr_vctx  += lwp->lwp_ru.nvcsw;
	pup->pr_ictx  += lwp->lwp_ru.nivcsw;
	pup->pr_sysc  += lwp->lwp_ru.sysc;
	pup->pr_ioch  += lwp->lwp_ru.ioch;
}
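/*
 * Note: the copy loops in the conversion routines below walk the 22
 * consecutive uint64_t counters starting at pr_minf by pointer
 * arithmetic; this relies on prhusage_t and the prusage structures
 * laying those counters out contiguously and in the same order.
 */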
/*
 * Convert a prhusage_t to a prusage_t.
 * This means convert each hrtime_t to a timestruc_t
 * and copy the count fields uint64_t => ulong_t.
 */
void
prcvtusage(prhusage_t *pup, prusage_t *upup)
{
	uint64_t *ullp;
	ulong_t *ulp;
	int i;

	upup->pr_lwpid = pup->pr_lwpid;
	upup->pr_count = pup->pr_count;

	hrt2ts(pup->pr_tstamp,	&upup->pr_tstamp);
	hrt2ts(pup->pr_create,	&upup->pr_create);
	hrt2ts(pup->pr_term,	&upup->pr_term);
	hrt2ts(pup->pr_rtime,	&upup->pr_rtime);
	hrt2ts(pup->pr_utime,	&upup->pr_utime);
	hrt2ts(pup->pr_stime,	&upup->pr_stime);
	hrt2ts(pup->pr_ttime,	&upup->pr_ttime);
	hrt2ts(pup->pr_tftime,	&upup->pr_tftime);
	hrt2ts(pup->pr_dftime,	&upup->pr_dftime);
	hrt2ts(pup->pr_kftime,	&upup->pr_kftime);
	hrt2ts(pup->pr_ltime,	&upup->pr_ltime);
	hrt2ts(pup->pr_slptime,	&upup->pr_slptime);
	hrt2ts(pup->pr_wtime,	&upup->pr_wtime);
	hrt2ts(pup->pr_stoptime, &upup->pr_stoptime);
	bzero(upup->filltime, sizeof (upup->filltime));

	ullp = &pup->pr_minf;
	ulp = &upup->pr_minf;
	for (i = 0; i < 22; i++)
		*ulp++ = (ulong_t)*ullp++;
}
#ifdef _SYSCALL32_IMPL
void
prcvtusage32(prhusage_t *pup, prusage32_t *upup)
{
	uint64_t *ullp;
	uint32_t *ulp;
	int i;

	upup->pr_lwpid = pup->pr_lwpid;
	upup->pr_count = pup->pr_count;

	hrt2ts32(pup->pr_tstamp,	&upup->pr_tstamp);
	hrt2ts32(pup->pr_create,	&upup->pr_create);
	hrt2ts32(pup->pr_term,		&upup->pr_term);
	hrt2ts32(pup->pr_rtime,		&upup->pr_rtime);
	hrt2ts32(pup->pr_utime,		&upup->pr_utime);
	hrt2ts32(pup->pr_stime,		&upup->pr_stime);
	hrt2ts32(pup->pr_ttime,		&upup->pr_ttime);
	hrt2ts32(pup->pr_tftime,	&upup->pr_tftime);
	hrt2ts32(pup->pr_dftime,	&upup->pr_dftime);
	hrt2ts32(pup->pr_kftime,	&upup->pr_kftime);
	hrt2ts32(pup->pr_ltime,		&upup->pr_ltime);
	hrt2ts32(pup->pr_slptime,	&upup->pr_slptime);
	hrt2ts32(pup->pr_wtime,		&upup->pr_wtime);
	hrt2ts32(pup->pr_stoptime,	&upup->pr_stoptime);
	bzero(upup->filltime, sizeof (upup->filltime));

	ullp = &pup->pr_minf;
	ulp = &upup->pr_minf;
	for (i = 0; i < 22; i++)
		*ulp++ = (uint32_t)*ullp++;
}
#endif	/* _SYSCALL32_IMPL */
/*
 * Determine whether a set is empty.
 */
static int
setisempty(uint32_t *sp, uint_t n)
{
	while (n--)
		if (*sp++)
			return (0);
	return (1);
}
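/*
 * Watchpoint bookkeeping: watched areas are the user-specified address
 * ranges, kept in the AVL tree p->p_warea sorted by virtual address.
 * Each area is backed by one watched_page entry per page it covers; the
 * watched_page counts how many areas watch that page for read, write and
 * execute (wp_read/wp_write/wp_exec) and remembers the page's original
 * protections in wp_oprot.
 */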
/*
 * Utility routine for establishing a watched area in the process.
 * Keep the list of watched areas sorted by virtual address.
 */
static int
set_watched_area(proc_t *p, struct watched_area *pwa)
{
	caddr_t vaddr = pwa->wa_vaddr;
	caddr_t eaddr = pwa->wa_eaddr;
	ulong_t flags = pwa->wa_flags;
	struct watched_area *target;
	avl_index_t where;
	int error = 0;

	/* we must not be holding p->p_lock, but the process must be locked */
	ASSERT(MUTEX_NOT_HELD(&p->p_lock));
	ASSERT(p->p_proc_flag & P_PR_LOCK);

	/*
	 * If this is our first watchpoint, enable watchpoints for the process.
	 */
	if (!pr_watch_active(p)) {
		kthread_t *t;

		mutex_enter(&p->p_lock);
		if ((t = p->p_tlist) != NULL) {
			do {
				watch_enable(t);
			} while ((t = t->t_forw) != p->p_tlist);
		}
		mutex_exit(&p->p_lock);
	}

	target = pr_find_watched_area(p, pwa, &where);
	if (target != NULL) {
		/*
		 * We discovered an existing, overlapping watched area.
		 * Allow it only if it is an exact match.
		 */
		if (target->wa_vaddr != vaddr ||
		    target->wa_eaddr != eaddr)
			error = EINVAL;
		else if (target->wa_flags != flags) {
			error = set_watched_page(p, vaddr, eaddr,
			    flags, target->wa_flags);
			target->wa_flags = flags;
		}
		kmem_free(pwa, sizeof (struct watched_area));
	} else {
		avl_insert(&p->p_warea, pwa, where);
		error = set_watched_page(p, vaddr, eaddr, flags, 0);
	}

	return (error);
}
/*
 * Utility routine for clearing a watched area in the process.
 * Must be an exact match of the virtual address.
 * Size and flags don't matter.
 */
static int
clear_watched_area(proc_t *p, struct watched_area *pwa)
{
	struct watched_area *found;

	/* we must not be holding p->p_lock, but the process must be locked */
	ASSERT(MUTEX_NOT_HELD(&p->p_lock));
	ASSERT(p->p_proc_flag & P_PR_LOCK);

	if (!pr_watch_active(p)) {
		kmem_free(pwa, sizeof (struct watched_area));
		return (0);
	}

	/*
	 * Look for a matching address in the watched areas.  If a match is
	 * found, clear the old watched area and adjust the watched page(s).
	 * It is not an error if there is no match.
	 */
	if ((found = pr_find_watched_area(p, pwa, NULL)) != NULL &&
	    found->wa_vaddr == pwa->wa_vaddr) {
		clear_watched_page(p, found->wa_vaddr, found->wa_eaddr,
		    found->wa_flags);
		avl_remove(&p->p_warea, found);
		kmem_free(found, sizeof (struct watched_area));
	}

	kmem_free(pwa, sizeof (struct watched_area));

	/*
	 * If we removed the last watched area from the process, disable
	 * watchpoints.
	 */
	if (!pr_watch_active(p)) {
		kthread_t *t;

		mutex_enter(&p->p_lock);
		if ((t = p->p_tlist) != NULL) {
			do {
				watch_disable(t);
			} while ((t = t->t_forw) != p->p_tlist);
		}
		mutex_exit(&p->p_lock);
	}

	return (0);
}
/*
 * Frees all the watched_area structures
 */
void
pr_free_watchpoints(proc_t *p)
{
	struct watched_area *delp;
	void *cookie;

	cookie = NULL;
	while ((delp = avl_destroy_nodes(&p->p_warea, &cookie)) != NULL)
		kmem_free(delp, sizeof (struct watched_area));

	avl_destroy(&p->p_warea);
}
/*
 * This one is called by the traced process to unwatch all the
 * pages while deallocating the list of watched_page structs.
 */
void
pr_free_watched_pages(proc_t *p)
{
	struct as *as = p->p_as;
	struct watched_page *pwp;
	uint_t prot;
	int retrycnt, err;
	void *cookie;

	if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
	AS_LOCK_ENTER(as, RW_WRITER);

	pwp = avl_first(&as->a_wpage);

	cookie = NULL;
	while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {
		retrycnt = 0;
		if ((prot = pwp->wp_oprot) != 0) {
			caddr_t addr = pwp->wp_vaddr;
			struct seg *seg;

		retry:
			if ((pwp->wp_prot != prot ||
			    (pwp->wp_flags & WP_NOWATCH)) &&
			    (seg = as_segat(as, addr)) != NULL) {
				err = segop_setprot(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
		}
		kmem_free(pwp, sizeof (struct watched_page));
	}

	avl_destroy(&as->a_wpage);
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as);
}
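/*
 * The effective protections of a watched page are derived from its
 * original protections (wp_oprot) by stripping access: a write watchpoint
 * clears PROT_WRITE, while read and execute watchpoints clear all of
 * PROT_READ|PROT_WRITE|PROT_EXEC, so that any access to the page traps
 * and can be reported as a watchpoint event.
 */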
/*
 * Insert a watched area into the list of watched pages.
 * If oflags is zero then we are adding a new watched area.
 * Otherwise we are changing the flags of an existing watched area.
 */
static int
set_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr,
	ulong_t flags, ulong_t oflags)
{
	struct as *as = p->p_as;
	avl_tree_t *pwp_tree;
	struct watched_page *pwp, *newpwp;
	struct watched_page tpw;
	avl_index_t where;
	struct seg *seg;
	uint_t prot;
	caddr_t addr;

	/*
	 * We need to pre-allocate a list of structures before we grab the
	 * address space lock to avoid calling kmem_alloc(KM_SLEEP) with locks
	 * held.
	 */
	newpwp = NULL;
	for (addr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	    addr < eaddr; addr += PAGESIZE) {
		pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
		pwp->wp_list = newpwp;
		newpwp = pwp;
	}

	AS_LOCK_ENTER(as, RW_WRITER);

	/*
	 * Search for an existing watched page to contain the watched area.
	 * If none is found, grab a new one from the available list
	 * and insert it in the active list, keeping the list sorted
	 * by user-level virtual address.
	 */
	if (p->p_flag & SVFWAIT)
		pwp_tree = &p->p_wpage;
	else
		pwp_tree = &as->a_wpage;

again:
	if (avl_numnodes(pwp_tree) > prnwatch) {
		AS_LOCK_EXIT(as);
		while (newpwp != NULL) {
			pwp = newpwp->wp_list;
			kmem_free(newpwp, sizeof (struct watched_page));
			newpwp = pwp;
		}
		return (E2BIG);
	}

	tpw.wp_vaddr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(pwp_tree, &tpw, &where)) == NULL) {
		pwp = newpwp;
		newpwp = newpwp->wp_list;
		pwp->wp_list = NULL;
		pwp->wp_vaddr = (caddr_t)((uintptr_t)vaddr &
		    (uintptr_t)PAGEMASK);
		avl_insert(pwp_tree, pwp, where);
	}

	ASSERT(vaddr >= pwp->wp_vaddr && vaddr < pwp->wp_vaddr + PAGESIZE);

	if (oflags & WA_READ)
		pwp->wp_read--;
	if (oflags & WA_WRITE)
		pwp->wp_write--;
	if (oflags & WA_EXEC)
		pwp->wp_exec--;

	ASSERT(pwp->wp_read >= 0);
	ASSERT(pwp->wp_write >= 0);
	ASSERT(pwp->wp_exec >= 0);

	if (flags & WA_READ)
		pwp->wp_read++;
	if (flags & WA_WRITE)
		pwp->wp_write++;
	if (flags & WA_EXEC)
		pwp->wp_exec++;

	if (!(p->p_flag & SVFWAIT)) {
		vaddr = pwp->wp_vaddr;
		if (pwp->wp_oprot == 0 &&
		    (seg = as_segat(as, vaddr)) != NULL) {
			(void) segop_getprot(seg, vaddr, 0, &prot);
			pwp->wp_oprot = (uchar_t)prot;
			pwp->wp_prot = (uchar_t)prot;
		}
		if (pwp->wp_oprot != 0) {
			prot = pwp->wp_oprot;
			if (pwp->wp_read)
				prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
			if (pwp->wp_write)
				prot &= ~PROT_WRITE;
			if (pwp->wp_exec)
				prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
			if (!(pwp->wp_flags & WP_NOWATCH) &&
			    pwp->wp_prot != prot &&
			    (pwp->wp_flags & WP_SETPROT) == 0) {
				pwp->wp_flags |= WP_SETPROT;
				pwp->wp_list = p->p_wprot;
				p->p_wprot = pwp;
			}
			pwp->wp_prot = (uchar_t)prot;
		}
	}

	/*
	 * If the watched area extends into the next page then do
	 * it over again with the virtual address of the next page.
	 */
	if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
		goto again;

	AS_LOCK_EXIT(as);

	/*
	 * Free any pages we may have over-allocated
	 */
	while (newpwp != NULL) {
		pwp = newpwp->wp_list;
		kmem_free(newpwp, sizeof (struct watched_page));
		newpwp = pwp;
	}

	return (0);
}
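/*
 * Pages whose hardware protections still need changing are marked
 * WP_SETPROT and linked onto p->p_wprot above rather than updated
 * immediately; the actual segop_setprot() calls are presumably deferred
 * to a point where it is safe to call into the segment drivers without
 * the locks held here.
 */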
/*
 * Remove a watched area from the list of watched pages.
 * A watched area may extend over more than one page.
 */
static void
clear_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, ulong_t flags)
{
	struct as *as = p->p_as;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_tree_t *tree;
	avl_index_t where;

	AS_LOCK_ENTER(as, RW_WRITER);

	if (p->p_flag & SVFWAIT)
		tree = &p->p_wpage;
	else
		tree = &as->a_wpage;

	tpw.wp_vaddr = vaddr =
	    (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	pwp = avl_find(tree, &tpw, &where);
	if (pwp == NULL)
		pwp = avl_nearest(tree, where, AVL_AFTER);

	while (pwp != NULL && pwp->wp_vaddr < eaddr) {
		ASSERT(vaddr <= pwp->wp_vaddr);

		if (flags & WA_READ)
			pwp->wp_read--;
		if (flags & WA_WRITE)
			pwp->wp_write--;
		if (flags & WA_EXEC)
			pwp->wp_exec--;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec != 0) {
			/*
			 * Reset the hat layer's protections on this page.
			 */
			if (pwp->wp_oprot != 0) {
				uint_t prot = pwp->wp_oprot;

				if (pwp->wp_read)
					prot &=
					    ~(PROT_READ|PROT_WRITE|PROT_EXEC);
				if (pwp->wp_write)
					prot &= ~PROT_WRITE;
				if (pwp->wp_exec)
					prot &=
					    ~(PROT_READ|PROT_WRITE|PROT_EXEC);
				if (!(pwp->wp_flags & WP_NOWATCH) &&
				    pwp->wp_prot != prot &&
				    (pwp->wp_flags & WP_SETPROT) == 0) {
					pwp->wp_flags |= WP_SETPROT;
					pwp->wp_list = p->p_wprot;
					p->p_wprot = pwp;
				}
				pwp->wp_prot = (uchar_t)prot;
			}
		} else {
			/*
			 * No watched areas remain in this page.
			 * Reset everything to normal.
			 */
			if (pwp->wp_oprot != 0) {
				pwp->wp_prot = pwp->wp_oprot;
				if ((pwp->wp_flags & WP_SETPROT) == 0) {
					pwp->wp_flags |= WP_SETPROT;
					pwp->wp_list = p->p_wprot;
					p->p_wprot = pwp;
				}
			}
		}

		pwp = AVL_NEXT(tree, pwp);
	}

	AS_LOCK_EXIT(as);
}
/*
 * Return the original protections for the specified page.
 */
static void
getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
{
	struct watched_page *pwp;
	struct watched_page tpw;

	ASSERT(AS_LOCK_HELD(as));

	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
		*prot = pwp->wp_oprot;
}
static prpagev_t *
pr_pagev_create(struct seg *seg, int check_noreserve)
{
	prpagev_t *pagev = kmem_alloc(sizeof (prpagev_t), KM_SLEEP);
	size_t total_pages = seg_pages(seg);

	/*
	 * Limit the size of our vectors to pagev_lim pages at a time.  We need
	 * 4 or 5 bytes of storage per page, so this means we limit ourself
	 * to about a megabyte of kernel heap by default.
	 */
	pagev->pg_npages = MIN(total_pages, pagev_lim);
	pagev->pg_pnbase = 0;

	pagev->pg_protv =
	    kmem_alloc(pagev->pg_npages * sizeof (uint_t), KM_SLEEP);

	if (check_noreserve)
		pagev->pg_incore =
		    kmem_alloc(pagev->pg_npages * sizeof (char), KM_SLEEP);
	else
		pagev->pg_incore = NULL;

	return (pagev);
}

static void
pr_pagev_destroy(prpagev_t *pagev)
{
	if (pagev->pg_incore != NULL)
		kmem_free(pagev->pg_incore, pagev->pg_npages * sizeof (char));

	kmem_free(pagev->pg_protv, pagev->pg_npages * sizeof (uint_t));
	kmem_free(pagev, sizeof (prpagev_t));
}
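/*
 * Sizing example for the vectors above: with the default pagev_lim of
 * 256K pages, pg_protv costs 256K * sizeof (uint_t) = 1MB and pg_incore
 * (allocated only when MAP_NORESERVE checking is needed) another 256KB;
 * that is the "4 or 5 bytes of storage per page" mentioned in
 * pr_pagev_create().
 */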
static caddr_t
pr_pagev_fill(prpagev_t *pagev, struct seg *seg, caddr_t addr, caddr_t eaddr)
{
	ulong_t lastpg = seg_page(seg, eaddr - 1);
	ulong_t pn, pnlim;
	caddr_t saddr;
	size_t len;

	ASSERT(addr >= seg->s_base && addr <= eaddr);

	if (addr == eaddr)
		return (eaddr);

refill:
	ASSERT(addr < eaddr);
	pagev->pg_pnbase = seg_page(seg, addr);
	pnlim = pagev->pg_pnbase + pagev->pg_npages;
	saddr = addr;

	if (lastpg < pnlim)
		len = (size_t)(eaddr - addr);
	else
		len = pagev->pg_npages * PAGESIZE;

	if (pagev->pg_incore != NULL) {
		/*
		 * INCORE cleverly has different semantics than GETPROT:
		 * it returns info on pages up to but NOT including addr + len.
		 */
		(void) segop_incore(seg, addr, len, pagev->pg_incore);
		pn = pagev->pg_pnbase;

		do {
			/*
			 * Guilty knowledge here:  We know that segvn_incore
			 * returns more than just the low-order bit that
			 * indicates the page is actually in memory.  If any
			 * bits are set, then the page has backing store.
			 */
			if (pagev->pg_incore[pn++ - pagev->pg_pnbase])
				goto out;

		} while ((addr += PAGESIZE) < eaddr && pn < pnlim);

		/*
		 * If we examined all the pages in the vector but we're not
		 * at the end of the segment, take another lap.
		 */
		if (addr < eaddr)
			goto refill;
	}

	/*
	 * Need to take len - 1 because addr + len is the address of the
	 * first byte of the page just past the end of what we want.
	 */
out:
	(void) segop_getprot(seg, saddr, len - 1, pagev->pg_protv);
	return (addr);
}
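/*
 * pr_pagev_fill() returns the address of the first page at or beyond addr
 * that has backing store (or eaddr if there is none), with pg_protv and
 * pg_incore describing the pages from pg_pnbase onward.  Callers iterate
 * by calling it again whenever they step past the last page covered by
 * the vector.
 */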
static caddr_t
pr_pagev_nextprot(prpagev_t *pagev, struct seg *seg,
	caddr_t *saddrp, caddr_t eaddr, uint_t *protp)
{
	/*
	 * Our starting address is either the specified address, or the base
	 * address from the start of the pagev.  If the latter is greater,
	 * this means a previous call to pr_pagev_fill has already scanned
	 * further than the end of the previous mapping.
	 */
	caddr_t base = seg->s_base + pagev->pg_pnbase * PAGESIZE;
	caddr_t addr = MAX(*saddrp, base);
	ulong_t pn = seg_page(seg, addr);
	uint_t prot, nprot;

	/*
	 * If we're dealing with noreserve pages, then advance addr to
	 * the address of the next page which has backing store.
	 */
	if (pagev->pg_incore != NULL) {
		while (pagev->pg_incore[pn - pagev->pg_pnbase] == 0) {
			if ((addr += PAGESIZE) == eaddr) {
				*saddrp = addr;
				prot = 0;
				goto out;
			}
			if (++pn == pagev->pg_pnbase + pagev->pg_npages) {
				addr = pr_pagev_fill(pagev, seg, addr, eaddr);
				if (addr == eaddr) {
					*saddrp = addr;
					prot = 0;
					goto out;
				}
				pn = seg_page(seg, addr);
			}
		}
	}

	/*
	 * Get the protections on the page corresponding to addr.
	 */
	pn = seg_page(seg, addr);
	ASSERT(pn >= pagev->pg_pnbase);
	ASSERT(pn < (pagev->pg_pnbase + pagev->pg_npages));

	prot = pagev->pg_protv[pn - pagev->pg_pnbase];
	getwatchprot(seg->s_as, addr, &prot);
	*saddrp = addr;

	/*
	 * Now loop until we find a backed page with different protections
	 * or we reach the end of this segment.
	 */
	while ((addr += PAGESIZE) < eaddr) {
		/*
		 * If pn has advanced to the page number following what we
		 * have information on, refill the page vector and reset
		 * addr and pn.  If pr_pagev_fill does not return the
		 * address of the next page, we have a discontiguity and
		 * thus have reached the end of the current mapping.
		 */
		if (++pn == pagev->pg_pnbase + pagev->pg_npages) {
			caddr_t naddr = pr_pagev_fill(pagev, seg, addr, eaddr);
			if (naddr != addr)
				goto out;
			pn = seg_page(seg, addr);
		}

		/*
		 * The previous page's protections are in prot, and it has
		 * backing.  If this page is MAP_NORESERVE and has no backing,
		 * then end this mapping and return the previous protections.
		 */
		if (pagev->pg_incore != NULL &&
		    pagev->pg_incore[pn - pagev->pg_pnbase] == 0)
			break;

		/*
		 * Otherwise end the mapping if this page's protections (nprot)
		 * are different than those in the previous page (prot).
		 */
		nprot = pagev->pg_protv[pn - pagev->pg_pnbase];
		getwatchprot(seg->s_as, addr, &nprot);

		if (nprot != prot)
			break;
	}

out:
	*protp = prot;
	return (addr);
}
static size_t
pr_getsegsize(struct seg *seg, int reserved)
{
	size_t size = seg->s_size;

	/*
	 * If we're interested in the reserved space, return the size of the
	 * segment itself.  Everything else in this function is a special case
	 * to determine the actual underlying size of various segment types.
	 */
	if (reserved)
		return (size);

	/*
	 * If this is a segvn mapping of a regular file, return the smaller
	 * of the segment size and the remaining size of the file beyond
	 * the file offset corresponding to seg->s_base.
	 */
	if (seg->s_ops == &segvn_ops) {
		vattr_t vattr;
		vnode_t *vp;

		vattr.va_mask = AT_SIZE;

		if (segop_getvp(seg, seg->s_base, &vp) == 0 &&
		    vp != NULL && vp->v_type == VREG &&
		    fop_getattr(vp, &vattr, 0, CRED(), NULL) == 0) {

			uoff_t fsize = vattr.va_size;
			uoff_t offset = segop_getoffset(seg, seg->s_base);

			if (fsize < offset)
				fsize = 0;
			else
				fsize -= offset;

			fsize = roundup(fsize, (uoff_t)PAGESIZE);

			if (fsize < (uoff_t)size)
				size = (size_t)fsize;
		}

		return (size);
	}

	/*
	 * If this is an ISM shared segment, don't include pages that are
	 * beyond the real size of the spt segment that backs it.
	 */
	if (seg->s_ops == &segspt_shmops)
		return (MIN(spt_realsize(seg), size));

	/*
	 * If this segment is a mapping from /dev/null, then this is a
	 * reservation of virtual address space and has no actual size.
	 * Such segments are backed by segdev and have type set to neither
	 * MAP_SHARED nor MAP_PRIVATE.
	 */
	if (seg->s_ops == &segdev_ops &&
	    ((segop_gettype(seg, seg->s_base) &
	    (MAP_SHARED | MAP_PRIVATE)) == 0))
		return (0);

	/*
	 * If this segment doesn't match one of the special types we handle,
	 * just return the size of the segment itself.
	 */
	return (size);
}
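/*
 * pr_getprot() below is an iterator: starting from *saddrp it returns the
 * protections of the next maximal address range with uniform protections,
 * setting *naddrp to the end of that range.  The opaque *tmp carries the
 * prpagev_t between calls and is freed automatically once the end of the
 * segment is reached; a caller that stops iterating early must call
 * pr_getprot_done() itself.
 */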
uint_t
pr_getprot(struct seg *seg, int reserved, void **tmp,
	caddr_t *saddrp, caddr_t *naddrp, caddr_t eaddr)
{
	struct as *as = seg->s_as;

	caddr_t saddr = *saddrp;
	caddr_t naddr;

	int check_noreserve;
	uint_t prot;

	union {
		struct segvn_data *svd;
		struct segdev_data *sdp;
		void *data;
	} s;

	s.data = seg->s_data;

	ASSERT(AS_WRITE_HELD(as));
	ASSERT(saddr >= seg->s_base && saddr < eaddr);
	ASSERT(eaddr <= seg->s_base + seg->s_size);

	/*
	 * Don't include MAP_NORESERVE pages in the address range
	 * unless their mappings have actually materialized.
	 * We cheat by knowing that segvn is the only segment
	 * driver that supports MAP_NORESERVE.
	 */
	check_noreserve =
	    (!reserved && seg->s_ops == &segvn_ops && s.svd != NULL &&
	    (s.svd->vp == NULL || s.svd->vp->v_type != VREG) &&
	    (s.svd->flags & MAP_NORESERVE));

	/*
	 * Examine every page only as a last resort.  We use guilty knowledge
	 * of segvn and segdev to avoid this: if there are no per-page
	 * protections present in the segment and we don't care about
	 * MAP_NORESERVE, then s_data->prot is the prot for the whole segment.
	 */
	if (!check_noreserve && saddr == seg->s_base &&
	    seg->s_ops == &segvn_ops && s.svd != NULL &&
	    s.svd->pageprot == 0) {
		prot = s.svd->prot;
		getwatchprot(as, saddr, &prot);
		naddr = eaddr;

	} else if (saddr == seg->s_base && seg->s_ops == &segdev_ops &&
	    s.sdp != NULL && s.sdp->pageprot == 0) {
		prot = s.sdp->prot;
		getwatchprot(as, saddr, &prot);
		naddr = eaddr;

	} else {
		prpagev_t *pagev;

		/*
		 * If addr is sitting at the start of the segment, then
		 * create a page vector to store protection and incore
		 * information for pages in the segment, and fill it.
		 * Otherwise, we expect *tmp to address the prpagev_t
		 * allocated by a previous call to this function.
		 */
		if (saddr == seg->s_base) {
			pagev = pr_pagev_create(seg, check_noreserve);
			saddr = pr_pagev_fill(pagev, seg, saddr, eaddr);

			ASSERT(*tmp == NULL);
			*tmp = pagev;

			ASSERT(saddr <= eaddr);
			*saddrp = saddr;

			if (saddr == eaddr) {
				naddr = saddr;
				prot = 0;
				goto out;
			}

		} else {
			ASSERT(*tmp != NULL);
			pagev = (prpagev_t *)*tmp;
		}

		naddr = pr_pagev_nextprot(pagev, seg, saddrp, eaddr, &prot);
		ASSERT(naddr <= eaddr);
	}

out:
	if (naddr == eaddr)
		pr_getprot_done(tmp);
	*naddrp = naddr;
	return (prot);
}

void
pr_getprot_done(void **tmp)
{
	if (*tmp != NULL) {
		pr_pagev_destroy((prpagev_t *)*tmp);
		*tmp = NULL;
	}
}
/*
 * Return true iff the vnode is a /proc file from the object directory.
 */
int
pr_isobject(vnode_t *vp)
{
	return (vn_matchops(vp, &prvnodeops) && VTOP(vp)->pr_type == PR_OBJECT);
}

/*
 * Return true iff the vnode is a /proc file opened by the process itself.
 */
int
pr_isself(vnode_t *vp)
{
	/*
	 * XXX: To retain binary compatibility with the old
	 * ioctl()-based version of /proc, we exempt self-opens
	 * of /proc/<pid> from being marked close-on-exec.
	 */
	return (vn_matchops(vp, &prvnodeops) &&
	    (VTOP(vp)->pr_flags & PR_ISSELF) &&
	    VTOP(vp)->pr_type != PR_PIDDIR);
}
static ssize_t
pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
{
	ssize_t pagesize, hatsize;

	ASSERT(AS_WRITE_HELD(seg->s_as));
	ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
	ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
	ASSERT(saddr < eaddr);

	pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);
	ASSERT(pagesize == -1 || IS_P2ALIGNED(pagesize, pagesize));
	ASSERT(pagesize != 0);

	if (pagesize == -1)
		pagesize = PAGESIZE;

	saddr += P2NPHASE((uintptr_t)saddr, pagesize);

	while (saddr < eaddr) {
		if (hatsize != hat_getpagesize(seg->s_as->a_hat, saddr))
			break;
		ASSERT(IS_P2ALIGNED(saddr, pagesize));
		saddr += pagesize;
	}

	*naddrp = ((saddr < eaddr) ? saddr : eaddr);
	return (hatsize);
}
/*
 * Return an array of structures with extended memory map information.
 * We allocate here; the caller must deallocate.
 */
int
prgetxmap(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	prxmap_t *mp;
	struct seg *seg;
	struct seg *brkseg, *stkseg;
	struct vnode *vp;
	struct vattr vattr;
	uint_t prot;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr, baddr;
		void *tmp = NULL;
		ssize_t psz;
		char *parr;
		uint64_t npages;
		uint64_t pagenum;

		/*
		 * Segment loop part one: iterate from the base of the segment
		 * to its end, pausing at each address boundary (baddr) between
		 * ranges that have different virtual memory protections.
		 */
		for (saddr = seg->s_base; saddr < eaddr; saddr = baddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &baddr, eaddr);
			ASSERT(baddr >= saddr && baddr <= eaddr);

			/*
			 * Segment loop part two: iterate from the current
			 * position to the end of the protection boundary,
			 * pausing at each address boundary (naddr) between
			 * ranges that have different underlying page sizes.
			 */
			for (; saddr < baddr; saddr = naddr) {
				psz = pr_getpagesize(seg, saddr, &naddr, baddr);
				ASSERT(naddr >= saddr && naddr <= baddr);

				mp = pr_iol_newbuf(iolhead, sizeof (*mp));

				mp->pr_vaddr = (uintptr_t)saddr;
				mp->pr_size = naddr - saddr;
				mp->pr_offset = segop_getoffset(seg, saddr);
				mp->pr_mflags = 0;
				if (prot & PROT_READ)
					mp->pr_mflags |= MA_READ;
				if (prot & PROT_WRITE)
					mp->pr_mflags |= MA_WRITE;
				if (prot & PROT_EXEC)
					mp->pr_mflags |= MA_EXEC;
				if (segop_gettype(seg, saddr) & MAP_SHARED)
					mp->pr_mflags |= MA_SHARED;
				if (segop_gettype(seg, saddr) & MAP_NORESERVE)
					mp->pr_mflags |= MA_NORESERVE;
				if (seg->s_ops == &segspt_shmops ||
				    (seg->s_ops == &segvn_ops &&
				    (segop_getvp(seg, saddr, &vp) != 0 ||
				    vp == NULL)))
					mp->pr_mflags |= MA_ANON;
				if (seg == brkseg)
					mp->pr_mflags |= MA_BREAK;
				else if (seg == stkseg)
					mp->pr_mflags |= MA_STACK;
				if (seg->s_ops == &segspt_shmops)
					mp->pr_mflags |= MA_ISM | MA_SHM;

				mp->pr_pagesize = PAGESIZE;
				if (psz == -1)
					mp->pr_hatpagesize = 0;
				else
					mp->pr_hatpagesize = psz;

				/*
				 * Manufacture a filename for the "object" dir.
				 */
				mp->pr_dev = PRNODEV;
				vattr.va_mask = AT_FSID|AT_NODEID;
				if (seg->s_ops == &segvn_ops &&
				    segop_getvp(seg, saddr, &vp) == 0 &&
				    vp != NULL && vp->v_type == VREG &&
				    fop_getattr(vp, &vattr, 0, CRED(),
				    NULL) == 0) {
					mp->pr_dev = vattr.va_fsid;
					mp->pr_ino = vattr.va_nodeid;
					if (vp == p->p_exec)
						(void) strcpy(mp->pr_mapname,
						    "a.out");
					else
						pr_object_name(mp->pr_mapname,
						    vp, &vattr);
				}

				/*
				 * Get the SysV shared memory id, if any.
				 */
				if ((mp->pr_mflags & MA_SHARED) &&
				    p->p_segacct && (mp->pr_shmid = shmgetid(p,
				    seg->s_base)) != SHMID_NONE) {
					if (mp->pr_shmid == SHMID_FREE)
						mp->pr_shmid = -1;

					mp->pr_mflags |= MA_SHM;
				} else {
					mp->pr_shmid = -1;
				}

				npages = ((uintptr_t)(naddr - saddr)) >>
				    PAGESHIFT;
				parr = kmem_zalloc(npages, KM_SLEEP);

				(void) segop_incore(seg, saddr, naddr - saddr,
				    parr);

				for (pagenum = 0; pagenum < npages; pagenum++) {
					if (parr[pagenum] & SEG_PAGE_INCORE)
						mp->pr_rss++;
					if (parr[pagenum] & SEG_PAGE_ANON)
						mp->pr_anon++;
					if (parr[pagenum] & SEG_PAGE_LOCKED)
						mp->pr_locked++;
				}
				kmem_free(parr, npages);
			}
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
/*
 * Return the process's credentials.  We don't need a 32-bit equivalent of
 * this function because prcred_t and prcred32_t are actually the same.
 */
void
prgetcred(proc_t *p, prcred_t *pcrp)
{
	mutex_enter(&p->p_crlock);
	cred2prcred(p->p_cred, pcrp);
	mutex_exit(&p->p_crlock);
}

void
prgetsecflags(proc_t *p, prsecflags_t *psfp)
{
	ASSERT(psfp != NULL);

	psfp->pr_version = PRSECFLAGS_VERSION_CURRENT;
	psfp->pr_lower = p->p_secflags.psf_lower;
	psfp->pr_upper = p->p_secflags.psf_upper;
	psfp->pr_effective = p->p_secflags.psf_effective;
	psfp->pr_inherit = p->p_secflags.psf_inherit;
}

/*
 * Compute actual size of the prpriv_t structure.
 */
size_t
prgetprivsize(void)
{
	return (priv_prgetprivsize(NULL));
}

/*
 * Return the process's privileges.  We don't need a 32-bit equivalent of
 * this function because prpriv_t and prpriv32_t are actually the same.
 */
void
prgetpriv(proc_t *p, prpriv_t *pprp)
{
	mutex_enter(&p->p_crlock);
	cred2prpriv(p->p_cred, pprp);
	mutex_exit(&p->p_crlock);
}
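/*
 * prgetxmap32() below repeats the prgetxmap() walk for 32-bit consumers,
 * narrowing addresses to caddr32_t, sizes to size32_t, and compressing
 * the device number with cmpldev().
 */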
#ifdef _SYSCALL32_IMPL
/*
 * Return an array of structures with HAT memory map information.
 * We allocate here; the caller must deallocate.
 */
int
prgetxmap32(proc_t *p, list_t *iolhead)
{
	struct as *as = p->p_as;
	prxmap32_t *mp;
	struct seg *seg;
	struct seg *brkseg, *stkseg;
	struct vnode *vp;
	struct vattr vattr;
	uint_t prot;

	ASSERT(as != &kas && AS_WRITE_HELD(as));

	/*
	 * Request an initial buffer size that doesn't waste memory
	 * if the address space has only a small number of segments.
	 */
	pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));

	if ((seg = AS_SEGFIRST(as)) == NULL)
		return (0);

	brkseg = break_seg(p);
	stkseg = as_segat(as, prgetstackbase(p));

	do {
		caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
		caddr_t saddr, naddr, baddr;
		void *tmp = NULL;
		ssize_t psz;
		char *parr;
		uint64_t npages;
		uint64_t pagenum;

		/*
		 * Segment loop part one: iterate from the base of the segment
		 * to its end, pausing at each address boundary (baddr) between
		 * ranges that have different virtual memory protections.
		 */
		for (saddr = seg->s_base; saddr < eaddr; saddr = baddr) {
			prot = pr_getprot(seg, 0, &tmp, &saddr, &baddr, eaddr);
			ASSERT(baddr >= saddr && baddr <= eaddr);

			/*
			 * Segment loop part two: iterate from the current
			 * position to the end of the protection boundary,
			 * pausing at each address boundary (naddr) between
			 * ranges that have different underlying page sizes.
			 */
			for (; saddr < baddr; saddr = naddr) {
				psz = pr_getpagesize(seg, saddr, &naddr, baddr);
				ASSERT(naddr >= saddr && naddr <= baddr);

				mp = pr_iol_newbuf(iolhead, sizeof (*mp));

				mp->pr_vaddr = (caddr32_t)(uintptr_t)saddr;
				mp->pr_size = (size32_t)(naddr - saddr);
				mp->pr_offset = segop_getoffset(seg, saddr);
				mp->pr_mflags = 0;
				if (prot & PROT_READ)
					mp->pr_mflags |= MA_READ;
				if (prot & PROT_WRITE)
					mp->pr_mflags |= MA_WRITE;
				if (prot & PROT_EXEC)
					mp->pr_mflags |= MA_EXEC;
				if (segop_gettype(seg, saddr) & MAP_SHARED)
					mp->pr_mflags |= MA_SHARED;
				if (segop_gettype(seg, saddr) & MAP_NORESERVE)
					mp->pr_mflags |= MA_NORESERVE;
				if (seg->s_ops == &segspt_shmops ||
				    (seg->s_ops == &segvn_ops &&
				    (segop_getvp(seg, saddr, &vp) != 0 ||
				    vp == NULL)))
					mp->pr_mflags |= MA_ANON;
				if (seg == brkseg)
					mp->pr_mflags |= MA_BREAK;
				else if (seg == stkseg)
					mp->pr_mflags |= MA_STACK;
				if (seg->s_ops == &segspt_shmops)
					mp->pr_mflags |= MA_ISM | MA_SHM;

				mp->pr_pagesize = PAGESIZE;
				if (psz == -1)
					mp->pr_hatpagesize = 0;
				else
					mp->pr_hatpagesize = psz;

				/*
				 * Manufacture a filename for the "object" dir.
				 */
				mp->pr_dev = PRNODEV32;
				vattr.va_mask = AT_FSID|AT_NODEID;
				if (seg->s_ops == &segvn_ops &&
				    segop_getvp(seg, saddr, &vp) == 0 &&
				    vp != NULL && vp->v_type == VREG &&
				    fop_getattr(vp, &vattr, 0, CRED(),
				    NULL) == 0) {
					(void) cmpldev(&mp->pr_dev,
					    vattr.va_fsid);
					mp->pr_ino = vattr.va_nodeid;
					if (vp == p->p_exec)
						(void) strcpy(mp->pr_mapname,
						    "a.out");
					else
						pr_object_name(mp->pr_mapname,
						    vp, &vattr);
				}

				/*
				 * Get the SysV shared memory id, if any.
				 */
				if ((mp->pr_mflags & MA_SHARED) &&
				    p->p_segacct && (mp->pr_shmid = shmgetid(p,
				    seg->s_base)) != SHMID_NONE) {
					if (mp->pr_shmid == SHMID_FREE)
						mp->pr_shmid = -1;

					mp->pr_mflags |= MA_SHM;
				} else {
					mp->pr_shmid = -1;
				}

				npages = ((uintptr_t)(naddr - saddr)) >>
				    PAGESHIFT;
				parr = kmem_zalloc(npages, KM_SLEEP);

				(void) segop_incore(seg, saddr, naddr - saddr,
				    parr);

				for (pagenum = 0; pagenum < npages; pagenum++) {
					if (parr[pagenum] & SEG_PAGE_INCORE)
						mp->pr_rss++;
					if (parr[pagenum] & SEG_PAGE_ANON)
						mp->pr_anon++;
					if (parr[pagenum] & SEG_PAGE_LOCKED)
						mp->pr_locked++;
				}
				kmem_free(parr, npages);
			}
		}
		ASSERT(tmp == NULL);
	} while ((seg = AS_SEGNEXT(as, seg)) != NULL);

	return (0);
}
#endif	/* _SYSCALL32_IMPL */