usr/src/cmd/ptools/pmap/pmap.c (unleashed/tickless.git)
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #include <stdio.h>
28 #include <stdio_ext.h>
29 #include <stdlib.h>
30 #include <unistd.h>
31 #include <ctype.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <dirent.h>
35 #include <limits.h>
36 #include <link.h>
37 #include <libelf.h>
38 #include <sys/types.h>
39 #include <signal.h>
40 #include <sys/stat.h>
41 #include <sys/mkdev.h>
42 #include <sys/mman.h>
43 #include <sys/lgrp_user.h>
44 #include <libproc.h>
46 #include "pmap_common.h"
48 #define KILOBYTE 1024
49 #define MEGABYTE (KILOBYTE * KILOBYTE)
50 #define GIGABYTE (KILOBYTE * KILOBYTE * KILOBYTE)
53 * Round up the value to the nearest kilobyte
55 #define ROUNDUP_KB(x) (((x) + (KILOBYTE - 1)) / KILOBYTE)
58 * The alignment should be a power of 2.
60 #define P2ALIGN(x, align) ((x) & -(align))
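/*
 * Illustrative values for the two macros above: ROUNDUP_KB(1) == 1,
 * ROUNDUP_KB(1025) == 2; P2ALIGN(0x12345, 0x1000) == 0x12000.
 */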
62 #define INVALID_ADDRESS (uintptr_t)(-1)
64 struct totals {
65 ulong_t total_size;
66 ulong_t total_swap;
67 ulong_t total_rss;
68 ulong_t total_anon;
69 ulong_t total_locked;
73 * The -L option requires per-page information. The information is presented in an
74 * array of page_descr structures.
76 typedef struct page_descr {
77 uintptr_t pd_start; /* start address of a page */
78 size_t pd_pagesize; /* page size in bytes */
79 lgrp_id_t pd_lgrp; /* lgroup of memory backing the page */
80 int pd_valid; /* valid page description if non-zero */
81 } page_descr_t;
84 * Per-page information for a memory chunk.
85 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once.
86 * When we need to scan larger ranges we divide them into MAX_MEMINFO_CNT-sized
87 * chunks. The chunk information is stored in the memory_chunk structure.
89 typedef struct memory_chunk {
90 page_descr_t page_info[MAX_MEMINFO_CNT];
91 uintptr_t end_addr;
92 uintptr_t chunk_start; /* Starting address */
93 uintptr_t chunk_end; /* chunk_end is always <= end_addr */
94 size_t page_size;
95 int page_index; /* Current page */
96 int page_count; /* Number of pages */
97 } memory_chunk_t;
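/*
 * A chunk caches meminfo(2) results for up to MAX_MEMINFO_CNT consecutive
 * pages in [chunk_start, chunk_end]; addr_to_lgrp() looks pages up in this
 * cache and mem_chunk_get() refills it whenever an address falls outside the
 * current window.
 */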
99 static volatile int interrupt;
101 typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int);
103 static int xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *,
104 int);
105 static int rmapping_iter(struct ps_prochandle *, proc_map_f *, void *);
107 static int look_map(void *, const prmap_t *, const char *);
108 static int look_smap(void *, const prxmap_t *, const char *, int, int);
109 static int look_xmap(void *, const prxmap_t *, const char *, int, int);
110 static int look_xmap_nopgsz(void *, const prxmap_t *, const char *,
111 int, int);
113 static int gather_map(void *, const prmap_t *, const char *);
114 static int gather_xmap(void *, const prxmap_t *, const char *, int, int);
115 static int iter_map(proc_map_f *, void *);
116 static int iter_xmap(proc_xmap_f *, void *);
117 static int parse_addr_range(char *, uintptr_t *, uintptr_t *);
118 static void mem_chunk_init(memory_chunk_t *, uintptr_t, size_t);
120 static int perr(char *);
121 static void printK(long, int);
122 static char *mflags(uint_t);
124 static size_t get_contiguous_region(memory_chunk_t *, uintptr_t,
125 uintptr_t, size_t, lgrp_id_t *);
126 static void mem_chunk_get(memory_chunk_t *, uintptr_t);
127 static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *);
128 static char *lgrp2str(lgrp_id_t);
130 static int address_in_range(uintptr_t, uintptr_t, size_t);
131 static size_t adjust_addr_range(uintptr_t, uintptr_t, size_t,
132 uintptr_t *, uintptr_t *);
134 static int lflag = 0;
135 static int Lflag = 0;
136 static int aflag = 0;
139 * The -A address range is represented as a pair of addresses
140 * <start_addr, end_addr>. Either one of these may be unspecified (set to
141 * INVALID_ADDRESS). If both are unspecified, no address range restrictions are
142 * in place.
144 static uintptr_t start_addr = INVALID_ADDRESS;
145 static uintptr_t end_addr = INVALID_ADDRESS;
147 static int addr_width, size_width;
148 static char *command;
149 static char *procname;
150 static struct ps_prochandle *Pr;
152 static void intr(int);
154 typedef struct {
155 prxmap_t md_xmap;
156 prmap_t md_map;
157 char *md_objname;
158 boolean_t md_last;
159 int md_doswap;
160 } mapdata_t;
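/*
 * Mappings are processed in two passes: the gather_map()/gather_xmap()
 * callbacks snapshot each mapping into the maps[] array below while the
 * target's address space is known to be consistent, and iter_map()/iter_xmap()
 * then replay that snapshot through the look_*() display routines.
 */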
162 static mapdata_t *maps;
163 static int map_count;
164 static int map_alloc;
166 static lwpstack_t *stacks = NULL;
167 static uint_t nstacks = 0;
169 #define MAX_TRIES 5
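/*
 * Plwp_iter() callback: record each LWP's alternate and main stacks in the
 * stacks[] array so that anon_name() can later recognize anonymous mappings
 * that back thread stacks.
 */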
171 static int
172 getstack(void *data, const lwpstatus_t *lsp)
174 int *np = (int *)data;
176 if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
177 stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK;
178 stacks[*np].lwps_lwpid = lsp->pr_lwpid;
179 (*np)++;
182 if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
183 stacks[*np].lwps_lwpid = lsp->pr_lwpid;
184 (*np)++;
187 return (0);
191 main(int argc, char **argv)
193 int rflag = 0, sflag = 0, xflag = 0, Fflag = 0;
194 int errflg = 0, Sflag = 0;
195 int rc = 0;
196 int opt;
197 const char *bar8 = "-------";
198 const char *bar16 = "----------";
199 const char *bar;
200 struct rlimit rlim;
201 struct stat64 statbuf;
202 char buf[128];
203 int mapfd;
204 int prg_gflags = PGRAB_RDONLY;
205 int prr_flags = 0;
206 boolean_t use_agent_lwp = B_FALSE;
208 if ((command = strrchr(argv[0], '/')) != NULL)
209 command++;
210 else
211 command = argv[0];
213 while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) {
214 switch (opt) {
215 case 'a': /* include shared mappings in -[xS] */
216 aflag = 1;
217 break;
218 case 'r': /* show reserved mappings */
219 rflag = 1;
220 break;
221 case 's': /* show hardware page sizes */
222 sflag = 1;
223 break;
224 case 'S': /* show swap reservations */
225 Sflag = 1;
226 break;
227 case 'x': /* show extended mappings */
228 xflag = 1;
229 break;
230 case 'l': /* show unresolved link map names */
231 lflag = 1;
232 break;
233 case 'L': /* show lgroup information */
234 Lflag = 1;
235 use_agent_lwp = B_TRUE;
236 break;
237 case 'F': /* force grabbing (no O_EXCL) */
238 Fflag = PGRAB_FORCE;
239 break;
240 case 'A':
241 if (parse_addr_range(optarg, &start_addr, &end_addr)
242 != 0)
243 errflg++;
244 break;
245 default:
246 errflg = 1;
247 break;
251 argc -= optind;
252 argv += optind;
254 if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) ||
255 (aflag && (!xflag && !Sflag)) ||
256 (Lflag && (xflag || Sflag))) {
257 errflg = 1;
260 if (errflg || argc <= 0) {
261 (void) fprintf(stderr,
262 "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n",
263 command);
264 (void) fprintf(stderr,
265 "\t\t(report process address maps)\n");
266 (void) fprintf(stderr,
267 "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command);
268 (void) fprintf(stderr,
269 "\t\t(report process address maps lgroups mappings)\n");
270 (void) fprintf(stderr,
271 "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command);
272 (void) fprintf(stderr,
273 "\t\t(show resident/anon/locked mapping details)\n");
274 (void) fprintf(stderr,
275 "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n",
276 command);
277 (void) fprintf(stderr,
278 "\t\t(show swap reservations)\n\n");
279 (void) fprintf(stderr,
280 "\t-a: include shared mappings in -[xS] summary\n");
281 (void) fprintf(stderr,
282 "\t-r: show reserved address maps\n");
283 (void) fprintf(stderr,
284 "\t-s: show hardware page sizes\n");
285 (void) fprintf(stderr,
286 "\t-l: show unresolved dynamic linker map names\n");
287 (void) fprintf(stderr,
288 "\t-F: force grabbing of the target process\n");
289 (void) fprintf(stderr,
290 "\t-L: show lgroup mappings\n");
291 (void) fprintf(stderr,
292 "\t-A start,end: limit output to the specified range\n");
293 return (2);
297 * Make sure we'll have enough file descriptors to handle a target
298 * that has many, many mappings.
300 if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
301 rlim.rlim_cur = rlim.rlim_max;
302 (void) setrlimit(RLIMIT_NOFILE, &rlim);
303 (void) enable_extended_FILE_stdio(-1, -1);
307 * The implementation of the -L option creates an agent LWP in the target
308 * process address space. The agent LWP issues meminfo(2) system calls
309 * on behalf of the target process. If we are interrupted prematurely,
310 * the target process remains in the stopped state with the agent still
311 * attached to it. To prevent such a situation we catch signals from the
312 * terminal and terminate gracefully.
314 if (use_agent_lwp) {
316 * Buffer output to stdout, stderr while process is grabbed.
317 * Prevents infamous deadlocks due to pmap `pgrep xterm` and
318 * other variants.
320 (void) proc_initstdio();
322 prg_gflags = PGRAB_RETAIN | Fflag;
323 prr_flags = PRELEASE_RETAIN;
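/*
 * Install the interrupt handler only where the signal is still at its
 * default disposition; a signal that is already being ignored (e.g.
 * SIGHUP under nohup) stays ignored.
 */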
325 if (sigset(SIGHUP, SIG_IGN) == SIG_DFL)
326 (void) sigset(SIGHUP, intr);
327 if (sigset(SIGINT, SIG_IGN) == SIG_DFL)
328 (void) sigset(SIGINT, intr);
329 if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL)
330 (void) sigset(SIGQUIT, intr);
331 (void) sigset(SIGPIPE, intr);
332 (void) sigset(SIGTERM, intr);
335 while (argc-- > 0) {
336 char *arg;
337 int gcode;
338 psinfo_t psinfo;
339 int tries = 0;
341 if (use_agent_lwp)
342 (void) proc_flushstdio();
344 if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY,
345 prg_gflags, &gcode)) == NULL) {
346 (void) fprintf(stderr, "%s: cannot examine %s: %s\n",
347 command, arg, Pgrab_error(gcode));
348 rc++;
349 continue;
352 procname = arg; /* for perr() */
354 addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8;
355 size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8;
356 bar = addr_width == 8 ? bar8 : bar16;
357 (void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t));
358 proc_unctrl_psinfo(&psinfo);
360 if (Pstate(Pr) != PS_DEAD) {
361 (void) snprintf(buf, sizeof (buf),
362 "/proc/%d/map", (int)psinfo.pr_pid);
363 if ((mapfd = open(buf, O_RDONLY)) < 0) {
364 (void) fprintf(stderr, "%s: cannot "
365 "examine %s: lost control of "
366 "process\n", command, arg);
367 rc++;
368 Prelease(Pr, prr_flags);
369 continue;
371 } else {
372 mapfd = -1;
375 again:
376 map_count = 0;
378 if (Pstate(Pr) == PS_DEAD) {
379 (void) printf("core '%s' of %d:\t%.70s\n",
380 arg, (int)psinfo.pr_pid, psinfo.pr_psargs);
382 if (rflag || sflag || xflag || Sflag || Lflag) {
383 (void) printf(" -%c option is not compatible "
384 "with core files\n", xflag ? 'x' :
385 sflag ? 's' : rflag ? 'r' :
386 Lflag ? 'L' : 'S');
387 Prelease(Pr, prr_flags);
388 rc++;
389 continue;
392 } else {
393 (void) printf("%d:\t%.70s\n",
394 (int)psinfo.pr_pid, psinfo.pr_psargs);
397 if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) {
398 struct totals t;
401 * Since we're grabbing the process readonly, we need
402 * to make sure the address space doesn't change during
403 * execution.
405 if (Pstate(Pr) != PS_DEAD) {
406 if (tries++ == MAX_TRIES) {
407 Prelease(Pr, prr_flags);
408 (void) close(mapfd);
409 (void) fprintf(stderr, "%s: cannot "
410 "examine %s: address space is "
411 "changing\n", command, arg);
412 continue;
415 if (fstat64(mapfd, &statbuf) != 0) {
416 Prelease(Pr, prr_flags);
417 (void) close(mapfd);
418 (void) fprintf(stderr, "%s: cannot "
419 "examine %s: lost control of "
420 "process\n", command, arg);
421 continue;
425 nstacks = psinfo.pr_nlwp * 2;
426 stacks = calloc(nstacks, sizeof (stacks[0]));
427 if (stacks != NULL) {
428 int n = 0;
429 (void) Plwp_iter(Pr, getstack, &n);
430 qsort(stacks, nstacks, sizeof (stacks[0]),
431 cmpstacks);
434 (void) memset(&t, 0, sizeof (t));
436 if (Pgetauxval(Pr, AT_BASE) != -1L &&
437 Prd_agent(Pr) == NULL) {
438 (void) fprintf(stderr, "%s: warning: "
439 "librtld_db failed to initialize; "
440 "shared library information will not be "
441 "available\n", command);
445 * Gather data
447 if (xflag)
448 rc += xmapping_iter(Pr, gather_xmap, NULL, 0);
449 else if (Sflag)
450 rc += xmapping_iter(Pr, gather_xmap, NULL, 1);
451 else {
452 if (rflag)
453 rc += rmapping_iter(Pr, gather_map,
454 NULL);
455 else if (sflag)
456 rc += xmapping_iter(Pr, gather_xmap,
457 NULL, 0);
458 else if (lflag)
459 rc += Pmapping_iter(Pr,
460 gather_map, NULL);
461 else
462 rc += Pmapping_iter_resolved(Pr,
463 gather_map, NULL);
467 * Ensure mappings are consistent.
469 if (Pstate(Pr) != PS_DEAD) {
470 struct stat64 newbuf;
472 if (fstat64(mapfd, &newbuf) != 0 ||
473 memcmp(&newbuf.st_mtim, &statbuf.st_mtim,
474 sizeof (newbuf.st_mtim)) != 0) {
475 if (stacks != NULL) {
476 free(stacks);
477 stacks = NULL;
479 goto again;
484 * Display data.
486 if (xflag) {
487 (void) printf("%*s%*s%*s%*s%*s "
488 "%sMode Mapped File\n",
489 addr_width, "Address",
490 size_width, "Kbytes",
491 size_width, "RSS",
492 size_width, "Anon",
493 size_width, "Locked",
494 sflag ? "Pgsz " : "");
496 rc += iter_xmap(sflag ? look_xmap :
497 look_xmap_nopgsz, &t);
499 (void) printf("%s%s %s %s %s %s\n",
500 addr_width == 8 ? "-" : "------",
501 bar, bar, bar, bar, bar);
503 (void) printf("%stotal Kb", addr_width == 16 ?
504 " " : "");
506 printK(t.total_size, size_width);
507 printK(t.total_rss, size_width);
508 printK(t.total_anon, size_width);
509 printK(t.total_locked, size_width);
511 (void) printf("\n");
513 } else if (Sflag) {
514 (void) printf("%*s%*s%*s Mode"
515 " Mapped File\n",
516 addr_width, "Address",
517 size_width, "Kbytes",
518 size_width, "Swap");
520 rc += iter_xmap(look_xmap_nopgsz, &t);
522 (void) printf("%s%s %s %s\n",
523 addr_width == 8 ? "-" : "------",
524 bar, bar, bar);
526 (void) printf("%stotal Kb", addr_width == 16 ?
527 " " : "");
529 printK(t.total_size, size_width);
530 printK(t.total_swap, size_width);
532 (void) printf("\n");
534 } else {
536 if (rflag) {
537 rc += iter_map(look_map, &t);
538 } else if (sflag) {
539 if (Lflag) {
540 (void) printf("%*s %*s %4s"
541 " %-6s %s %s\n",
542 addr_width, "Address",
543 size_width,
544 "Bytes", "Pgsz", "Mode ",
545 "Lgrp", "Mapped File");
546 rc += iter_xmap(look_smap, &t);
547 } else {
548 (void) printf("%*s %*s %4s"
549 " %-6s %s\n",
550 addr_width, "Address",
551 size_width,
552 "Bytes", "Pgsz", "Mode ",
553 "Mapped File");
554 rc += iter_xmap(look_smap, &t);
556 } else {
557 rc += iter_map(look_map, &t);
560 (void) printf(" %stotal %*luK\n",
561 addr_width == 16 ?
562 " " : "",
563 size_width, t.total_size);
566 if (stacks != NULL) {
567 free(stacks);
568 stacks = NULL;
573 Prelease(Pr, prr_flags);
574 if (mapfd != -1)
575 (void) close(mapfd);
578 if (use_agent_lwp)
579 (void) proc_finistdio();
581 return (rc);
584 static int
585 rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd)
587 char mapname[PATH_MAX];
588 int mapfd, nmap, i, rc;
589 struct stat st;
590 prmap_t *prmapp, *pmp;
591 ssize_t n;
593 (void) snprintf(mapname, sizeof (mapname),
594 "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid);
596 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
597 if (mapfd >= 0)
598 (void) close(mapfd);
599 return (perr(mapname));
602 nmap = st.st_size / sizeof (prmap_t);
603 prmapp = malloc((nmap + 1) * sizeof (prmap_t));
605 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) {
606 (void) close(mapfd);
607 free(prmapp);
608 return (perr("read rmap"));
611 (void) close(mapfd);
612 nmap = n / sizeof (prmap_t);
614 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
615 if ((rc = func(cd, pmp, NULL)) != 0) {
616 free(prmapp);
617 return (rc);
621 free(prmapp);
622 return (0);
625 static int
626 xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd, int doswap)
628 char mapname[PATH_MAX];
629 int mapfd, nmap, i, rc;
630 struct stat st;
631 prxmap_t *prmapp, *pmp;
632 ssize_t n;
634 (void) snprintf(mapname, sizeof (mapname),
635 "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid);
637 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
638 if (mapfd >= 0)
639 (void) close(mapfd);
640 return (perr(mapname));
643 nmap = st.st_size / sizeof (prxmap_t);
644 nmap *= 2;
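/*
 * st_size may lag behind the live xmap file, so start with twice the
 * computed count; if pread() still fills the whole buffer (checked below),
 * double the estimate and retry.
 */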
645 again:
646 prmapp = malloc((nmap + 1) * sizeof (prxmap_t));
648 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) {
649 (void) close(mapfd);
650 free(prmapp);
651 return (perr("read xmap"));
654 if (nmap < n / sizeof (prxmap_t)) {
655 free(prmapp);
656 nmap *= 2;
657 goto again;
660 (void) close(mapfd);
661 nmap = n / sizeof (prxmap_t);
663 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
664 if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) {
665 free(prmapp);
666 return (rc);
671 * Mark the last element.
673 if (map_count > 0)
674 maps[map_count - 1].md_last = B_TRUE;
676 free(prmapp);
677 return (0);
680 /*ARGSUSED*/
681 static int
682 look_map(void *data, const prmap_t *pmp, const char *object_name)
684 struct totals *t = data;
685 const pstatus_t *Psp = Pstatus(Pr);
686 size_t size;
687 char mname[PATH_MAX];
688 char *lname = NULL;
689 size_t psz = pmp->pr_pagesize;
690 uintptr_t vaddr = pmp->pr_vaddr;
691 uintptr_t segment_end = vaddr + pmp->pr_size;
692 lgrp_id_t lgrp;
693 memory_chunk_t mchunk;
696 * If the mapping is not anon or not part of the heap, make a name
697 * for it. We don't want to report the heap as a.out's data.
699 if (!(pmp->pr_mflags & MA_ANON) ||
700 segment_end <= Psp->pr_brkbase ||
701 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
702 lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
703 mname, sizeof (mname));
706 if (lname == NULL &&
707 ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
708 lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
709 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
713 * Adjust the address range if -A is specified.
715 size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
716 &vaddr, &segment_end);
718 if (size == 0)
719 return (0);
721 if (!Lflag) {
723 * Display the whole mapping
725 size = ROUNDUP_KB(size);
727 (void) printf(lname ?
728 "%.*lX %*luK %-6s %s\n" :
729 "%.*lX %*luK %s\n",
730 addr_width, vaddr,
731 size_width - 1, size, mflags(pmp->pr_mflags), lname);
733 t->total_size += size;
734 return (0);
738 * We need to display lgroups backing physical memory, so we break the
739 * segment into individual pages and coalesce pages with the same lgroup
740 * into one "segment".
744 * Initialize address descriptions for the mapping.
746 mem_chunk_init(&mchunk, segment_end, psz);
747 size = 0;
750 * Walk mapping (page by page) and display contiguous ranges of memory
751 * allocated to the same lgroup.
753 do {
754 size_t size_contig;
757 * Get contiguous region of memory starting from vaddr allocated
758 * from the same lgroup.
760 size_contig = get_contiguous_region(&mchunk, vaddr,
761 segment_end, pmp->pr_pagesize, &lgrp);
763 (void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" :
764 "%.*lX %*luK %s %s\n",
765 addr_width, vaddr,
766 size_width - 1, size_contig / KILOBYTE,
767 mflags(pmp->pr_mflags),
768 lgrp2str(lgrp), lname);
770 vaddr += size_contig;
771 size += size_contig;
772 } while (vaddr < segment_end && !interrupt);
774 /* Update the total size */
775 t->total_size += ROUNDUP_KB(size);
776 return (0);
779 static void
780 printK(long value, int width)
782 if (value == 0)
783 (void) printf(width == 8 ? " -" : " -");
784 else
785 (void) printf(" %*lu", width - 1, value);
788 static const char *
789 pagesize(const prxmap_t *pmp)
791 int pagesize = pmp->pr_hatpagesize;
792 static char buf[32];
794 if (pagesize == 0) {
795 return ("-"); /* no underlying HAT mapping */
798 if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) {
799 if ((pagesize % GIGABYTE) == 0)
800 (void) snprintf(buf, sizeof (buf), "%dG",
801 pagesize / GIGABYTE);
802 else if ((pagesize % MEGABYTE) == 0)
803 (void) snprintf(buf, sizeof (buf), "%dM",
804 pagesize / MEGABYTE);
805 else
806 (void) snprintf(buf, sizeof (buf), "%dK",
807 pagesize / KILOBYTE);
808 } else
809 (void) snprintf(buf, sizeof (buf), "%db", pagesize);
811 return (buf);
814 /*ARGSUSED*/
815 static int
816 look_smap(void *data,
817 const prxmap_t *pmp,
818 const char *object_name,
819 int last, int doswap)
821 struct totals *t = data;
822 const pstatus_t *Psp = Pstatus(Pr);
823 size_t size;
824 char mname[PATH_MAX];
825 char *lname = NULL;
826 const char *format;
827 size_t psz = pmp->pr_pagesize;
828 uintptr_t vaddr = pmp->pr_vaddr;
829 uintptr_t segment_end = vaddr + pmp->pr_size;
830 lgrp_id_t lgrp;
831 memory_chunk_t mchunk;
834 * If the mapping is not anon or not part of the heap, make a name
835 * for it. We don't want to report the heap as a.out's data.
837 if (!(pmp->pr_mflags & MA_ANON) ||
838 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
839 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
840 lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
841 mname, sizeof (mname));
844 if (lname == NULL &&
845 ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
846 lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
847 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
851 * Adjust the address range if -A is specified.
853 size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
854 &vaddr, &segment_end);
856 if (size == 0)
857 return (0);
859 if (!Lflag) {
861 * Display the whole mapping
863 if (lname != NULL)
864 format = "%.*lX %*luK %4s %-6s %s\n";
865 else
866 format = "%.*lX %*luK %4s %s\n";
868 size = ROUNDUP_KB(size);
870 (void) printf(format, addr_width, vaddr, size_width - 1, size,
871 pagesize(pmp), mflags(pmp->pr_mflags), lname);
873 t->total_size += size;
874 return (0);
877 if (lname != NULL)
878 format = "%.*lX %*luK %4s %-6s%s %s\n";
879 else
880 format = "%.*lX %*luK %4s%s %s\n";
883 * We need to display lgroups backing physical memory, so we break the
884 * segment into individual pages and coalesce pages with the same lgroup
885 * into one "segment".
889 * Initialize address descriptions for the mapping.
891 mem_chunk_init(&mchunk, segment_end, psz);
892 size = 0;
895 * Walk mapping (page by page) and display contiguous ranges of memory
896 * allocated to the same lgroup.
898 do {
899 size_t size_contig;
902 * Get contiguous region of memory starting from vaddr allocated
903 * from the same lgroup.
905 size_contig = get_contiguous_region(&mchunk, vaddr,
906 segment_end, pmp->pr_pagesize, &lgrp);
908 (void) printf(format, addr_width, vaddr,
909 size_width - 1, size_contig / KILOBYTE,
910 pagesize(pmp), mflags(pmp->pr_mflags),
911 lgrp2str(lgrp), lname);
913 vaddr += size_contig;
914 size += size_contig;
915 } while (vaddr < segment_end && !interrupt);
917 t->total_size += ROUNDUP_KB(size);
918 return (0);
921 #define ANON(x) ((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \
922 ((x)->pr_anon) : 0)
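/*
 * Anonymous-page count used in the -x/-S accounting: shared mappings
 * contribute their anon pages only when -a was specified.
 */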
924 /*ARGSUSED*/
925 static int
926 look_xmap(void *data,
927 const prxmap_t *pmp,
928 const char *object_name,
929 int last, int doswap)
931 struct totals *t = data;
932 const pstatus_t *Psp = Pstatus(Pr);
933 char mname[PATH_MAX];
934 char *lname = NULL;
935 char *ln;
938 * If the mapping is not anon or not part of the heap, make a name
939 * for it. We don't want to report the heap as a.out's data.
941 if (!(pmp->pr_mflags & MA_ANON) ||
942 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
943 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
944 lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
945 mname, sizeof (mname));
948 if (lname != NULL) {
949 if ((ln = strrchr(lname, '/')) != NULL)
950 lname = ln + 1;
951 } else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
952 lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
953 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
956 (void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
958 printK(ROUNDUP_KB(pmp->pr_size), size_width);
959 printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
960 printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
961 printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
962 (void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
963 pagesize(pmp), mflags(pmp->pr_mflags), lname);
965 t->total_size += ROUNDUP_KB(pmp->pr_size);
966 t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
967 t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
968 t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));
970 return (0);
973 /*ARGSUSED*/
974 static int
975 look_xmap_nopgsz(void *data,
976 const prxmap_t *pmp,
977 const char *object_name,
978 int last, int doswap)
980 struct totals *t = data;
981 const pstatus_t *Psp = Pstatus(Pr);
982 char mname[PATH_MAX];
983 char *lname = NULL;
984 char *ln;
985 static uintptr_t prev_vaddr;
986 static size_t prev_size;
987 static offset_t prev_offset;
988 static int prev_mflags;
989 static char *prev_lname;
990 static char prev_mname[PATH_MAX];
991 static ulong_t prev_rss;
992 static ulong_t prev_anon;
993 static ulong_t prev_locked;
994 static ulong_t prev_swap;
995 int merged = 0;
996 static int first = 1;
997 ulong_t swap = 0;
998 int kperpage;
1001 * Calculate swap reservations
1003 if (pmp->pr_mflags & MA_SHARED) {
1004 if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) {
1005 /* Swap reserved for entire non-ism SHM */
1006 swap = pmp->pr_size / pmp->pr_pagesize;
1008 } else if (pmp->pr_mflags & MA_NORESERVE) {
1009 /* Swap reserved on fault for each anon page */
1010 swap = pmp->pr_anon;
1011 } else if (pmp->pr_mflags & MA_WRITE) {
1012 /* Swap reserve for entire writable segment */
1013 swap = pmp->pr_size / pmp->pr_pagesize;
1017 * If the mapping is not anon or not part of the heap, make a name
1018 * for it. We don't want to report the heap as a.out's data.
1020 if (!(pmp->pr_mflags & MA_ANON) ||
1021 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
1022 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
1023 lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
1024 mname, sizeof (mname));
1027 if (lname != NULL) {
1028 if ((ln = strrchr(lname, '/')) != NULL)
1029 lname = ln + 1;
1030 } else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
1031 lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
1032 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
1035 kperpage = pmp->pr_pagesize / KILOBYTE;
1037 t->total_size += ROUNDUP_KB(pmp->pr_size);
1038 t->total_rss += pmp->pr_rss * kperpage;
1039 t->total_anon += ANON(pmp) * kperpage;
1040 t->total_locked += pmp->pr_locked * kperpage;
1041 t->total_swap += swap * kperpage;
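/*
 * Coalesce adjacent mappings into a single output line when they are
 * contiguous in address (and, for non-ISM mappings, in file offset), have
 * identical flags and name the same object. The prev_* statics accumulate
 * the merged totals until a non-mergeable mapping or the last entry
 * flushes them.
 */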
1043 if (first == 1) {
1044 first = 0;
1045 prev_vaddr = pmp->pr_vaddr;
1046 prev_size = pmp->pr_size;
1047 prev_offset = pmp->pr_offset;
1048 prev_mflags = pmp->pr_mflags;
1049 if (lname == NULL) {
1050 prev_lname = NULL;
1051 } else {
1052 (void) strcpy(prev_mname, lname);
1053 prev_lname = prev_mname;
1055 prev_rss = pmp->pr_rss * kperpage;
1056 prev_anon = ANON(pmp) * kperpage;
1057 prev_locked = pmp->pr_locked * kperpage;
1058 prev_swap = swap * kperpage;
1059 if (last == 0) {
1060 return (0);
1062 merged = 1;
1063 } else if (prev_vaddr + prev_size == pmp->pr_vaddr &&
1064 prev_mflags == pmp->pr_mflags &&
1065 ((prev_mflags & MA_ISM) ||
1066 prev_offset + prev_size == pmp->pr_offset) &&
1067 ((lname == NULL && prev_lname == NULL) ||
1068 (lname != NULL && prev_lname != NULL &&
1069 strcmp(lname, prev_lname) == 0))) {
1070 prev_size += pmp->pr_size;
1071 prev_rss += pmp->pr_rss * kperpage;
1072 prev_anon += ANON(pmp) * kperpage;
1073 prev_locked += pmp->pr_locked * kperpage;
1074 prev_swap += swap * kperpage;
1075 if (last == 0) {
1076 return (0);
1078 merged = 1;
1081 (void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr);
1082 printK(ROUNDUP_KB(prev_size), size_width);
1084 if (doswap)
1085 printK(prev_swap, size_width);
1086 else {
1087 printK(prev_rss, size_width);
1088 printK(prev_anon, size_width);
1089 printK(prev_locked, size_width);
1091 (void) printf(prev_lname ? " %-6s %s\n" : "%s\n",
1092 mflags(prev_mflags), prev_lname);
1094 if (last == 0) {
1095 prev_vaddr = pmp->pr_vaddr;
1096 prev_size = pmp->pr_size;
1097 prev_offset = pmp->pr_offset;
1098 prev_mflags = pmp->pr_mflags;
1099 if (lname == NULL) {
1100 prev_lname = NULL;
1101 } else {
1102 (void) strcpy(prev_mname, lname);
1103 prev_lname = prev_mname;
1105 prev_rss = pmp->pr_rss * kperpage;
1106 prev_anon = ANON(pmp) * kperpage;
1107 prev_locked = pmp->pr_locked * kperpage;
1108 prev_swap = swap * kperpage;
1109 } else if (merged == 0) {
1110 (void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
1111 printK(ROUNDUP_KB(pmp->pr_size), size_width);
1112 if (doswap)
1113 printK(swap * kperpage, size_width);
1114 else {
1115 printK(pmp->pr_rss * kperpage, size_width);
1116 printK(ANON(pmp) * kperpage, size_width);
1117 printK(pmp->pr_locked * kperpage, size_width);
1119 (void) printf(lname ? " %-6s %s\n" : " %s\n",
1120 mflags(pmp->pr_mflags), lname);
1123 if (last != 0)
1124 first = 1;
1126 return (0);
1129 static int
1130 perr(char *s)
1132 if (s)
1133 (void) fprintf(stderr, "%s: ", procname);
1134 else
1135 s = procname;
1136 perror(s);
1137 return (1);
1140 static char *
1141 mflags(uint_t arg)
1143 static char code_buf[80];
1144 char *str = code_buf;
1147 * rwxsR
1149 * r - segment is readable
1150 * w - segment is writable
1151 * x - segment is executable
1152 * s - segment is shared
1153 * R - segment is mapped MAP_NORESERVE
1156 (void) sprintf(str, "%c%c%c%c%c%c",
1157 arg & MA_READ ? 'r' : '-',
1158 arg & MA_WRITE ? 'w' : '-',
1159 arg & MA_EXEC ? 'x' : '-',
1160 arg & MA_SHARED ? 's' : '-',
1161 arg & MA_NORESERVE ? 'R' : '-',
1162 arg & MA_RESERVED1 ? '*' : ' ');
1164 return (str);
1167 static mapdata_t *
1168 nextmap(void)
1170 mapdata_t *newmaps;
1171 int next;
1173 if (map_count == map_alloc) {
1174 if (map_alloc == 0)
1175 next = 16;
1176 else
1177 next = map_alloc * 2;
1179 newmaps = reallocarray(maps, next, sizeof (mapdata_t));
1180 if (newmaps == NULL) {
1181 (void) perr("failed to allocate maps");
1182 exit(1);
1184 (void) memset(newmaps + map_alloc, '\0',
1185 (next - map_alloc) * sizeof (mapdata_t));
1187 map_alloc = next;
1188 maps = newmaps;
1191 return (&maps[map_count++]);
1194 /*ARGSUSED*/
1195 static int
1196 gather_map(void *ignored, const prmap_t *map, const char *objname)
1198 mapdata_t *data;
1200 /* Skip mappings which are outside the range specified by -A */
1201 if (!address_in_range(map->pr_vaddr,
1202 map->pr_vaddr + map->pr_size, map->pr_pagesize))
1203 return (0);
1205 data = nextmap();
1206 data->md_map = *map;
1207 free(data->md_objname);
1208 data->md_objname = objname ? strdup(objname) : NULL;
1210 return (0);
1213 /*ARGSUSED*/
1214 static int
1215 gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname,
1216 int last, int doswap)
1218 mapdata_t *data;
1220 /* Skip mappings which are outside the range specified by -A */
1221 if (!address_in_range(xmap->pr_vaddr,
1222 xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize))
1223 return (0);
1225 data = nextmap();
1226 data->md_xmap = *xmap;
1227 free(data->md_objname);
1228 data->md_objname = objname ? strdup(objname) : NULL;
1229 data->md_last = last;
1230 data->md_doswap = doswap;
1232 return (0);
1235 static int
1236 iter_map(proc_map_f *func, void *data)
1238 int i;
1239 int ret;
1241 for (i = 0; i < map_count; i++) {
1242 if (interrupt)
1243 break;
1244 if ((ret = func(data, &maps[i].md_map,
1245 maps[i].md_objname)) != 0)
1246 return (ret);
1249 return (0);
1252 static int
1253 iter_xmap(proc_xmap_f *func, void *data)
1255 int i;
1256 int ret;
1258 for (i = 0; i < map_count; i++) {
1259 if (interrupt)
1260 break;
1261 if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname,
1262 maps[i].md_last, maps[i].md_doswap)) != 0)
1263 return (ret);
1266 return (0);
1270 * Convert lgroup ID to string.
1271 * Returns a dash when the lgroup ID is invalid.
1273 static char *
1274 lgrp2str(lgrp_id_t lgrp)
1276 static char lgrp_buf[20];
1277 char *str = lgrp_buf;
1279 (void) sprintf(str, lgrp == LGRP_NONE ? " -" : "%4d", lgrp);
1280 return (str);
1284 * Parse address range specification for -A option.
1285 * The address range may have the following forms:
1287 * address
1288 * start and end are set to address
1289 * address,
1290 * start is set to address, end is set to INVALID_ADDRESS
1291 * ,address
1292 * start is set to 0, end is set to address
1293 * address1,address2
1294 * start is set to address1, end is set to address2
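/*
 * Addresses are parsed with sscanf(3C) "%lx", so they are given in
 * hexadecimal; for example "-A 10000,20000" limits output to the range
 * [0x10000, 0x20000].
 */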
1297 static int
1298 parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end)
1300 char *startp = input_str;
1301 char *endp = strchr(input_str, ',');
1302 ulong_t s = (ulong_t)INVALID_ADDRESS;
1303 ulong_t e = (ulong_t)INVALID_ADDRESS;
1305 if (endp != NULL) {
1307 * A comma is present. If there is nothing after the comma, end
1308 * remains set to INVALID_ADDRESS. Otherwise it is set to the
1309 * value after the comma.
1311 *endp = '\0';
1312 endp++;
1314 if ((*endp != '\0') && sscanf(endp, "%lx", &e) != 1)
1315 return (1);
1318 if (startp != NULL) {
1320 * Read the start address, if it is specified. If the address is
1321 * missing, start will be set to INVALID_ADDRESS.
1323 if ((*startp != '\0') && sscanf(startp, "%lx", &s) != 1)
1324 return (1);
1327 /* If there is no comma, end becomes equal to start */
1328 if (endp == NULL)
1329 e = s;
1332 * ,end implies 0..end range
1334 if (e != INVALID_ADDRESS && s == INVALID_ADDRESS)
1335 s = 0;
1337 *start = (uintptr_t)s;
1338 *end = (uintptr_t)e;
1340 /* Return error if neither start nor end address was specified */
1341 return (! (s != INVALID_ADDRESS || e != INVALID_ADDRESS));
1345 * Check whether any portion of [start, end] segment is within the
1346 * [start_addr, end_addr] range.
1348 * Return values:
1349 * 0 - address is outside the range
1350 * 1 - address is within the range
1352 static int
1353 address_in_range(uintptr_t start, uintptr_t end, size_t psz)
1355 int rc = 1;
1358 * Nothing to do if there is no address range specified with -A
1360 if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
1361 /* The segment end is below the range start */
1362 if ((start_addr != INVALID_ADDRESS) &&
1363 (end < P2ALIGN(start_addr, psz)))
1364 rc = 0;
1366 /* The segment start is above the range end */
1367 if ((end_addr != INVALID_ADDRESS) &&
1368 (start > P2ALIGN(end_addr + psz, psz)))
1369 rc = 0;
1371 return (rc);
1375 * Returns an intersection of the [start, end] interval and the range specified
1376 * by -A flag [start_addr, end_addr]. Unspecified parts of the address range
1377 * have value INVALID_ADDRESS.
1379 * The start_addr address is rounded down to the beginning of a page and
1380 * end_addr is rounded up to the end of a page.
1382 * Returns the size of the resulting interval or zero if the interval is empty
1383 * or invalid.
1385 static size_t
1386 adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz,
1387 uintptr_t *new_start, uintptr_t *new_end)
1389 uintptr_t from; /* start_addr rounded down */
1390 uintptr_t to; /* end_addr rounded up */
1393 * Round down the lower address of the range to the beginning of page.
1395 if (start_addr == INVALID_ADDRESS) {
1397 * No start_addr specified by -A, the lower part of the interval
1398 * does not change.
1400 *new_start = start;
1401 } else {
1402 from = P2ALIGN(start_addr, psz);
1404 * If end address is outside the range, return an empty
1405 * interval
1407 if (end < from) {
1408 *new_start = *new_end = 0;
1409 return (0);
1412 * The adjusted start address is the maximum of requested start
1413 * and the aligned start_addr of the -A range.
1415 *new_start = start < from ? from : start;
1419 * Round up the higher address of the range to the end of page.
1421 if (end_addr == INVALID_ADDRESS) {
1423 * No end_addr specified by -A, the upper part of the interval
1424 * does not change.
1426 *new_end = end;
1427 } else {
1429 * If only one address is specified and it is the beginning of a
1430 * segment, get information about the whole segment. This
1431 * function is called once per segment and the 'end' argument is
1432 * always the end of a segment, so just use the 'end' value.
1434 to = (end_addr == start_addr && start == start_addr) ?
1435 end :
1436 P2ALIGN(end_addr + psz, psz);
1438 * If start address is outside the range, return an empty
1439 * interval
1441 if (start > to) {
1442 *new_start = *new_end = 0;
1443 return (0);
1446 * The adjusted end address is the minimum of requested end
1447 * and the aligned end_addr of the -A range.
1449 *new_end = end > to ? to : end;
1453 * Make sure that the resulting interval is legal.
1455 if (*new_end < *new_start)
1456 *new_start = *new_end = 0;
1458 /* Return the size of the interval */
1459 return (*new_end - *new_start);
1463 * Initialize the memory chunk data structure with information about a new segment.
1465 static void
1466 mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz)
1468 chunk->end_addr = end;
1469 chunk->page_size = psz;
1470 chunk->page_index = 0;
1471 chunk->chunk_start = chunk->chunk_end = 0;
1475 * Create a new chunk of addresses starting from vaddr.
1476 * Pass the whole chunk to pr_meminfo to collect lgroup and page size
1477 * information for each page in the chunk.
1479 static void
1480 mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr)
1482 page_descr_t *pdp = chunk->page_info;
1483 size_t psz = chunk->page_size;
1484 uintptr_t addr = vaddr;
1485 uint64_t inaddr[MAX_MEMINFO_CNT];
1486 uint64_t outdata[2 * MAX_MEMINFO_CNT];
1487 uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
1488 uint_t validity[MAX_MEMINFO_CNT];
1489 uint64_t *dataptr = inaddr;
1490 uint64_t *outptr = outdata;
1491 uint_t *valptr = validity;
1492 int i, j, rc;
1494 chunk->chunk_start = vaddr;
1495 chunk->page_index = 0; /* reset index for the new chunk */
1498 * Fill in MAX_MEMINFO_CNT worth of pages starting from vaddr. Also,
1499 * copy starting address of each page to inaddr array for pr_meminfo.
1501 for (i = 0, pdp = chunk->page_info;
1502 (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr);
1503 i++, pdp++, dataptr++, addr += psz) {
1504 *dataptr = (uint64_t)addr;
1505 pdp->pd_start = addr;
1506 pdp->pd_lgrp = LGRP_NONE;
1507 pdp->pd_valid = 0;
1508 pdp->pd_pagesize = 0;
1511 /* Mark the number of entries in the chunk and the last address */
1512 chunk->page_count = i;
1513 chunk->chunk_end = addr - psz;
1515 if (interrupt)
1516 return;
1518 /* Call meminfo for all collected addresses */
1519 rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity);
1520 if (rc < 0) {
1521 (void) perr("can not get memory information");
1522 return;
1525 /* Verify validity of each result and fill in the addrs array */
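/*
 * Per meminfo(2), bit 0 of each validity word is set when the address
 * itself could be examined, and bit j+1 is set when the j-th requested
 * item (here MEMINFO_VLGRP, then MEMINFO_VPAGESIZE) is valid.
 */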
1526 pdp = chunk->page_info;
1527 for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) {
1528 /* Skip invalid address pointers */
1529 if ((*valptr & 1) == 0) {
1530 continue;
1533 /* Is lgroup information available? */
1534 if ((*valptr & 2) != 0) {
1535 pdp->pd_lgrp = (lgrp_id_t)*outptr;
1536 pdp->pd_valid = 1;
1539 /* Is page size information available? */
1540 if ((*valptr & 4) != 0) {
1541 pdp->pd_pagesize = *(outptr + 1);
1547 * Starting from address 'vaddr' find the region with pages allocated from the
1548 * same lgroup.
1550 * Arguments:
1551 * mchunk Initialized memory chunk structure
1552 * vaddr Starting address of the region
1553 * maxaddr Upper bound of the region
1554 * pagesize Default page size to use
1555 * ret_lgrp On exit contains the lgroup ID of all pages in the
1556 * region.
1558 * Returns:
1559 * Size of the contiguous region in bytes
1560 * The lgroup ID of all pages in the region in ret_lgrp argument.
1562 static size_t
1563 get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr,
1564 uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp)
1566 size_t size_contig = 0;
1567 lgrp_id_t lgrp; /* Lgroup of the region start */
1568 lgrp_id_t curr_lgrp; /* Lgroup of the current page */
1569 size_t psz = pagesize; /* Pagesize to use */
1571 /* Set both lgroup IDs to the lgroup of the first page */
1572 curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
1575 * Starting from vaddr, walk page by page until either the end
1576 * of the segment is reached or a page is allocated from a different
1577 * lgroup. Also stop if interrupted from the keyboard.
1579 while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) {
1581 * Get lgroup ID and the page size of the current page.
1583 curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
1584 /* If there is no page size information, use the default */
1585 if (psz == 0)
1586 psz = pagesize;
1588 if (curr_lgrp == lgrp) {
1590 * This page belongs to the contiguous region.
1591 * Increase the region size and advance to the new page.
1593 size_contig += psz;
1594 vaddr += psz;
1598 /* Return the region lgroup ID and the size */
1599 *ret_lgrp = lgrp;
1600 return (size_contig);
1604 * Given a virtual address, return its lgroup and page size. If there is meminfo
1605 * information cached for the address, use it; otherwise shift the chunk window to
1606 * vaddr and collect meminfo information for the new chunk.
1608 static lgrp_id_t
1609 addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz)
1611 page_descr_t *pdp;
1612 lgrp_id_t lgrp = LGRP_NONE;
1613 int i;
1615 *psz = chunk->page_size;
1617 if (interrupt)
1618 return (0);
1621 * Is there information about this address? If not, create a new chunk
1622 * starting from vaddr and apply pr_meminfo() to the whole chunk.
1624 if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) {
1626 * This address is outside the chunk, get the new chunk and
1627 * collect meminfo information for it.
1629 mem_chunk_get(chunk, vaddr);
1633 * Find information about the address.
1635 pdp = &chunk->page_info[chunk->page_index];
1636 for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) {
1637 if (pdp->pd_start == vaddr) {
1638 if (pdp->pd_valid) {
1639 lgrp = pdp->pd_lgrp;
1641 * Override page size information if it is
1642 * present.
1644 if (pdp->pd_pagesize > 0)
1645 *psz = pdp->pd_pagesize;
1647 break;
1651 * Remember where we ended - the next search will start here.
1652 * We can query the lgrp for the same address again, so do not
1653 * advance the index past the current value.
1655 chunk->page_index = i;
1657 return (lgrp);
1660 /* ARGSUSED */
1661 static void
1662 intr(int sig)
1664 interrupt = 1;