/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Joyent, Inc.
 */
#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ks.h>

#include <sys/types.h>
#include <sys/memlist.h>
#include <sys/swap.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/vnode.h>

#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>

#include "avl.h"
#include "memory.h"
/*
 * Page walker.
 * By default, this will walk all pages in the system.  If given an
 * address, it will walk all pages belonging to the vnode at that
 * address.
 */

#define	PW_GLOBAL(wsp)	((wsp)->walk_data == NULL)
int
page_walk_init(mdb_walk_state_t *wsp)
{
	/*
	 * We use this to let page_walk_step know if this was a local or a
	 * global walk.
	 */
	wsp->walk_data = (void *)wsp->walk_addr;

	if (wsp->walk_addr == 0) {
		/*
		 * Walk all pages.
		 *
		 * In essence:
		 * ::walk vn_cache | ::print vnode_t v_object.tree | ::walk avl
		 */
		if (mdb_layered_walk("vn_cache", wsp) == -1) {
			mdb_warn("couldn't walk list of vnodes");
			return (WALK_ERR);
		}
	} else {
		/*
		 * Walk just this vnode.
		 *
		 * In essence:
		 * addr::print vnode_t v_object.tree | ::walk avl
		 *
		 * In this case, all the work happens in the _step function.
		 */
	}

	return (WALK_NEXT);
}
/*
 * This is called for each vnode, so we just need to do what amounts to:
 *
 * addr::print vnode_t v_object.tree | ::walk avl
 */
int
page_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t addr = wsp->walk_addr + OFFSETOF(struct vnode, v_object.tree);

	if (mdb_pwalk("avl", wsp->walk_callback, wsp->walk_cbdata, addr) == -1) {
		mdb_warn("couldn't walk vnode's page AVL tree at %p", addr);
		return (WALK_ERR);
	}

	/*
	 * If this was a global walk, we need to move on to the next vnode.
	 * To do that, we return WALK_NEXT, which is handled by
	 * mdb_layered_walk.  If this was a local walk, there was only one
	 * vnode to walk, and we are already done with it, so we return
	 * WALK_DONE.  (If we returned WALK_NEXT, we would get called again
	 * with the same ->walk_addr!)
	 */
	return (PW_GLOBAL(wsp) ? WALK_NEXT : WALK_DONE);
}
void
page_walk_fini(mdb_walk_state_t *wsp)
{
}
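/*
 * Example usage (a sketch; "addr" stands for a vnode_t address):
 *
 *	> ::walk page		(walk every page in the system)
 *	> addr::walk page	(walk only the pages of that vnode)
 *	> addr::walk page | ::page
 */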
/*
 * allpages walks all pages in the system, in the order they appear in
 * the memseg structure.
 */

#define	PAGE_BUFFER	128
int
allpages_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != (uintptr_t)NULL) {
		mdb_warn("allpages only supports global walks.\n");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("memseg", wsp) == -1) {
		mdb_warn("couldn't walk 'memseg'");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);

	return (WALK_NEXT);
}
int
allpages_walk_step(mdb_walk_state_t *wsp)
{
	const struct memseg *msp = wsp->walk_layer;
	page_t *buf = wsp->walk_data;
	size_t pg_read, i;
	size_t pg_num = msp->pages_end - msp->pages_base;
	const page_t *pg_addr = msp->pages;

	while (pg_num > 0) {
		pg_read = MIN(pg_num, PAGE_BUFFER);

		if (mdb_vread(buf, pg_read * sizeof (page_t),
		    (uintptr_t)pg_addr) == -1) {
			mdb_warn("can't read page_t's at %#lx", pg_addr);
			return (WALK_ERR);
		}
		for (i = 0; i < pg_read; i++) {
			int ret = wsp->walk_callback((uintptr_t)&pg_addr[i],
			    &buf[i], wsp->walk_cbdata);

			if (ret != WALK_NEXT)
				return (ret);
		}
		pg_num -= pg_read;
		pg_addr += pg_read;
	}

	return (WALK_NEXT);
}
void
allpages_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_t) * PAGE_BUFFER);
}
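/*
 * Example usage (a sketch): "::walk allpages" streams the address of every
 * page_t in memseg order; this is the walk that ::memstat (below) layers
 * its page accounting on.
 *
 *	> ::walk allpages | ::page
 */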
/*
 * Hash table + LRU queue.
 * This table is used to cache recently read vnodes for the memstat
 * command, to reduce the number of mdb_vread calls.  This greatly
 * speeds up the memstat command on live, large-CPU-count systems.
 */

#define	VN_SMALL	401
#define	VN_LARGE	10007
#define	VN_HTABLE_KEY(p, hp)	((p) % ((hp)->vn_htable_buckets))
struct vn_htable_list {
	uint_t vn_flag;				/* v_flag from vnode	*/
	uintptr_t vn_ptr;			/* pointer to vnode	*/
	struct vn_htable_list *vn_q_next;	/* queue next pointer	*/
	struct vn_htable_list *vn_q_prev;	/* queue prev pointer	*/
	struct vn_htable_list *vn_h_next;	/* hash table pointer	*/
};
/*
 * vn_q_first	-> points to the head of the queue: the vnode that was most
 *		   recently used
 * vn_q_last	-> points to the least recently used vnode, which is recycled
 *		   once a new vnode is read
 * vn_htable	-> hash table
 * vn_htable_buf -> contains htable objects
 * vn_htable_size -> total number of items in the hash table
 * vn_htable_buckets -> number of buckets in the hash table
 */
typedef struct vn_htable {
	struct vn_htable_list  *vn_q_first;
	struct vn_htable_list  *vn_q_last;
	struct vn_htable_list **vn_htable;
	struct vn_htable_list  *vn_htable_buf;
	int vn_htable_size;
	int vn_htable_buckets;
} vn_htable_t;
/* Allocate memory, initialize the hash table and the LRU queue. */
static void
vn_htable_init(vn_htable_t *hp, size_t vn_size)
{
	int i;
	int htable_size = MAX(vn_size, VN_LARGE);

	if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
	    * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
		htable_size = VN_SMALL;
		hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
		    * htable_size, UM_SLEEP|UM_GC);
	}

	hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
	    * htable_size, UM_SLEEP|UM_GC);

	hp->vn_q_first = &hp->vn_htable_buf[0];
	hp->vn_q_last = &hp->vn_htable_buf[htable_size - 1];
	hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
	hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];

	for (i = 1; i < (htable_size - 1); i++) {
		hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
		hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
	}

	hp->vn_htable_size = htable_size;
	hp->vn_htable_buckets = htable_size;
}
/*
 * Find the vnode whose address is ptr, and return its v_flag in vp->v_flag.
 * The function tries to find the needed information in the following order:
 *
 * 1. check if ptr is the first in the queue
 * 2. check if ptr is in the hash table (if so, move it to the top of the
 *    queue)
 * 3. do mdb_vread, remove the last item from the queue and the hash table,
 *    insert the new information into the freed object, and put the object
 *    at the top of the queue.
 */
static int
vn_get(vn_htable_t *hp, struct vnode *vp, uintptr_t ptr)
{
	int hkey;
	struct vn_htable_list *hent, **htmp, *q_next, *q_prev;
	struct vn_htable_list *q_first = hp->vn_q_first;

	/* 1. vnode ptr is first in the queue, just get v_flag and return */
	if (q_first->vn_ptr == ptr) {
		vp->v_flag = q_first->vn_flag;

		return (0);
	}

	/* 2. search the hash table for this ptr */
	hkey = VN_HTABLE_KEY(ptr, hp);
	hent = hp->vn_htable[hkey];
	while (hent && (hent->vn_ptr != ptr))
		hent = hent->vn_h_next;

	/* 3. if hent is NULL, it was not in the hash table; do mdb_vread */
	if (hent == NULL) {
		struct vnode vn;

		if (mdb_vread(&vn, sizeof (vnode_t), ptr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx", ptr);
			return (-1);
		}

		/* we will insert the read data into the last queue element */
		hent = hp->vn_q_last;

		/* remove the hp->vn_q_last object from the hash table */
		if (hent->vn_ptr) {
			htmp = &hp->vn_htable[VN_HTABLE_KEY(hent->vn_ptr, hp)];
			while (*htmp != hent)
				htmp = &(*htmp)->vn_h_next;
			*htmp = hent->vn_h_next;
		}

		/* insert the data into the freed object */
		hent->vn_ptr = ptr;
		hent->vn_flag = vn.v_flag;

		/* insert the new object into the hash table */
		hent->vn_h_next = hp->vn_htable[hkey];
		hp->vn_htable[hkey] = hent;
	}

	/* Remove from the queue.  hent is not first, so vn_q_prev != NULL. */
	q_next = hent->vn_q_next;
	q_prev = hent->vn_q_prev;
	if (q_next == NULL)
		hp->vn_q_last = q_prev;
	else
		q_next->vn_q_prev = q_prev;
	q_prev->vn_q_next = q_next;

	/* Add to the front of the queue */
	hent->vn_q_prev = NULL;
	hent->vn_q_next = q_first;
	q_first->vn_q_prev = hent;
	hp->vn_q_first = hent;

	/* Set v_flag in the vnode pointer from hent */
	vp->v_flag = hent->vn_flag;

	return (0);
}
/* Summary statistics of pages */
typedef struct memstat {
	struct vnode *ms_kvp;		/* Cached address of kernel vnode */
	struct vnode *ms_unused_vp;	/* Unused pages vnode pointer	  */
	struct vnode *ms_zvp;		/* Cached address of zio vnode    */
	uint64_t ms_kmem;		/* Pages of kernel memory	  */
	uint64_t ms_zfs_data;		/* Pages of zfs data		  */
	uint64_t ms_anon;		/* Pages of anonymous memory	  */
	uint64_t ms_vnode;		/* Pages of named (vnode) memory  */
	uint64_t ms_exec;		/* Pages of exec/library memory	  */
	uint64_t ms_cachelist;		/* Pages on the cachelist (free)  */
	uint64_t ms_bootpages;		/* Pages on the bootpages list    */
	uint64_t ms_total;		/* Pages on page hash		  */
	vn_htable_t *ms_vn_htable;	/* Pointer to hash table	  */
	struct vnode ms_vn;		/* vnode buffer			  */
} memstat_t;
#define	MS_PP_ISKAS(pp, stats)				\
	((pp)->p_vnode == (stats)->ms_kvp)

#define	MS_PP_ISZFS_DATA(pp, stats)			\
	(((stats)->ms_zvp != NULL) && ((pp)->p_vnode == (stats)->ms_zvp))
/*
 * Summarize pages by type and update stat information
 */

/* ARGSUSED */
static int
memstat_callback(page_t *page, page_t *pp, memstat_t *stats)
{
	struct vnode *vp = &stats->ms_vn;

	if (PP_ISBOOTPAGES(pp))
		stats->ms_bootpages++;
	else if (pp->p_vnode == NULL || pp->p_vnode == stats->ms_unused_vp)
		return (WALK_NEXT);
	else if (MS_PP_ISKAS(pp, stats))
		stats->ms_kmem++;
	else if (MS_PP_ISZFS_DATA(pp, stats))
		stats->ms_zfs_data++;
	else if (PP_ISFREE(pp))
		stats->ms_cachelist++;
	else if (vn_get(stats->ms_vn_htable, vp, (uintptr_t)pp->p_vnode))
		return (WALK_ERR);
	else if (IS_SWAPFSVP(vp))
		stats->ms_anon++;
	else if ((vp->v_flag & VVMEXEC) != 0)
		stats->ms_exec++;
	else
		stats->ms_vnode++;

	stats->ms_total++;

	return (WALK_NEXT);
}
/* ARGSUSED */
static int
memstat(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pgcnt_t total_pages, physmem;
	ulong_t freemem;
	memstat_t stats;
	GElf_Sym sym;
	vn_htable_t ht;
	struct vnode *kvps;
	uintptr_t vn_size = 0;

	bzero(&stats, sizeof (memstat_t));

	/*
	 * -s size is an internal option.  It specifies the size of vn_htable.
	 * The hash table size is set in the following order:
	 * if the user has specified a size larger than VN_LARGE, try it,
	 * but fall back to VN_SMALL if the allocation fails; otherwise try
	 * VN_LARGE, and fall back to VN_SMALL if that allocation fails.
	 * For better hash table efficiency it is highly recommended to
	 * set the size to a prime number.
	 */
	if ((flags & DCMD_ADDRSPEC) || mdb_getopts(argc, argv,
	    's', MDB_OPT_UINTPTR, &vn_size, NULL) != argc)
		return (DCMD_USAGE);

	/* Initialize the vnode hash list and queue */
	vn_htable_init(&ht, vn_size);
	stats.ms_vn_htable = &ht;
	/* Total physical memory */
	if (mdb_readvar(&total_pages, "total_pages") == -1) {
		mdb_warn("unable to read total_pages");
		return (DCMD_ERR);
	}

	/* Artificially limited memory */
	if (mdb_readvar(&physmem, "physmem") == -1) {
		mdb_warn("unable to read physmem");
		return (DCMD_ERR);
	}

	/* read the kernel vnode array pointer */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read kvps");
		return (DCMD_ERR);
	}
	kvps = (struct vnode *)(uintptr_t)sym.st_value;
	stats.ms_kvp = &kvps[KV_KVP];

	/*
	 * Read the zio vnode pointer.
	 */
	stats.ms_zvp = &kvps[KV_ZVP];

	/*
	 * If physmem != total_pages, then the administrator has limited the
	 * number of pages available in the system.  Excluded pages are
	 * associated with the unused pages vnode.  Read this vnode so the
	 * pages can be excluded in the page accounting.
	 */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "unused_pages_vp",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read unused_pages_vp");
		return (DCMD_ERR);
	}
	stats.ms_unused_vp = (struct vnode *)(uintptr_t)sym.st_value;

	/* walk all pages, collect statistics */
	if (mdb_walk("allpages", (mdb_walk_cb_t)memstat_callback,
	    &stats) == -1) {
		mdb_warn("can't walk memseg");
		return (DCMD_ERR);
	}
#define	MS_PCT_TOTAL(x)	((ulong_t)((((5 * total_pages) + ((x) * 1000ull))) / \
		((physmem) * 10)))
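	/*
	 * MS_PCT_TOTAL() rounds to the nearest percent rather than
	 * truncating: (x) * 1000 / (physmem * 10) is the percentage, and
	 * the 5 * total_pages term adds the equivalent of 0.5% before the
	 * integer division.  Illustrative numbers only: with physmem ==
	 * total_pages == 1000 and x == 125, (5000 + 125000) / 10000 == 13,
	 * i.e. 12.5% rounds up to 13%.
	 */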
	mdb_printf("Page Summary                Pages                MB"
	    "  %%Tot\n");
	mdb_printf("------------     ----------------  ----------------"
	    "  ----\n");
	mdb_printf("Kernel           %16llu  %16llu  %3lu%%\n",
	    stats.ms_kmem,
	    (uint64_t)stats.ms_kmem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_kmem));

	if (stats.ms_bootpages != 0) {
		mdb_printf("Boot pages       %16llu  %16llu  %3lu%%\n",
		    stats.ms_bootpages,
		    (uint64_t)stats.ms_bootpages * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_bootpages));
	}

	if (stats.ms_zfs_data != 0) {
		mdb_printf("ZFS File Data    %16llu  %16llu  %3lu%%\n",
		    stats.ms_zfs_data,
		    (uint64_t)stats.ms_zfs_data * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_zfs_data));
	}

	mdb_printf("Anon             %16llu  %16llu  %3lu%%\n",
	    stats.ms_anon,
	    (uint64_t)stats.ms_anon * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_anon));
	mdb_printf("Exec and libs    %16llu  %16llu  %3lu%%\n",
	    stats.ms_exec,
	    (uint64_t)stats.ms_exec * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_exec));
	mdb_printf("Page cache       %16llu  %16llu  %3lu%%\n",
	    stats.ms_vnode,
	    (uint64_t)stats.ms_vnode * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_vnode));
	mdb_printf("Free (cachelist) %16llu  %16llu  %3lu%%\n",
	    stats.ms_cachelist,
	    (uint64_t)stats.ms_cachelist * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_cachelist));

	/*
	 * Occasionally we double count pages above.  To avoid printing
	 * absurdly large values for freemem, we clamp it at zero.
	 */
	if (physmem > stats.ms_total)
		freemem = physmem - stats.ms_total;
	else
		freemem = 0;

	mdb_printf("Free (freelist)  %16lu  %16llu  %3lu%%\n", freemem,
	    (uint64_t)freemem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(freemem));

	mdb_printf("\nTotal            %16lu  %16lu\n",
	    physmem,
	    (uint64_t)physmem * PAGESIZE / (1024 * 1024));

	if (physmem != total_pages) {
		mdb_printf("Physical         %16lu  %16lu\n",
		    total_pages,
		    (uint64_t)total_pages * PAGESIZE / (1024 * 1024));
	}

#undef MS_PCT_TOTAL

	return (DCMD_OK);
}
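/*
 * Example session (a sketch; the figures are invented, but the columns
 * match the mdb_printf() calls above):
 *
 *	> ::memstat
 *	Page Summary                Pages                MB  %Tot
 *	------------     ----------------  ----------------  ----
 *	Kernel                     123456               482    6%
 *	...
 *
 * The internal option "::memstat -s 20011" overrides the vn_htable size;
 * as noted above, a prime number works best.
 */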
void
pagelookup_help(void)
{
	mdb_printf(
	    "Finds the page with name { %<b>vp%</b>, %<b>offset%</b> }.\n"
	    "\n"
	    "Can be invoked three different ways:\n\n"
	    "    ::pagelookup -v %<b>vp%</b> -o %<b>offset%</b>\n"
	    "    %<b>vp%</b>::pagelookup -o %<b>offset%</b>\n"
	    "    %<b>offset%</b>::pagelookup -v %<b>vp%</b>\n"
	    "\n"
	    "The latter two forms are useful in pipelines.\n");
}
int
pagelookup(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t vp = -(uintptr_t)1;
	uint64_t offset = -(uint64_t)1;

	uintptr_t pageaddr;
	int hasaddr = (flags & DCMD_ADDRSPEC);
	int usedaddr = 0;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_UINTPTR, &vp,
	    'o', MDB_OPT_UINT64, &offset,
	    0) != argc) {
		return (DCMD_USAGE);
	}

	if (vp == -(uintptr_t)1) {
		if (offset == -(uint64_t)1) {
			mdb_warn(
			    "pagelookup: at least one of -v vp or -o offset "
			    "required.\n");
			return (DCMD_USAGE);
		}
		vp = addr;
		usedaddr = 1;
	} else if (offset == -(uint64_t)1) {
		offset = mdb_get_dot();
		usedaddr = 1;
	}
	if (usedaddr && !hasaddr) {
		mdb_warn("pagelookup: address required\n");
		return (DCMD_USAGE);
	}
	if (!usedaddr && hasaddr) {
		mdb_warn(
		    "pagelookup: address specified when both -v and -o were "
		    "passed\n");
		return (DCMD_USAGE);
	}

	pageaddr = mdb_page_lookup(vp, offset);
	if (pageaddr == (uintptr_t)NULL) {
		mdb_warn("pagelookup: no page for {vp = %p, offset = %llp}\n",
		    vp, offset);
		return (DCMD_OK);
	}

	mdb_printf("%#lr\n", pageaddr);		/* this is PIPE_OUT friendly */
	return (DCMD_OK);
}
/*ARGSUSED*/
int
page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t pp;

	if (argc != 0 || !(flags & DCMD_ADDRSPEC)) {
		return (DCMD_USAGE);
	}

	pp = mdb_pfn2page((pfn_t)addr);
	if (pp == 0) {
		return (DCMD_ERR);
	}

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", pp);
	} else {
		mdb_printf("%lx has page_t at %#lx\n", (pfn_t)addr, pp);
	}

	return (DCMD_OK);
}
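/*
 * Example usage (a sketch; the PFN and address are invented):
 *
 *	> 1fe0::page_num2pp
 *	1fe0 has page_t at fffff38000fff500
 *	> 1fe0::page_num2pp | ::page
 *
 * In a pipeline (DCMD_PIPE_OUT), only the bare page_t pointer is emitted.
 */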
int
page(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	page_t	p;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("page", "page", argc, argv) == -1) {
			mdb_warn("can't walk pages");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %16s %8s %3s %3s %2s %2s %2s%</u>\n",
		    "PAGE", "VNODE", "OFFSET", "SELOCK",
		    "LCT", "COW", "IO", "FS", "ST");
	}

	if (mdb_vread(&p, sizeof (page_t), addr) == -1) {
		mdb_warn("can't read page_t at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %?p %16llx %8x %3d %3d %2x %2x %2x\n",
	    addr, p.p_vnode, p.p_offset, p.p_selock, p.p_lckcnt, p.p_cowcnt,
	    p.p_iolock_state, p.p_fsdata, p.p_state);

	return (DCMD_OK);
}
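/*
 * Example usage (a sketch):
 *
 *	> ::page		(print every page_t, via the "page" walk)
 *	> addr::page		(print just the page_t at addr)
 */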
int
swap_walk_init(mdb_walk_state_t *wsp)
{
	void	*ptr;

	if ((mdb_readvar(&ptr, "swapinfo") == -1) || ptr == NULL) {
		mdb_warn("swapinfo not found or invalid");
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ptr;

	return (WALK_NEXT);
}
int
swap_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	sip;
	struct swapinfo	si;

	sip = wsp->walk_addr;

	if (sip == (uintptr_t)NULL)
		return (WALK_DONE);

	if (mdb_vread(&si, sizeof (struct swapinfo), sip) == -1) {
		mdb_warn("unable to read swapinfo at %#lx", sip);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)si.si_next;

	return (wsp->walk_callback(sip, &si, wsp->walk_cbdata));
}
int
swapinfof(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct swapinfo	si;
	char		*name;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("swapinfo", "swapinfo", argc, argv) == -1) {
			mdb_warn("can't walk swapinfo");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %9s %9s %s%</u>\n",
		    "ADDR", "VNODE", "PAGES", "FREE", "NAME");
	}

	if (mdb_vread(&si, sizeof (struct swapinfo), addr) == -1) {
		mdb_warn("can't read swapinfo at %#lx", addr);
		return (DCMD_ERR);
	}

	name = mdb_alloc(si.si_pnamelen, UM_SLEEP | UM_GC);
	if (mdb_vread(name, si.si_pnamelen, (uintptr_t)si.si_pname) == -1)
		name = "*error*";

	mdb_printf("%0?lx %?p %9d %9d %s\n",
	    addr, si.si_vp, si.si_npgs, si.si_nfpgs, name);

	return (DCMD_OK);
}
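/*
 * Example usage (a sketch): with no address, ::swapinfo walks the
 * swapinfo list itself and prints one line per swap device; a specific
 * struct swapinfo can also be formatted directly:
 *
 *	> ::swapinfo
 *	> addr::swapinfo
 */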
int
memlist_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	mlp;
	struct memlist	ml;

	mlp = wsp->walk_addr;

	if (mlp == (uintptr_t)NULL)
		return (WALK_DONE);

	if (mdb_vread(&ml, sizeof (struct memlist), mlp) == -1) {
		mdb_warn("unable to read memlist at %#lx", mlp);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ml.ml_next;

	return (wsp->walk_callback(mlp, &ml, wsp->walk_cbdata));
}
int
memlist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memlist	ml;

	if (!(flags & DCMD_ADDRSPEC)) {
		uintptr_t ptr;
		uint_t list = 0;
		int i;
		static const char *lists[] = {
			"phys_install",
			"phys_avail",
			"virt_avail"
		};

		if (mdb_getopts(argc, argv,
		    'i', MDB_OPT_SETBITS, (1 << 0), &list,
		    'a', MDB_OPT_SETBITS, (1 << 1), &list,
		    'v', MDB_OPT_SETBITS, (1 << 2), &list, NULL) != argc)
			return (DCMD_USAGE);

		if (!list)
			list = 1;

		for (i = 0; list; i++, list >>= 1) {
			if (!(list & 1))
				continue;
			if ((mdb_readvar(&ptr, lists[i]) == -1) ||
			    (ptr == (uintptr_t)NULL)) {
				mdb_warn("%s not found or invalid", lists[i]);
				return (DCMD_ERR);
			}

			mdb_printf("%s:\n", lists[i]);
			if (mdb_pwalk_dcmd("memlist", "memlist", 0, NULL,
			    ptr) == -1) {
				mdb_warn("can't walk memlist");
				return (DCMD_ERR);
			}
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %16s %16s%</u>\n", "ADDR", "BASE", "SIZE");

	if (mdb_vread(&ml, sizeof (struct memlist), addr) == -1) {
		mdb_warn("can't read memlist at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %16llx %16llx\n", addr, ml.ml_address, ml.ml_size);

	return (DCMD_OK);
}
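/*
 * Example usage (a sketch): with no flags, ::memlist defaults to
 * phys_install; -i, -a, and -v select phys_install, phys_avail, and
 * virt_avail respectively, and the flags may be combined:
 *
 *	> ::memlist -ia
 *	phys_install:
 *	ADDR                         BASE             SIZE
 *	...
 */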
int
seg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == (uintptr_t)NULL) {
		mdb_warn("seg walk must begin at struct as *\n");
		return (WALK_ERR);
	}

	/*
	 * this is really just a wrapper to the AVL tree walk
	 */
	wsp->walk_addr = (uintptr_t)&((struct as *)wsp->walk_addr)->a_segtree;
	return (avl_walk_init(wsp));
}
/*ARGSUSED*/
int
seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct seg s;

	if (argc != 0)
		return (DCMD_USAGE);

	if ((flags & DCMD_LOOPFIRST) || !(flags & DCMD_LOOP)) {
		mdb_printf("%<u>%?s %?s %?s %?s %s%</u>\n",
		    "SEG", "BASE", "SIZE", "DATA", "OPS");
	}

	if (mdb_vread(&s, sizeof (s), addr) == -1) {
		mdb_warn("failed to read seg at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%?p %?p %?lx %?p %a\n",
	    addr, s.s_base, s.s_size, s.s_data, s.s_ops);

	return (DCMD_OK);
}
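/*
 * Example usage (a sketch; "as" stands for a struct as pointer, e.g. a
 * proc_t's p_as):
 *
 *	> as::walk seg | ::seg
 */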
/*ARGSUSED*/
static int
pmap_walk_count_pages(uintptr_t addr, const void *data, void *out)
{
	pgcnt_t *nres = out;

	(*nres)++;

	return (WALK_NEXT);
}
static int
pmap_walk_seg(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{
	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops && seg->s_data != NULL) {
		struct segvn_data svn;
		pgcnt_t nres = 0;

		svn.vp = NULL;
		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		/*
		 * Use the segvn_pages walker to find all of the in-core pages
		 * for this mapping.
		 */
		if (mdb_pwalk("segvn_pages", pmap_walk_count_pages, &nres,
		    (uintptr_t)seg->s_data) == -1) {
			mdb_warn("failed to walk segvn_pages (s_data=%p)",
			    seg->s_data);
		}
		mdb_printf(" %7ldk", (nres * PAGESIZE) / 1024);

		if (svn.vp != NULL) {
			char buf[29];

			mdb_vnode2path((uintptr_t)svn.vp, buf, sizeof (buf));
			mdb_printf(" %s", buf);
		} else {
			mdb_printf(" [ anon ]");
		}
	} else {
		mdb_printf(" %8s [ &%a ]", "?", seg->s_ops);
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}
static int
pmap_walk_seg_quick(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{
	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops && seg->s_data != NULL) {
		struct segvn_data svn;

		svn.vp = NULL;
		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		if (svn.vp != NULL) {
			mdb_printf(" %0?p", svn.vp);
		} else {
			mdb_printf(" [ anon ]");
		}
	} else {
		mdb_printf(" [ &%a ]", seg->s_ops);
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}
/*ARGSUSED*/
int
pmap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t segvn;
	proc_t proc;
	uint_t quick = FALSE;
	mdb_walk_cb_t cb = (mdb_walk_cb_t)pmap_walk_seg;

	GElf_Sym sym;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'q', MDB_OPT_SETBITS, TRUE, &quick, NULL) != argc)
		return (DCMD_USAGE);

	if (mdb_vread(&proc, sizeof (proc), addr) == -1) {
		mdb_warn("failed to read proc at %p", addr);
		return (DCMD_ERR);
	}

	if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
		segvn = (uintptr_t)sym.st_value;
	else
		segvn = (uintptr_t)NULL;

	mdb_printf("%?s %?s %8s ", "SEG", "BASE", "SIZE");

	if (quick) {
		mdb_printf("VNODE\n");
		cb = (mdb_walk_cb_t)pmap_walk_seg_quick;
	} else {
		mdb_printf("%8s %s\n", "RES", "PATH");
	}

	if (mdb_pwalk("seg", cb, (void *)segvn, (uintptr_t)proc.p_as) == -1) {
		mdb_warn("failed to walk segments of as %p", proc.p_as);
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
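/*
 * Example usage (a sketch; "paddr" stands for a proc_t address, e.g. from
 * the ADDR column of ::ps):
 *
 *	> paddr::pmap
 *	> paddr::pmap -q
 *
 * The -q form prints the raw vnode pointer instead of walking
 * segvn_pages for a (possibly slow) resident-page count and path lookup.
 */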
typedef struct anon_walk_data {
	uintptr_t *aw_levone;
	uintptr_t *aw_levtwo;
	size_t aw_minslot;
	size_t aw_maxslot;
	pgcnt_t aw_nlevone;
	pgcnt_t aw_levone_ndx;
	size_t aw_levtwo_ndx;
	struct anon_map	*aw_ampp;
	struct anon_map	aw_amp;
	struct anon_hdr	aw_ahp;
	int		aw_all;	/* report all anon pointers, even NULLs */
} anon_walk_data_t;
int
anon_walk_init_common(mdb_walk_state_t *wsp, ulong_t minslot, ulong_t maxslot)
{
	anon_walk_data_t *aw;

	if (wsp->walk_addr == (uintptr_t)NULL) {
		mdb_warn("anon walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	aw = mdb_alloc(sizeof (anon_walk_data_t), UM_SLEEP);
	aw->aw_ampp = (struct anon_map *)wsp->walk_addr;

	if (mdb_vread(&aw->aw_amp, sizeof (aw->aw_amp), wsp->walk_addr) == -1) {
		mdb_warn("failed to read anon map at %p", wsp->walk_addr);
		mdb_free(aw, sizeof (anon_walk_data_t));
		return (WALK_ERR);
	}

	if (mdb_vread(&aw->aw_ahp, sizeof (aw->aw_ahp),
	    (uintptr_t)(aw->aw_amp.ahp)) == -1) {
		mdb_warn("failed to read anon hdr ptr at %p", aw->aw_amp.ahp);
		mdb_free(aw, sizeof (anon_walk_data_t));
		return (WALK_ERR);
	}

	/* update min and maxslot with the given constraints */
	maxslot = MIN(maxslot, aw->aw_ahp.size);
	minslot = MIN(minslot, maxslot);

	if (aw->aw_ahp.size <= ANON_CHUNK_SIZE ||
	    (aw->aw_ahp.flags & ANON_ALLOC_FORCE)) {
		aw->aw_nlevone = maxslot;
		aw->aw_levone_ndx = minslot;
		aw->aw_levtwo = NULL;
	} else {
		aw->aw_nlevone =
		    (maxslot + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
		aw->aw_levone_ndx = 0;
		aw->aw_levtwo =
		    mdb_zalloc(ANON_CHUNK_SIZE * sizeof (uintptr_t), UM_SLEEP);
	}

	aw->aw_levone =
	    mdb_alloc(aw->aw_nlevone * sizeof (uintptr_t), UM_SLEEP);
	aw->aw_all = (wsp->walk_arg == ANON_WALK_ALL);

	mdb_vread(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t),
	    (uintptr_t)aw->aw_ahp.array_chunk);

	aw->aw_levtwo_ndx = 0;
	aw->aw_minslot = minslot;
	aw->aw_maxslot = maxslot;

	wsp->walk_data = aw;
	return (0);
}
int
anon_walk_step(mdb_walk_state_t *wsp)
{
	anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
	struct anon anon;
	uintptr_t anonptr;
	ulong_t slot;

	/*
	 * Once we've walked through level one, we're done.
	 */
	if (aw->aw_levone_ndx >= aw->aw_nlevone) {
		return (WALK_DONE);
	}

	if (aw->aw_levtwo == NULL) {
		anonptr = aw->aw_levone[aw->aw_levone_ndx];
		aw->aw_levone_ndx++;
	} else {
		if (aw->aw_levtwo_ndx == 0) {
			uintptr_t levtwoptr;

			/* The first time through, skip to our first index. */
			if (aw->aw_levone_ndx == 0) {
				aw->aw_levone_ndx =
				    aw->aw_minslot / ANON_CHUNK_SIZE;
				aw->aw_levtwo_ndx =
				    aw->aw_minslot % ANON_CHUNK_SIZE;
			}

			levtwoptr = (uintptr_t)aw->aw_levone[aw->aw_levone_ndx];

			if (levtwoptr == (uintptr_t)NULL) {
				if (!aw->aw_all) {
					aw->aw_levtwo_ndx = 0;
					aw->aw_levone_ndx++;
					return (WALK_NEXT);
				}
				bzero(aw->aw_levtwo,
				    ANON_CHUNK_SIZE * sizeof (uintptr_t));

			} else if (mdb_vread(aw->aw_levtwo,
			    ANON_CHUNK_SIZE * sizeof (uintptr_t), levtwoptr) ==
			    -1) {
				mdb_warn("unable to read anon_map %p's "
				    "second-level map %d at %p",
				    aw->aw_ampp, aw->aw_levone_ndx,
				    levtwoptr);
				return (WALK_ERR);
			}
		}
		slot = aw->aw_levone_ndx * ANON_CHUNK_SIZE + aw->aw_levtwo_ndx;
		anonptr = aw->aw_levtwo[aw->aw_levtwo_ndx];

		/* update the indices for next time */
		aw->aw_levtwo_ndx++;
		if (aw->aw_levtwo_ndx == ANON_CHUNK_SIZE) {
			aw->aw_levtwo_ndx = 0;
			aw->aw_levone_ndx++;
		}

		/* make sure the slot # is in the requested range */
		if (slot >= aw->aw_maxslot) {
			return (WALK_DONE);
		}
	}

	if (anonptr != (uintptr_t)NULL) {
		mdb_vread(&anon, sizeof (anon), anonptr);
		return (wsp->walk_callback(anonptr, &anon, wsp->walk_cbdata));
	}
	if (aw->aw_all) {
		return (wsp->walk_callback((uintptr_t)NULL, NULL,
		    wsp->walk_cbdata));
	}
	return (WALK_NEXT);
}
void
anon_walk_fini(mdb_walk_state_t *wsp)
{
	anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;

	if (aw->aw_levtwo != NULL)
		mdb_free(aw->aw_levtwo, ANON_CHUNK_SIZE * sizeof (uintptr_t));

	mdb_free(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t));
	mdb_free(aw, sizeof (anon_walk_data_t));
}
int
anon_walk_init(mdb_walk_state_t *wsp)
{
	return (anon_walk_init_common(wsp, 0, ULONG_MAX));
}
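/*
 * Example usage (a sketch; "amp" stands for an anon_map address):
 *
 *	> amp::walk anon
 *
 * This streams each non-NULL struct anon pointer in the map; with
 * walk_arg set to ANON_WALK_ALL, NULL slots are reported as well.
 */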
int
segvn_anon_walk_init(mdb_walk_state_t *wsp)
{
	const uintptr_t		svd_addr = wsp->walk_addr;
	uintptr_t		amp_addr;
	uintptr_t		seg_addr;
	struct segvn_data	svd;
	struct anon_map		amp;
	struct seg		seg;

	if (svd_addr == (uintptr_t)NULL) {
		mdb_warn("segvn_anon walk doesn't support global walks\n");
		return (WALK_ERR);
	}
	if (mdb_vread(&svd, sizeof (svd), svd_addr) == -1) {
		mdb_warn("segvn_anon walk: unable to read segvn_data at %p",
		    svd_addr);
		return (WALK_ERR);
	}
	if (svd.amp == NULL) {
		mdb_warn("segvn_anon walk: segvn_data at %p has no anon map\n",
		    svd_addr);
		return (WALK_ERR);
	}
	amp_addr = (uintptr_t)svd.amp;
	if (mdb_vread(&amp, sizeof (amp), amp_addr) == -1) {
		mdb_warn("segvn_anon walk: unable to read amp %p for "
		    "segvn_data %p", amp_addr, svd_addr);
		return (WALK_ERR);
	}
	seg_addr = (uintptr_t)svd.seg;
	if (mdb_vread(&seg, sizeof (seg), seg_addr) == -1) {
		mdb_warn("segvn_anon walk: unable to read seg %p for "
		    "segvn_data %p", seg_addr, svd_addr);
		return (WALK_ERR);
	}
	if ((seg.s_size + (svd.anon_index << PAGESHIFT)) > amp.size) {
		mdb_warn("anon map %p is too small for segment %p\n",
		    amp_addr, seg_addr);
		return (WALK_ERR);
	}

	wsp->walk_addr = amp_addr;
	return (anon_walk_init_common(wsp,
	    svd.anon_index, svd.anon_index + (seg.s_size >> PAGESHIFT)));
}
typedef struct {
	uoff_t		svs_offset;
	uintptr_t	svs_page;
} segvn_sparse_t;

#define	SEGVN_MAX_SPARSE	((128 * 1024) / sizeof (segvn_sparse_t))
typedef struct {
	uintptr_t	svw_svdp;
	struct segvn_data svw_svd;
	struct seg	svw_seg;
	size_t		svw_walkoff;
	ulong_t		svw_anonskip;
	segvn_sparse_t	*svw_sparse;
	size_t		svw_sparse_idx;
	size_t		svw_sparse_count;
	size_t		svw_sparse_size;
	uint8_t		svw_sparse_overflow;
	uint8_t		svw_all;
} segvn_walk_data_t;
static int
segvn_sparse_fill(uintptr_t addr, const void *pp_arg, void *arg)
{
	segvn_walk_data_t	*const svw = arg;
	const page_t		*const pp = pp_arg;
	const uoff_t		offset = pp->p_offset;
	segvn_sparse_t		*const cur =
	    &svw->svw_sparse[svw->svw_sparse_count];

	/* See if the page is of interest */
	if ((uoff_t)(offset - svw->svw_svd.offset) >= svw->svw_seg.s_size) {
		return (WALK_NEXT);
	}
	/* See if we have space for the new entry, then add it. */
	if (svw->svw_sparse_count >= svw->svw_sparse_size) {
		svw->svw_sparse_overflow = 1;
		return (WALK_DONE);
	}
	svw->svw_sparse_count++;
	cur->svs_offset = offset;
	cur->svs_page = addr;
	return (WALK_NEXT);
}
static int
segvn_sparse_cmp(const void *lp, const void *rp)
{
	const segvn_sparse_t *const l = lp;
	const segvn_sparse_t *const r = rp;

	if (l->svs_offset < r->svs_offset) {
		return (-1);
	}
	if (l->svs_offset > r->svs_offset) {
		return (1);
	}
	return (0);
}
/*
 * Builds on the "anon_all" walker to walk all resident pages in a segvn_data
 * structure.  For segvn_datas without an anon structure, it just looks up
 * pages in the vnode.  For segvn_datas with an anon structure, NULL slots
 * pass through to the vnode, and non-NULL slots are checked for residency.
 */
int
segvn_pages_walk_init(mdb_walk_state_t *wsp)
{
	segvn_walk_data_t	*svw;
	struct segvn_data	*svd;

	if (wsp->walk_addr == (uintptr_t)NULL) {
		mdb_warn("segvn walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	svw = mdb_zalloc(sizeof (*svw), UM_SLEEP);
	svw->svw_svdp = wsp->walk_addr;
	svw->svw_anonskip = 0;
	svw->svw_sparse_idx = 0;
	svw->svw_walkoff = 0;
	svw->svw_all = (wsp->walk_arg == SEGVN_PAGES_ALL);

	if (mdb_vread(&svw->svw_svd, sizeof (svw->svw_svd), wsp->walk_addr) ==
	    -1) {
		mdb_warn("failed to read segvn_data at %p", wsp->walk_addr);
		mdb_free(svw, sizeof (*svw));
		return (WALK_ERR);
	}

	svd = &svw->svw_svd;
	if (mdb_vread(&svw->svw_seg, sizeof (svw->svw_seg),
	    (uintptr_t)svd->seg) == -1) {
		mdb_warn("failed to read seg at %p (from %p)",
		    svd->seg, &((struct segvn_data *)(wsp->walk_addr))->seg);
		mdb_free(svw, sizeof (*svw));
		return (WALK_ERR);
	}

	if (svd->amp == NULL && svd->vp == NULL) {
		/* make the walk terminate immediately; no pages */
		svw->svw_walkoff = svw->svw_seg.s_size;

	} else if (svd->amp == NULL &&
	    (svw->svw_seg.s_size >> PAGESHIFT) >= SEGVN_MAX_SPARSE) {
		/*
		 * If we don't have an anon pointer, and the segment is large,
		 * we try to load the in-memory pages into a fixed-size array,
		 * which is then sorted and reported directly.  This is much
		 * faster than doing a mdb_page_lookup() for each possible
		 * offset.
		 *
		 * If the allocation fails, or there are too many pages
		 * in-core, we fall back to looking up the pages individually.
		 */
		svw->svw_sparse = mdb_alloc(
		    SEGVN_MAX_SPARSE * sizeof (*svw->svw_sparse), UM_NOSLEEP);
		if (svw->svw_sparse != NULL) {
			svw->svw_sparse_size = SEGVN_MAX_SPARSE;

			if (mdb_pwalk("page", segvn_sparse_fill, svw,
			    (uintptr_t)svd->vp) == -1 ||
			    svw->svw_sparse_overflow) {
				mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
				    sizeof (*svw->svw_sparse));
				svw->svw_sparse = NULL;
			} else {
				qsort(svw->svw_sparse, svw->svw_sparse_count,
				    sizeof (*svw->svw_sparse),
				    segvn_sparse_cmp);
			}
		}

	} else if (svd->amp != NULL) {
		const char *const layer = (!svw->svw_all && svd->vp == NULL) ?
		    "segvn_anon" : "segvn_anon_all";
		/*
		 * If we're not printing all offsets, and the segvn_data has
		 * no backing VP, we can use the "segvn_anon" walker, which
		 * efficiently skips NULL slots.
		 *
		 * Otherwise, we layer over the "segvn_anon_all" walker
		 * (which reports all anon slots, even NULL ones), so that
		 * segvn_pages_walk_step() knows the precise offset for each
		 * element.  It uses that offset information to look up the
		 * backing pages for NULL anon slots.
		 */
		if (mdb_layered_walk(layer, wsp) == -1) {
			mdb_warn("segvn_pages: failed to layer \"%s\" "
			    "for segvn_data %p", layer, svw->svw_svdp);
			mdb_free(svw, sizeof (*svw));
			return (WALK_ERR);
		}
	}

	wsp->walk_data = svw;
	return (WALK_NEXT);
}
int
segvn_pages_walk_step(mdb_walk_state_t *wsp)
{
	segvn_walk_data_t	*const svw = wsp->walk_data;
	struct seg		*const seg = &svw->svw_seg;
	struct segvn_data	*const svd = &svw->svw_svd;
	uintptr_t		pp;
	page_t			page;

	/* If we've walked off the end of the segment, we're done. */
	if (svw->svw_walkoff >= seg->s_size) {
		return (WALK_DONE);
	}

	/*
	 * If we've got a sparse page array, just send it directly.
	 */
	if (svw->svw_sparse != NULL) {
		uoff_t off;

		if (svw->svw_sparse_idx >= svw->svw_sparse_count) {
			pp = (uintptr_t)NULL;
			if (!svw->svw_all) {
				return (WALK_DONE);
			}
		} else {
			segvn_sparse_t	*const svs =
			    &svw->svw_sparse[svw->svw_sparse_idx];
			off = svs->svs_offset - svd->offset;
			if (svw->svw_all && svw->svw_walkoff != off) {
				pp = (uintptr_t)NULL;
			} else {
				pp = svs->svs_page;
				svw->svw_sparse_idx++;
			}
		}

	} else if (svd->amp == NULL || wsp->walk_addr == (uintptr_t)NULL) {
		/*
		 * If there's no anon, or the anon slot is NULL, look up
		 * <vp, offset>.
		 */
		if (svd->vp != NULL) {
			pp = mdb_page_lookup((uintptr_t)svd->vp,
			    svd->offset + svw->svw_walkoff);
		} else {
			pp = (uintptr_t)NULL;
		}

	} else {
		const struct anon	*const anon = wsp->walk_layer;

		/*
		 * We have a "struct anon"; if it's not swapped out,
		 * look up the page.
		 */
		if (anon->an_vp != NULL || anon->an_off != 0) {
			pp = mdb_page_lookup((uintptr_t)anon->an_vp,
			    anon->an_off);
			if (pp == 0 && mdb_get_state() != MDB_STATE_RUNNING) {
				mdb_warn("walk segvn_pages: segvn_data %p "
				    "offset %ld, anon page <%p, %llx> not "
				    "found.\n", svw->svw_svdp, svw->svw_walkoff,
				    anon->an_vp, anon->an_off);
			}
		} else {
			if (anon->an_pvp == NULL) {
				mdb_warn("walk segvn_pages: useless struct "
				    "anon at %p\n", wsp->walk_addr);
			}
			pp = (uintptr_t)NULL;	/* nothing at this offset */
		}
	}

	svw->svw_walkoff += PAGESIZE;	/* Update for the next call */
	if (pp != (uintptr_t)NULL) {
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}
		return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
	}
	if (svw->svw_all) {
		return (wsp->walk_callback((uintptr_t)NULL, NULL,
		    wsp->walk_cbdata));
	}
	return (WALK_NEXT);
}
void
segvn_pages_walk_fini(mdb_walk_state_t *wsp)
{
	segvn_walk_data_t	*const svw = wsp->walk_data;

	if (svw->svw_sparse != NULL) {
		mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
		    sizeof (*svw->svw_sparse));
	}
	mdb_free(svw, sizeof (*svw));
}
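/*
 * Example usage (a sketch; "svd" stands for a segvn_data address):
 *
 *	> svd::walk segvn_pages | ::page
 *
 * This visits every resident page backing the segment, whether it comes
 * from the anon layer or straight from the vnode.
 */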
/*
 * Grumble, grumble.
 */
#define	SMAP_HASHFUNC(vp, off)	\
	((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
	((off) >> MAXBSHIFT)) & smd_hashmsk)
int
vnode2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	long smd_hashmsk;
	int hash;
	uintptr_t offset = 0;
	struct smap smp;
	uintptr_t saddr, kaddr;
	uintptr_t smd_hash, smd_smap;
	struct seg seg;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_readvar(&smd_hashmsk, "smd_hashmsk") == -1) {
		mdb_warn("failed to read smd_hashmsk");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&smd_hash, "smd_hash") == -1) {
		mdb_warn("failed to read smd_hash");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&smd_smap, "smd_smap") == -1) {
		mdb_warn("failed to read smd_smap");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kaddr, "segkmap") == -1) {
		mdb_warn("failed to read segkmap");
		return (DCMD_ERR);
	}

	if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
		mdb_warn("failed to read segkmap at %p", kaddr);
		return (DCMD_ERR);
	}

	if (argc != 0) {
		const mdb_arg_t *arg = &argv[0];

		if (arg->a_type == MDB_TYPE_IMMEDIATE)
			offset = arg->a_un.a_val;
		else
			offset = (uintptr_t)mdb_strtoull(arg->a_un.a_str);
	}

	hash = SMAP_HASHFUNC(addr, offset);

	if (mdb_vread(&saddr, sizeof (saddr),
	    smd_hash + hash * sizeof (uintptr_t)) == -1) {
		mdb_warn("couldn't read smap at %p",
		    smd_hash + hash * sizeof (uintptr_t));
		return (DCMD_ERR);
	}

	do {
		if (mdb_vread(&smp, sizeof (smp), saddr) == -1) {
			mdb_warn("couldn't read smap at %p", saddr);
			return (DCMD_ERR);
		}

		if ((uintptr_t)smp.sm_vp == addr && smp.sm_off == offset) {
			mdb_printf("vnode %p, offs %p is smap %p, vaddr %p\n",
			    addr, offset, saddr, ((saddr - smd_smap) /
			    sizeof (smp)) * MAXBSIZE + seg.s_base);
			return (DCMD_OK);
		}

		saddr = (uintptr_t)smp.sm_hash;
	} while (saddr != (uintptr_t)NULL);

	mdb_printf("no smap for vnode %p, offs %p\n", addr, offset);
	return (DCMD_OK);
}
/*ARGSUSED*/
int
addr2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t kaddr;
	struct seg seg;
	struct segmap_data sd;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_readvar(&kaddr, "segkmap") == -1) {
		mdb_warn("failed to read segkmap");
		return (DCMD_ERR);
	}

	if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
		mdb_warn("failed to read segkmap at %p", kaddr);
		return (DCMD_ERR);
	}

	if (mdb_vread(&sd, sizeof (sd), (uintptr_t)seg.s_data) == -1) {
		mdb_warn("failed to read segmap_data at %p", seg.s_data);
		return (DCMD_ERR);
	}

	mdb_printf("%p is smap %p\n", addr,
	    ((addr - (uintptr_t)seg.s_base) >> MAXBSHIFT) *
	    sizeof (struct smap) + (uintptr_t)sd.smd_sm);

	return (DCMD_OK);
}
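/*
 * Example usage (a sketch; "vp" is a vnode address and "va" a segkmap
 * virtual address, both invented):
 *
 *	> vp::vnode2smap 0
 *	> va::addr2smap
 *
 * The first maps a <vnode, offset> pair to its smap slot and kernel
 * virtual address; the second maps a segkmap address back to its smap.
 */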