/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "leaky_impl.h"
/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */
#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the bufctl's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))
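/*
 * Illustrative sketch (not part of the original comments): bufctl, vmem_seg,
 * and kmem_cache pointers are all at least 4-byte aligned, so the two
 * low-order bits selected by LKM_CTL_MASK are free to carry the LKM_CTL_*
 * type alongside the pointer itself:
 *
 *	uintptr_t ctl = LKM_CTL(bcp, LKM_CTL_BUFCTL);
 *
 *	LKM_CTLPTR(ctl)		-- recovers (uintptr_t)bcp
 *	LKM_CTLTYPE(ctl)	-- recovers LKM_CTL_BUFCTL
 */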
static int kmem_lite_count = 0;	/* cache of the kernel's version */
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
leaky_vmem_interested(const vmem_t *vmem)
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
	if (!leaky_vmem_interested(vmem))

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for kmem_oversize (%p)", addr);
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
	if (!leaky_vmem_interested(vmem))

	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);
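	/*
	 * Worked example (illustrative, made-up numbers): if the
	 * kmem_oversize arena's kstat shows vk_alloc == 10542 and
	 * vk_free == 10240, then 10542 - 10240 == 302 segments are still
	 * outstanding, and the estimate grows by 302 leak_mtab_t slots for
	 * this arena.
	 */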
leaky_interested(const kmem_cache_t *c)
	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
	if (!leaky_interested(c))

	*est += kmem_estimate_allocated(addr, c);
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
	leak_mtab_t *lm = *lmp;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))

	cb = (mdb_walk_cb_t)leaky_mtab;

	cb = (mdb_walk_cb_t)leaky_mtab_addr;

	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
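	/*
	 * Summary (added for clarity; not original commentary): the two cb
	 * assignments above presumably correspond to the audit flag --
	 * KMF_AUDIT caches are walked by bufctl via leaky_mtab(), other
	 * caches by raw buffer address via leaky_mtab_addr().  The loop
	 * above then fixes up the entries just added for this cache,
	 * deriving lkm_limit from cache_bufsize and tagging each entry
	 * LKM_CTL_CACHE.
	 */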
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
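	/*
	 * Layout sketch (added for illustration; not in the original
	 * comments): KMEM_BUFTAG(c, addr) locates the buftag just past the
	 * client data, and while a KMF_LITE buffer is free and constructed
	 * its first 8 bytes of client data are parked in bt_redzone -- so
	 * the redzone must be grepped for pointers in addition to the
	 * buffer body itself.
	 */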
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
	if (!leaky_interested(c))

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
	char name[MODMAXNAMELEN];

	if (m->mod_mp == NULL)

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);

	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
	size = *pagesize - (stk & (*pagesize - 1));

	leaky_grep(stk, size);
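	/*
	 * Worked example (illustrative numbers only): with *pagesize ==
	 * 0x2000 and stk == 0x2a103f80, (stk & (*pagesize - 1)) is 0x1f80,
	 * so size == 0x2000 - 0x1f80 == 0x80 -- only the slack between
	 * t_stk and the next page boundary gets grepped here.
	 */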
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);
	mdb_ctf_id_t id, rid;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be zero.
	 */
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");

	ncounters = sym.st_size / sizeof (counters);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];
		if (addr != (uintptr_t)NULL)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
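	/*
	 * Illustrative numbers (assumed for the example, not taken from the
	 * source): if the page_counters symbol is 64 bytes on a 64-bit
	 * kernel, ncounters is 64 / 8 == 8, and each of the eight non-zero
	 * entries is then grepped for hwpm_size * max_mem_nodes bytes of
	 * hw_page_map_t data.
	 */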
leaky_subr_estimate(size_t *estp)
	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");

	if (state != MDB_STATE_STOPPED && panicstr == (uintptr_t)NULL) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");

	mdb_warn("findleaks: no buffers found\n");

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
leaky_subr_fill(leak_mtab_t **lmpp)
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");

	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
	unsigned long ps = PAGESIZE;
	uintptr_t kstat_arena;

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");

	/*
	 * If kmdb is loaded, we need to walk its module list, since kmdb
	 * modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) &&
	    (dmods != (uintptr_t)NULL))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");

	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
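	/*
	 * Summary (added for clarity; not original commentary): the scan
	 * roots grepped by this pass are the free, constructed buffers of
	 * every interesting cache (leaky_scan_cache), the module structure,
	 * data, and bss of each loaded module (leaky_modctl, including
	 * kmdb's own modctls when kdi_dmods is set), thread stacks from the
	 * "thread" and "deathrow" walks (leaky_thread), and the kstat data
	 * allocated from kstat_arena (leaky_kstat).
	 */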
leaky_subr_add_leak(leak_mtab_t *lmp)
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);

		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, 0);
	case LKM_CTL_CACHE: {
		kmem_buftag_lite_t bt;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];

		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);

	mdb_warn("internal error: invalid leak_bufctl_t\n");
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
	for (i = 0; i < depth; i++) {
		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)

		if (strncmp(buf, "kmem_", 5) == 0)

		if (strncmp(buf, "vmem_", 5) == 0)

	/*
	 * We're only here if the entire call chain consists of "kmem_" and
	 * "vmem_" frames; this shouldn't happen, but we'll just use the
	 * last caller.
	 */
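	/*
	 * Hypothetical example (stack invented for illustration): for a
	 * saved stack of kmem_cache_alloc+0x50, kmem_zalloc+0x28,
	 * segvn_create+0x9c, ..., the loop above skips the "kmem_"- and
	 * "vmem_"-prefixed frames and reports segvn_create as the CALLER;
	 * only when every frame carries one of those prefixes does the
	 * fallback to the last frame apply.
	 */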
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)

	if (lcaller < rcaller)

	if (lcaller > rcaller)

	if (lhs->lkb_data < rhs->lkb_data)

	if (lhs->lkb_data > rhs->lkb_data)
/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;
leaky_subr_dump_start(int type)
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
	const leak_bufctl_t *cur;
	size_t min, max, size;
	char c[MDB_SYM_NAMLEN];

	switch (lkb->lkb_type) {
	if (!verbose && !lk_vmem_seen) {
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");

	min = max = lkb->lkb_data;

	for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
		size = cur->lkb_data;

	(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);

	(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
	    min, max);

	leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c, &caller);

	(void) mdb_snprintf(c, sizeof (c),

	(void) mdb_snprintf(c, sizeof (c),

	mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
	    lkb->lkb_addr, c);

	mdb_printf("kmem_oversize leak: 1 vmem_seg, "
	    "%ld bytes\n", lk_bytes);

	mdb_printf("kmem_oversize leak: %d vmem_segs, "
	    "%s bytes each, %ld bytes total\n",
	    lk_ttl, sz, lk_bytes);

	v.a_type = MDB_TYPE_STRING;

	if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
	    DCMD_ADDRSPEC, 1, &v) == -1) {
		mdb_warn("'%p::vmem_seg -v' failed",
		    lkb->lkb_addr);
	if (!verbose && !lk_cache_seen) {
		mdb_printf("%-?s %7s %?s %s\n",
		    "CACHE", "LEAKED", "BUFFER", "CALLER");

	if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
		/*
		 * This _really_ shouldn't happen; we shouldn't
		 * have been able to get this far if this
		 * cache wasn't readable.
		 */
		mdb_warn("can't read cache %p for leaked "
		    "buffer %p", lkb->lkb_data, lkb->lkb_addr);

	lk_ttl += lkb->lkb_dups + 1;
	lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

	caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];

	(void) mdb_snprintf(c, sizeof (c), "%a", caller);

	(void) mdb_snprintf(c, sizeof (c),
	    "%s", (verbose) ? "" : "?");

	mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
	    lkb->lkb_dups + 1, lkb->lkb_addr, c);

	mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
	    cache.cache_name, lk_bytes);

	mdb_printf("%s leak: %d buffers, "
	    "%ld bytes each, %ld bytes total,\n",
	    cache.cache_name, lk_ttl,
	    cache.cache_bufsize, lk_bytes);

	mdb_printf(" sample addr %p%s%s\n",
	    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
	if (!verbose && !lk_kmem_seen) {
		if (lk_vmem_seen || lk_cache_seen)

		mdb_printf("%-?s %7s %?s %s\n",
		    "CACHE", "LEAKED", "BUFCTL", "CALLER");

	if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
		/*
		 * This _really_ shouldn't happen; we shouldn't
		 * have been able to get this far if this
		 * cache wasn't readable.
		 */
		mdb_warn("can't read cache %p for leaked "
		    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);

	lk_ttl += lkb->lkb_dups + 1;
	lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

	leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c, &caller);

	(void) mdb_snprintf(c, sizeof (c),

	(void) mdb_snprintf(c, sizeof (c),

	mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
	    lkb->lkb_dups + 1, lkb->lkb_addr, c);

	mdb_printf("%s leak: 1 buffer, %ld bytes\n",
	    cache.cache_name, lk_bytes);

	mdb_printf("%s leak: %d buffers, "
	    "%ld bytes each, %ld bytes total\n",
	    cache.cache_name, lk_ttl,
	    cache.cache_bufsize, lk_bytes);

	v.a_type = MDB_TYPE_STRING;

	if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
	    DCMD_ADDRSPEC, 1, &v) == -1) {
		mdb_warn("'%p::bufctl -v' failed",
		    lkb->lkb_addr);
leaky_subr_dump_end(int type)
	leaks = "kmem_oversize leak";

	width = sizeof (uintptr_t) * 2;

	width = sizeof (uintptr_t) * 2;

	for (i = 0; i < 72; i++)

	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
	kmem_bufctl_audit_t bc;

	switch (lkb->lkb_type) {
	if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
		mdb_warn("unable to read vmem_seg at %p",
		    lkb->lkb_addr);

	return (cb(lkb->lkb_addr, &vs, cbdata));

	return (cb(lkb->lkb_addr, NULL, cbdata));

	if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
		mdb_warn("unable to read bufctl at %p",
		    lkb->lkb_addr);

	return (cb(lkb->lkb_addr, &bc, cbdata));