/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <sys/stack.h>

#include "leaky_impl.h"
#include "proc_kludges.h"
#include "umem_pagesize.h"
/*
 * This file defines the libumem target for ../genunix/leaky.c.
 *
 * See ../genunix/leaky_impl.h for the target interface definition.
 */

/*
 * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
 * TYPE_MMAP and TYPE_SBRK.
 */
#define	TYPE_MMAP	0	/* lkb_data is the size */
#define	TYPE_SBRK	1	/* lkb_data is the size */
#define	TYPE_VMEM	2	/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3	/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4	/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/*
 * Create a lkm_bufctl from a pointer and a type.
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))
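/*
 * Illustrative sketch: assuming the tracked pointers (bufctls, vmem_segs,
 * caches, region starts) are at least 4-byte aligned, their two low-order
 * bits are zero and can carry the LKM_CTL_* type:
 *
 *	uintptr_t ctl = LKM_CTL(bcp, LKM_CTL_BUFCTL);
 *
 *	LKM_CTLPTR(ctl)		-- recovers the original pointer value
 *	LKM_CTLTYPE(ctl)	-- recovers LKM_CTL_BUFCTL
 */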
static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;

#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)
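/*
 * Note: the subtraction is done on unsigned uintptr_t values, so a pointer
 * below leak_brkbase wraps to a very large value and fails the comparison.
 * The single test therefore covers both ends of the range
 * [leak_brkbase, leak_brkbase + leak_brksize).
 */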
typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

typedef struct leaky_maps {
	leaky_seg_info_t	*lm_segs;
	uintptr_t		lm_seg_count;
	uintptr_t		lm_seg_max;

	pstatus_t		*lm_pstatus;

	leak_mtab_t		**lm_lmp;
} leaky_maps_t;
leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)

	*est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);
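/*
 * qsort(3C) comparator: orders leaky_seg_info_t entries by ls_start so that
 * the sorted array can be binary-searched by leaky_seg_search() below.
 */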
leaky_seg_cmp(const void *l, const void *r)
	const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
	const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;

	if (lhs->ls_start < rhs->ls_start)
	if (lhs->ls_start > rhs->ls_start)
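/*
 * Binary search over the sorted segment list for the entry whose
 * [ls_start, ls_end) range contains addr; callers treat a result of -1 as
 * "addr is not covered by any heap segment".
 */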
leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
	ssize_t left = 0, right = count - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < listp[guess].ls_start) {
		if (addr >= listp[guess].ls_end) {
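/*
 * Walk callback that simply counts how many times it is invoked; used to
 * size the segment array and the mapping estimate before the real walks run.
 */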
leaky_count(uintptr_t addr, void *unused, size_t *total)
leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
	leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;

	if (seg->vs_start == seg->vs_end && seg->vs_start == 0)

	if (lmp->lm_seg_count++ >= lmp->lm_seg_max)

	my_si->ls_start = seg->vs_start;
	my_si->ls_end = seg->vs_end;
leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
	uintptr_t start = pmp->pr_vaddr;
	uintptr_t end = pmp->pr_vaddr + pmp->pr_size;

	pstatus_t *Psp = lmp->lm_pstatus;

	uintptr_t brk_start = Psp->pr_brkbase;
	uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;

	/*
	 * This checks if there is any overlap between the segment and the brk.
	 */
	if (end > brk_start && start < brk_end)

	if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)

	/*
	 * We only want anonymous, mmaped memory.  That means:
	 *
	 * 1. Must be read-write
	 * 2. Cannot be shared
	 * 3. Cannot have backing
	 * 4. Cannot be in the brk
	 * 5. Cannot be part of the vmem heap.
	 */
	if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
	    (pmp->pr_mflags & MA_SHARED) == 0 &&
	    (pmp->pr_mapname[0] == 0) &&
		dprintf(("mmaped region: [%p, %p)\n", start, end));
		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = start;
		lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
leaky_handle_sbrk(leaky_maps_t *lmp)
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,

		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,

			curbrk = segs[x].ls_end;

		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
leaky_handle_anon_mappings(leak_mtab_t **lmp)
	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");

	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");

	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);

	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);

	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();

	prockludge_remove_walkers();

	leaky_handle_sbrk(&lm);
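/*
 * A cache is "interesting" only if it allocates from the umem_default or
 * umem_firewall vmem arena; caches backed by any other arena are skipped by
 * the estimate and fill passes below.
 */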
leaky_interested(const umem_cache_t *c)
	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);

	/*
	 * If this cache isn't allocating from either the umem_default or
	 * umem_firewall vmem arena, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "umem_default") != 0 &&
	    strcmp(vmem.vm_name, "umem_firewall") != 0) {
		dprintf(("Skipping cache '%s' with arena '%s'\n",
		    c->cache_name, vmem.vm_name));
leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
	if (!leaky_interested(c))

	*est += umem_estimate_allocated(addr, c);
leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
	leak_mtab_t *lm = *lmp;

	int audit = (c->cache_flags & UMF_AUDIT);

	if (!leaky_interested(c))

		cb = (mdb_walk_cb_t)leaky_mtab;
		cb = (mdb_walk_cb_t)leaky_mtab_addr;

	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk umem for cache %p (%s)", addr,

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
static char *map_head = "%-?s %?s %-10s used reason\n";
static char *map_fmt = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10	/* must match the third field's width in map_fmt */

leaky_mappings_header(void)
	dprintf((map_head, "mapping", "", "backing"));
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,

#define	USE(rsn)	dprintf_cont(("yes %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no %s\n", (rsn)))

	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");	/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");	/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {

		IGNORE("");	/* default to ignoring */
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;

	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);
/*
 * Handles processing various proc-related things:
 * 1. calls leaky_process_lwp on each LWP
 * 2. leaky_greps the bss/data of libraries and a.out, and the a.out stack.
 */
leaky_process_proc(void)
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");

	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");

	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();

	prockludge_remove_walkers();
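/*
 * Picks the "caller" to report for a leak: the first frame in the stack
 * trace that does not resolve into libumem.so itself.  If every frame is
 * inside libumem.so, the last caller is used instead.
 */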
leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
	for (i = 0; i < depth; i++) {
		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)

		if (strncmp(buf, "libumem.so", 10) == 0)

	/*
	 * We're only here if the entire call chain is in libumem.so;
	 * this shouldn't happen, but we'll just use the last caller.
	 */
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if (rval = strcmp(lbuf, rbuf))

	if (lcaller < rcaller)
	if (lcaller > rcaller)

	if (lhs->lkb_data < rhs->lkb_data)
	if (lhs->lkb_data > rhs->lkb_data)
leaky_subr_estimate(size_t *estp)
	if (umem_ready == 0) {
		    "findleaks: umem is not loaded in the address space\n");

	if (umem_ready == UMEM_READY_INIT_FAILED) {
		mdb_warn("findleaks: umem initialization failed -- no "
		    "possible leaks.\n");

	if (umem_ready != UMEM_READY) {
		mdb_warn("findleaks: No allocations have occurred -- no "
		    "possible leaks.\n");

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");

	mdb_warn("findleaks: No allocated buffers found.\n");

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();

	prockludge_remove_walkers();
leaky_subr_fill(leak_mtab_t **lmpp)
	if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
		mdb_warn("unable to process mappings\n");

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");

	if (leaky_process_proc() == DCMD_ERR) {
		mdb_warn("failed to process proc");
leaky_subr_add_leak(leak_mtab_t *lmp)
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);

	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",

		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));

		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);

		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);

		mdb_warn("internal error: invalid leak_bufctl_t\n");
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;
leaky_subr_dump_start(int type)
		return;		/* don't zero counts */
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
	const leak_bufctl_t *cur;
	size_t min, max, size;
	char c[MDB_SYM_NAMLEN];
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");

	switch (lkb->lkb_type) {
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		for (; lkb != NULL; lkb = lkb->lkb_next) {
			mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			mdb_printf("%s leak: [%p, %p), %ld bytes\n",
			    nm_lc, lkb->lkb_addr,
			    lkb->lkb_addr + lkb->lkb_data,
			lk_bytes += lkb->lkb_data;

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

		(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",

		leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,

		mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
		    lkb->lkb_addr, caller);

		mdb_printf("umem_oversize leak: 1 vmem_seg, "
		    "%ld bytes\n", lk_bytes);
		mdb_printf("umem_oversize leak: %d vmem_segs, "
		    "%s bytes each, %ld bytes total\n",
		    lk_ttl, sz, lk_bytes);

		v.a_type = MDB_TYPE_STRING;

		if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
		    DCMD_ADDRSPEC, 1, &v) == -1) {
			mdb_warn("'%p::vmem_seg -v' failed",
		if (!lk_cache_seen) {
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");

		mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
		    lkb->lkb_dups + 1, lkb->lkb_addr, c);

		mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
		    cache.cache_name, lk_bytes);
		mdb_printf("%s leak: %d buffers, "
		    "%ld bytes each, %ld bytes total,\n",
		    cache.cache_name, lk_ttl,
		    cache.cache_bufsize, lk_bytes);
		mdb_printf(" %s%s%ssample addr %p\n",
		    (caller == 0) ? "" : "caller ", c,
		    (caller == 0) ? "" : ", ", lkb->lkb_addr);

		if (!lk_umem_seen) {
			if (lk_vmem_seen || lk_cache_seen)
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,

		mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
		    lkb->lkb_dups + 1, lkb->lkb_addr, caller);

		mdb_printf("%s leak: 1 buffer, %ld bytes\n",
		    cache.cache_name, lk_bytes);
		mdb_printf("%s leak: %d buffers, "
		    "%ld bytes each, %ld bytes total\n",
		    cache.cache_name, lk_ttl,
		    cache.cache_bufsize, lk_bytes);

		v.a_type = MDB_TYPE_STRING;
		v.a_un.a_str = "-v";

		if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
		    DCMD_ADDRSPEC, 1, &v) == -1) {
			mdb_warn("'%p::bufctl -v' failed",
leaky_subr_dump_end(int type)
		leak = "oversized leak";
		width = sizeof (uintptr_t) * 2;
		width = sizeof (uintptr_t) * 2;

	for (i = 0; i < 72; i++)

	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (lkb->lkb_type) {
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",

		return (cb(lkb->lkb_addr, &vs, cbdata));

		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
		    lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",

		return (cb(lkb->lkb_addr, bcp, cbdata));

		return (cb(lkb->lkb_addr, NULL, cbdata));