/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <libproc.h>
#include <stdio.h>
#include <string.h>
#include <sys/stack.h>

#include "leaky_impl.h"
#include "misc.h"
#include "proc_kludges.h"

#include "umem_pagesize.h"
/*
 * This file defines the libumem target for ../genunix/leaky.c.
 *
 * See ../genunix/leaky_impl.h for the target interface definition.
 */

/*
 * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
 * TYPE_MMAP and TYPE_SBRK.
 */
#define	TYPE_MMAP	0		/* lkb_data is the size */
#define	TYPE_SBRK	1		/* lkb_data is the size */
#define	TYPE_VMEM	2		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3		/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L
/*
 * Create a lkm_bufctl from a pointer and a type.  The type tag lives in
 * the low two bits of the pointer, which are always zero because the
 * tagged structures are at least 4-byte aligned.
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))
static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;
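/*
 * LEAKY_INBRK() tests both bounds with a single unsigned comparison: if
 * ptr is below leak_brkbase, the subtraction wraps around to a huge
 * value and the comparison fails.
 */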
#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)

typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

typedef struct leaky_maps {
	leaky_seg_info_t	*lm_segs;
	uintptr_t		lm_seg_count;
	uintptr_t		lm_seg_max;

	pstatus_t		*lm_pstatus;

	leak_mtab_t		**lm_lmp;
} leaky_maps_t;
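/*
 * Walker callback: record one leak_mtab_t entry per audited bufctl, using
 * the buffer address as the base and the bufctl as the control word.
 */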
/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}
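/*
 * Record the extent of an allocated vmem_seg; leaky_vmem() below arranges
 * for this to be called on every allocated segment of the umem_oversize
 * and umem_memalign arenas.
 */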
static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
	return (WALK_NEXT);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
		    addr);

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);

	return (WALK_NEXT);
}

static int
leaky_seg_cmp(const void *l, const void *r)
{
	const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
	const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;

	if (lhs->ls_start < rhs->ls_start)
		return (-1);
	if (lhs->ls_start > rhs->ls_start)
		return (1);

	return (0);
}
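/*
 * Binary search for the segment containing addr; the list must already
 * be sorted with leaky_seg_cmp().  Returns the matching index, or -1 if
 * addr falls inside no segment.
 */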
static ssize_t
leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
{
	ssize_t left = 0, right = count - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < listp[guess].ls_start) {
			right = guess - 1;
			continue;
		}

		if (addr >= listp[guess].ls_end) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

/*ARGSUSED*/
static int
leaky_count(uintptr_t addr, void *unused, size_t *total)
{
	++*total;

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
{
	leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;

	if (seg->vs_start == seg->vs_end && seg->vs_start == 0)
		return (WALK_NEXT);

	if (lmp->lm_seg_count++ >= lmp->lm_seg_max)
		return (WALK_ERR);

	my_si->ls_start = seg->vs_start;
	my_si->ls_end = seg->vs_end;

	return (WALK_NEXT);
}
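/*
 * Walker callback for the process's address space mappings.  Any
 * writable, private, anonymous mapping that overlaps neither the brk nor
 * a span of the vmem heap is recorded as a candidate mmap(2) leak.
 */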
/* ARGSUSED */
static int
leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
    leaky_maps_t *lmp)
{
	uintptr_t start = pmp->pr_vaddr;
	uintptr_t end = pmp->pr_vaddr + pmp->pr_size;

	leak_mtab_t *lm;
	pstatus_t *Psp = lmp->lm_pstatus;

	uintptr_t brk_start = Psp->pr_brkbase;
	uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;

	int has_brk = 0;
	int in_vmem = 0;

	/*
	 * This checks if there is any overlap between the segment and the brk.
	 */
	if (end > brk_start && start < brk_end)
		has_brk = 1;

	if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)
		in_vmem = 1;

	/*
	 * We only want anonymous, mmaped memory.  That means:
	 *
	 * 1. Must be read-write
	 * 2. Cannot be shared
	 * 3. Cannot have backing
	 * 4. Cannot be in the brk
	 * 5. Cannot be part of the vmem heap.
	 */
	if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
	    (pmp->pr_mflags & MA_SHARED) == 0 &&
	    (pmp->pr_mapname[0] == 0) &&
	    !has_brk &&
	    !in_vmem) {
		dprintf(("mmaped region: [%p, %p)\n", start, end));
		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = start;
		lm->lkm_limit = end;
		lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
	}

	return (WALK_NEXT);
}
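/*
 * Account for the parts of the brk that the vmem heap does not cover.
 * The heap spans inside [brkbase, brkend) are already in lm_segs; the
 * gaps between them (and before the first span and after the last) are
 * recorded as sbrk(2) memory.
 */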
static void
leaky_handle_sbrk(leaky_maps_t *lmp)
{
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leak_mtab_t *lm;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {
			if (first == -1)
				first = x;
			last = x;
		}
	}

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);
	} else {
		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,
			    curbrk));
		}

		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,
				    segs[x].ls_start));

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,
				    LKM_CTL_MEMORY);
			}
			curbrk = segs[x].ls_end;
		}

		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",
			    curbrk, brkend));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
		}
	}
}
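/*
 * Build the mtab entries for all non-umem memory: walk up from heap_arena
 * to the top-level source arena, collect and sort its spans, then record
 * the anonymous mappings and brk regions that fall outside them.
 */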
static int
leaky_handle_anon_mappings(leak_mtab_t **lmp)
{
	leaky_maps_t lm;

	vmem_t *heap_arena;
	vmem_t *vm_next;
	vmem_t *heap_top;
	vmem_t vmem;

	pstatus_t Ps;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}
	lm.lm_pstatus = &Ps;

	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");
		return (DCMD_ERR);
	}

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");
		return (DCMD_ERR);
	}

	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);
			return (DCMD_ERR);
		}
		heap_top = vm_next;
	}

	lm.lm_seg_count = 0;
	lm.lm_seg_max = 0;

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);
		return (DCMD_ERR);
	}

	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),
	    UM_SLEEP | UM_GC);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",
		    heap_top);
		return (DCMD_ERR);
	}

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);
		return (DCMD_ERR);
	}

	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	lm.lm_lmp = lmp;

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();

	leaky_handle_sbrk(&lm);

	return (DCMD_OK);
}
static int
leaky_interested(const umem_cache_t *c)
{
	vmem_t vmem;

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from either the umem_default or
	 * umem_firewall vmem arena, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "umem_default") != 0 &&
	    strcmp(vmem.vm_name, "umem_firewall") != 0) {
		dprintf(("Skipping cache '%s' with arena '%s'\n",
		    c->cache_name, vmem.vm_name));
		return (0);
	}

	return (1);
}
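/*
 * Estimate the number of allocated buffers in a cache, so the caller can
 * size the mtab before filling it.
 */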
/*ARGSUSED*/
static int
leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += umem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}
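/*
 * Fill in mtab entries for a cache's allocated buffers.  Caches with
 * audit content (UMF_AUDIT) are walked by bufctl, so stack traces are
 * available; non-audit caches are walked by buffer address and tagged
 * with the cache itself.
 */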
/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & UMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "umem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}

	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk umem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}

	return (WALK_NEXT);
}
static char *map_head = "%-?s %?s %-10s used reason\n";
static char *map_fmt = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10	/* must match the third field's width in map_fmt */

static void
leaky_mappings_header(void)
{
	dprintf((map_head, "mapping", "", "backing"));
}
/* ARGSUSED */
static int
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
{
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,
	    db_mp_name));

#define	USE(rsn)	dprintf_cont(("yes %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no %s\n", (rsn)))

	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
		IGNORE("read-only");
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");		/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		IGNORE("in brk");
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
		IGNORE("stack");
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		USE("a.out data");
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");	/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {
		IGNORE("anon");
	} else {
		IGNORE("");		/* default to ignoring */
	}

#undef	USE
#undef	IGNORE

	return (WALK_NEXT);
}
/*ARGSUSED*/
static int
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
{
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
	return (0);
}

/*ARGSUSED*/
static int
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
{
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;
	int i;
	uintptr_t sp;
	uintptr_t addr;
	size_t size;

	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);

	return (0);
}
/*
 * Handles processing various proc-related things:
 * 1. calls leaky_process_lwp on each LWP
 * 2. leaky_greps the bss/data of libraries and a.out, and the a.out stack.
 */
static int
leaky_process_proc(void)
{
	pstatus_t Ps;
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
	    &Ps) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (-1);
	}

	prockludge_remove_walkers();

	return (0);
}
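/*
 * Pick the "interesting" caller for a leak: the first frame in the stack
 * whose symbol does not resolve into libumem.so itself.  If every frame
 * is in libumem, fall back to the deepest one.
 */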
static void
leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
    uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "libumem.so", 10) == 0)
			continue;

		*pcp = pc;
		return;
	}

	/*
	 * We're only here if the entire call chain is in libumem.so;
	 * this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}
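/*
 * Order leaks by caller symbol name, then caller address, then size, so
 * that leaks with a common origin sort together in the report.
 */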
int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if (rval = strcmp(lbuf, rbuf))
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}
/*ARGSUSED*/
int
leaky_subr_estimate(size_t *estp)
{
	if (umem_ready == 0) {
		mdb_warn(
		    "findleaks: umem is not loaded in the address space\n");
		return (DCMD_ERR);
	}

	if (umem_ready == UMEM_READY_INIT_FAILED) {
		mdb_warn("findleaks: umem initialization failed -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (umem_ready != UMEM_READY) {
		mdb_warn("findleaks: No allocations have occurred -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: No allocated buffers found.\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
	    estp) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();

	return (DCMD_OK);
}
int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
		mdb_warn("unable to process mappings\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	if (leaky_process_proc() == DCMD_ERR) {
		mdb_warn("failed to process proc");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
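/*
 * Convert an unreferenced mtab entry into a leak record.  The control
 * word encodes how the memory was allocated, which determines where the
 * stack trace, timestamp, and size come from.
 */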
void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	uint_t depth;

	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_BUFCTL:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;

		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);
		break;
	case LKM_CTL_VMSEG:
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	case LKM_CTL_MEMORY:
		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		else
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		break;
	case LKM_CTL_CACHE:
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);
		break;
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_MMAP:
		lk_vmem_seen = 0;
		break;

	case TYPE_SBRK:
	case TYPE_VMEM:
		return;			/* don't zero counts */

	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;

	case TYPE_UMEM:
		lk_umem_seen = 0;
		break;

	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}
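/*
 * Print one line (or, when verbose, a detailed block) per coalesced leak.
 * The MMAP, SBRK and VMEM types share one table and one running total,
 * which is why leaky_subr_dump_start() must not reset the counts between
 * them.
 */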
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	umem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		lk_vmem_seen = 1;
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
	}

	switch (lkb->lkb_type) {
	case TYPE_MMAP:
	case TYPE_SBRK:
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		for (; lkb != NULL; lkb = lkb->lkb_next) {
			if (!verbose)
				mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
				    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			else
				mdb_printf("%s leak: [%p, %p), %ld bytes\n",
				    nm_lc, lkb->lkb_addr,
				    lkb->lkb_addr + lkb->lkb_data,
				    lkb->lkb_data);
			lk_ttl++;
			lk_bytes += lkb->lkb_data;
		}
		return;

	case TYPE_VMEM:
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("umem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("umem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    %s%s%ssample addr %p\n",
			    (caller == 0) ? "" : "caller ", c,
			    (caller == 0) ? "" : ", ", lkb->lkb_addr);
		}
		return;

	case TYPE_UMEM:
		if (!lk_umem_seen) {
			lk_umem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,
			    &caller);

			mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}
void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leak;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leak = "oversized leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	case TYPE_UMEM:
		if (!lk_umem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}
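/*
 * Re-read the underlying vmem_seg or bufctl for a leak and hand it to the
 * caller's walker callback; leak types with no backing structure just get
 * the leak's address.
 */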
int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_UMEM:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
		    lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, bcp, cbdata));

	default:
		return (cb(lkb->lkb_addr, NULL, cbdata));
	}
}