/*--------------------------------------------------------------------*/
/*--- Massif: a heap profiling tool.                     ms_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Massif, a Valgrind tool for profiling memory
   usage of programs.

   Copyright (C) 2003-2013 Nicholas Nethercote

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Todo -- nice, but less critical:
// - do a graph-drawing test
// - make file format more generic.  Obstacles:
//   - unit prefixes are not generic
//   - preset column widths for stats are not generic
//   - preset column headers are not generic
//   - "Massif arguments:" line is not generic
// - do snapshots on some specific client requests
//   - "show me the extra allocations since the last snapshot"
//   - "start/stop logging" (eg. quickly skip boring bits)
// - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
//   Give each graph a title.  (try to do it generically!)
// - make --show-below-main=no work
// - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
//   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
//   m_commandline.c:add_args_from_string() needs to respect single quotes.
// - With --stack=yes, want to add a stack trace for detailed snapshots so
//   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
//   possibly useful even with --stack=no? (Andi Yin)
// - To run the benchmarks:
//
//     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
//     time valgrind --tool=massif --depth=100 konqueror
//
//   The other benchmarks don't do much allocation, and so give similar speeds.
//
// - Timing results on 'nevermore' (njn's machine) as of r7013:
//
//     heap      0.53s  ma:12.4s (23.5x, -----)
//     tinycc    0.46s  ma: 4.9s (10.7x, -----)
//     many-xpts 0.08s  ma: 2.0s (25.0x, -----)
//     konqueror 29.6s real  0:21.0s user
//
//   [Introduction of --time-unit=i as the default slowed things down by
//
// - get_XCon accounts for about 9% of konqueror startup time.  Try
//   keeping XPt children sorted by 'ip' and use binary search in get_XCon.
//   Requires factoring out binary search code from various places into a
//   VG_(bsearch) function.
// Todo -- low priority:
// - In each XPt, record both bytes and the number of allocations, and
//   possibly the global number of allocations.
// - (Andy Lin) Give a stack trace on detailed snapshots?
// - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
//   than a certain size!  Because: "linux's malloc allows to set a
//   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
//   be handled directly by the kernel, and are guaranteed to be returned to
//   the system when freed.  So we needed to profile only blocks below this
//   threshold."
// File format working notes:

desc: --heap-admin=foo
n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
 n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
  n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
   n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
    n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
     n1: 5 0x8049821: (within /bin/date)
      n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)

n_events: n  time(ms)  total(B)  useful-heap(B)  admin-heap(B)  stacks(B)
- each snapshot specifies an x-axis value and one or more y-axis values.
- can display the y-axis values separately if you like
- can completely separate connection between snapshots and trees.

- how to specify and scale/abbreviate units on axes?
- how to combine multiple values into the y-axis?
--------------------------------------------------------------------------------
Command:            date
Massif arguments:   --heap-admin=foo
ms_print arguments: massif.out
--------------------------------------------------------------------------------

    |   ::@ :@ :@ :@ :::#  ::  :  ::::
  0 +-----------------------------------@---@---@-----@--@---#-------------->ms
    0                                                                     713

Number of snapshots: 50
 Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]

--------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
--------------------------------------------------------------------------------
  0              0                0                0             0            0
100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
//---------------------------------------------------------------------------

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcfile.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_xarray.h"
#include "pub_tool_clientstate.h"
#include "pub_tool_gdbserver.h"

#include "pub_tool_clreq.h"           // For {MALLOC,FREE}LIKE_BLOCK
//------------------------------------------------------------//
//--- Overview of operation                                ---//
//------------------------------------------------------------//

// The size of the stacks and heap is tracked.  The heap is tracked in a lot
// of detail, enough to tell how many bytes each line of code is responsible
// for, more or less.  The main data structure is a tree representing the
// call tree beneath all the allocation functions like malloc().
// (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
// the page level, and each page is treated much like a heap block.  We use
// "heap" throughout below to cover this case because the concepts are all the
// same.)
//
// "Snapshots" are recordings of the memory usage.  There are two basic
// kinds:
// - Normal:  these record the current time, total memory size, total heap
//   size, heap admin size and stack size.
// - Detailed: these record those things in a normal snapshot, plus a very
//   detailed XTree (see below) indicating how the heap is structured.
//
// Snapshots are taken every so often.  There are two storage classes of
// snapshots:
// - Temporary:  Massif does a temporary snapshot every so often.  The idea
//   is to always have a certain number of temporary snapshots around.  So
//   we take them frequently to begin with, but decreasingly often as the
//   program continues to run.  Also, we remove some old ones after a while.
//   Overall it's a kind of exponential decay thing.  Most of these are
//   normal snapshots, a small fraction are detailed snapshots.
// - Permanent:  Massif takes a permanent (detailed) snapshot in some
//   circumstances.  They are:
//   - Peak snapshot:  When the memory usage peak is reached, it takes a
//     snapshot.  It keeps this, unless the peak is subsequently exceeded,
//     in which case it will overwrite the peak snapshot.
//   - User-requested snapshots:  These are done in response to client
//     requests.  They are always kept.
// Used for printing things when clo_verbosity > 1.
#define VERB(verb, format, args...) \
   if (VG_(clo_verbosity) > verb) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }
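
// An illustrative use of VERB (this example is not from the original file):
// a call such as
//    VERB(1, "culled %d snapshots\n", n_deleted);
// prints the message via VG_(dmsg), prefixed with "Massif: ", but only when
// Valgrind's verbosity level is greater than 1, ie. when the user passes -v
// or higher.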
//------------------------------------------------------------//
//--- Statistics                                           ---//
//------------------------------------------------------------//

// Konqueror startup, to give an idea of the numbers involved with a biggish
// program, with default depth:
//
// - 310,000 allocations
// -  15,000 XPts            800,000 XPts
static UInt n_heap_allocs           = 0;
static UInt n_heap_reallocs         = 0;
static UInt n_heap_frees            = 0;
static UInt n_ignored_heap_allocs   = 0;
static UInt n_ignored_heap_frees    = 0;
static UInt n_ignored_heap_reallocs = 0;
static UInt n_stack_allocs          = 0;
static UInt n_stack_frees           = 0;
static UInt n_xpts                  = 0;
static UInt n_xpt_init_expansions   = 0;
static UInt n_xpt_later_expansions  = 0;
static UInt n_sxpt_allocs           = 0;
static UInt n_sxpt_frees            = 0;
static UInt n_skipped_snapshots     = 0;
static UInt n_real_snapshots        = 0;
static UInt n_detailed_snapshots    = 0;
static UInt n_peak_snapshots        = 0;
static UInt n_cullings              = 0;
static UInt n_XCon_redos            = 0;
//------------------------------------------------------------//
//--- Globals                                              ---//
//------------------------------------------------------------//

// Number of guest instructions executed so far.  Only used with
// --time-unit=i.
static Long guest_instrs_executed = 0;

static SizeT heap_szB       = 0; // Live heap size
static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
static SizeT stacks_szB     = 0; // Live stacks size

// This is the total size from the current peak snapshot, or 0 if no peak
// snapshot has been taken yet.
static SizeT peak_snapshot_total_szB = 0;

// Incremented every time memory is allocated/deallocated, by the
// allocated/deallocated amount;  includes heap, heap-admin and stack
// memory.  An alternative to milliseconds as a unit of program "time".
static ULong total_allocs_deallocs_szB = 0;

// When running with --heap=yes --pages-as-heap=no, we don't start taking
// snapshots until the first basic block is executed, rather than doing it in
// ms_post_clo_init (which is the obvious spot), for two reasons.
// - It lets us ignore stack events prior to that, because they're not
//   really proper ones and just would screw things up.
// - Because there's still some core initialisation to do, and so there
//   would be an artificial time gap between the first and second snapshots.
//
// When running with --heap=yes --pages-as-heap=yes, snapshots start much
// earlier due to new_mem_startup so this isn't relevant.
//
static Bool have_started_executing_code = False;
//------------------------------------------------------------//
//--- Alloc fns                                            ---//
//------------------------------------------------------------//

static XArray* alloc_fns;
static XArray* ignore_fns;
static void init_alloc_fns(void)
{
   // Create the list, and add the default elements.
   alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
                          VG_(free), sizeof(HChar*));
   #define DO(x)  { const HChar* s = x; VG_(addToXA)(alloc_fns, &s); }
   // Ordered roughly according to (presumed) frequency.
   // Nb: The C++ "operator new*" ones are overloadable.  We include them
   // always anyway, because even if they're overloaded, it would be a
   // prodigiously stupid overloading that caused them to not allocate
   // memory.
   //
   // XXX: because we don't look at the first stack entry (unless it's a
   // custom allocation) there's not much point to having all these alloc
   // functions here -- they should never appear anywhere (I think?) other
   // than the top stack entry.  The only exceptions are those that in
   // vg_replace_malloc.c are partly or fully implemented in terms of another
   // alloc function: realloc (which uses malloc);  valloc,
   // malloc_zone_valloc, posix_memalign and memalign_common (which use
   // memalign).
320 DO("__builtin_new" );
321 DO("operator new(unsigned)" );
322 DO("operator new(unsigned long)" );
323 DO("__builtin_vec_new" );
324 DO("operator new[](unsigned)" );
325 DO("operator new[](unsigned long)" );
329 DO("posix_memalign" );
331 DO("operator new(unsigned, std::nothrow_t const&)" );
332 DO("operator new[](unsigned, std::nothrow_t const&)" );
333 DO("operator new(unsigned long, std::nothrow_t const&)" );
334 DO("operator new[](unsigned long, std::nothrow_t const&)");
335 #if defined(VGO_darwin)
336 DO("malloc_zone_malloc" );
337 DO("malloc_zone_calloc" );
338 DO("malloc_zone_realloc" );
339 DO("malloc_zone_memalign" );
340 DO("malloc_zone_valloc" );
static void init_ignore_fns(void)
{
   // Create the (empty) list.
   ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
                           VG_(free), sizeof(HChar*));
}
// Determines if the named function is a member of the XArray.
static Bool is_member_fn(const XArray* fns, const HChar* fnname)
{
   HChar** fn_ptr;
   Int     i;

   // Nb: It's a linear search through the list, because we're comparing
   // strings rather than pointers to strings.
   // Nb: This gets called a lot.  It was an OSet, but they're quite slow to
   // iterate through so it wasn't a good choice.
   for (i = 0; i < VG_(sizeXA)(fns); i++) {
      fn_ptr = VG_(indexXA)(fns, i);
      if (VG_STREQ(fnname, *fn_ptr))
         return True;
   }
   return False;
}
//------------------------------------------------------------//
//--- Command line args                                    ---//
//------------------------------------------------------------//

#define MAX_DEPTH       200

typedef enum { TimeI, TimeMS, TimeB } TimeUnit;
static const HChar* TimeUnit_to_string(TimeUnit time_unit)
{
   switch (time_unit) {
      case TimeI:  return "i";
      case TimeMS: return "ms";
      case TimeB:  return "B";
      default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
   }
}
static Bool   clo_heap            = True;
// clo_heap_admin is deliberately a word-sized type.  At one point it was
// a UInt, but this caused problems on 64-bit machines when it was
// multiplied by a small negative number and then promoted to a
// word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
static SSizeT clo_heap_admin      = 8;
static Bool   clo_pages_as_heap   = False;
static Bool   clo_stacks          = False;
static Int    clo_depth           = 30;
static double clo_threshold       = 1.0;  // percentage
static double clo_peak_inaccuracy = 1.0;  // percentage
static Int    clo_time_unit       = TimeI;
static Int    clo_detailed_freq   = 10;
static Int    clo_max_snapshots   = 100;
static const HChar* clo_massif_out_file = "massif.out.%p";

static XArray* args_for_massif;
static Bool ms_process_cmd_line_option(const HChar* arg)
{
   const HChar* tmp_str;

   // Remember the arg for later use.
   VG_(addToXA)(args_for_massif, &arg);

        if VG_BOOL_CLO(arg, "--heap",           clo_heap)   {}
   else if VG_BINT_CLO(arg, "--heap-admin",     clo_heap_admin, 0, 1024) {}

   else if VG_BOOL_CLO(arg, "--stacks",         clo_stacks) {}

   else if VG_BOOL_CLO(arg, "--pages-as-heap",  clo_pages_as_heap) {}

   else if VG_BINT_CLO(arg, "--depth",          clo_depth, 1, MAX_DEPTH) {}

   else if VG_STR_CLO(arg, "--alloc-fn",        tmp_str) {
      VG_(addToXA)(alloc_fns, &tmp_str);
   }
   else if VG_STR_CLO(arg, "--ignore-fn",       tmp_str) {
      VG_(addToXA)(ignore_fns, &tmp_str);
   }

   else if VG_DBL_CLO(arg, "--threshold",  clo_threshold) {
      if (clo_threshold < 0 || clo_threshold > 100) {
         VG_(fmsg_bad_option)(arg,
            "--threshold must be between 0.0 and 100.0\n");
      }
   }

   else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}

   else if VG_XACT_CLO(arg, "--time-unit=i",  clo_time_unit, TimeI)  {}
   else if VG_XACT_CLO(arg, "--time-unit=ms", clo_time_unit, TimeMS) {}
   else if VG_XACT_CLO(arg, "--time-unit=B",  clo_time_unit, TimeB)  {}

   else if VG_BINT_CLO(arg, "--detailed-freq", clo_detailed_freq, 1, 1000000) {}

   else if VG_BINT_CLO(arg, "--max-snapshots", clo_max_snapshots, 10, 1000) {}

   else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
static void ms_print_usage(void)
{
   VG_(printf)(
"    --heap=no|yes             profile heap blocks [yes]\n"
"    --heap-admin=<size>       average admin bytes per heap block;\n"
"                              ignored if --heap=no [8]\n"
"    --stacks=no|yes           profile stack(s) [no]\n"
"    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
"    --depth=<number>          depth of contexts [30]\n"
"    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
"    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
"    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
"    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
"    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
"                              or heap bytes alloc'd/dealloc'd [i]\n"
"    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
"    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
"    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
   );
}
static void ms_print_debug_usage(void)
//------------------------------------------------------------//
//--- XPts, XTrees and XCons                               ---//
//------------------------------------------------------------//

// An XPt represents an "execution point", ie. a code address.  Each XPt is
// part of a tree of XPts (an "execution tree", or "XTree").  The details of
// the heap are represented by a single XTree.
//
// The root of the tree is 'alloc_xpt', which represents all allocation
// functions, eg:
// - malloc/calloc/realloc/memalign/new/new[];
// - user-specified allocation functions (using --alloc-fn);
// - custom allocation (MALLOCLIKE) points
// It's a bit of a fake XPt (ie. its 'ip' is zero), and is only used because
// it makes the code simpler.
//
// Any child of 'alloc_xpt' is called a "top-XPt".  The XPts at the bottom
// of an XTree (leaf nodes) are "bottom-XPTs".
//
// Each path from a top-XPt to a bottom-XPt through an XTree gives an
// execution context ("XCon"), ie. a stack trace.  (And sub-paths represent
// stack sub-traces.)  The number of XCons in an XTree is equal to the
// number of bottom-XPTs in that XTree.
//
//      alloc_xpt       XTrees are bi-directional.
//
//    > parent <        Example: if child1() calls parent() and child2()
//   /    |     \       also calls parent(), and parent() calls malloc(),
//  |    / \     |      the XTree will look like this.
//
// (Note that malformed stack traces can lead to difficulties.  See the
// comment at the bottom of get_XCon.)
//
// XTrees and XPts are mirrored by SXTrees and SXPts, where the 'S' is short
// for "saved".  When the XTree is duplicated for a snapshot, we duplicate
// it as an SXTree, which is similar but omits some things it does not need,
// and aggregates up insignificant nodes.  This is important as an SXTree is
// typically much smaller than an XTree.
//
// XXX: make XPt and SXPt extensible arrays, to avoid having to do two
// allocations per Pt.
typedef struct _XPt XPt;

struct _XPt {
   Addr  ip;              // code address

   // Bottom-XPts: space for the precise context.
   // Other XPts: space of all the descendent bottom-XPts.
   // Nb: this value goes up and down as the program executes.
   SizeT szB;

   XPt*  parent;          // pointer to parent XPt

   // n_children and max_children are 32-bit integers.  16-bit integers
   // are too small -- a very big program might have more than 65536
   // allocation points (ie. top-XPts) -- Konqueror starting up has 1800.
   UInt  n_children;      // number of children
   UInt  max_children;    // capacity of children array
   XPt** children;        // pointers to children XPts
};
typedef struct _SXPt SXPt;

struct _SXPt {
   SizeT szB;             // memory size for the node, be it Sig or Insig

   // An SXPt representing a single significant code location.  Much like
   // an XPt, minus the fields that aren't necessary.

   // An SXPt representing one or more code locations, all below the
   // significance threshold.
   Int   n_xpts;          // number of aggregated XPts
};
// Fake XPt representing all allocation functions like malloc().  Acts as
// parent node to all top-XPts.
static XPt* alloc_xpt;
static XPt* new_XPt(Addr ip, XPt* parent)
{
   // XPts are never freed, so we can use VG_(perm_malloc) to allocate them.
   // Note that we cannot use VG_(perm_malloc) for the 'children' array, because
   // that needs to be resizable.
   XPt* xpt = VG_(perm_malloc)(sizeof(XPt), vg_alignof(XPt));

   xpt->parent = parent;

   // We don't initially allocate any space for children.  We let that
   // happen on demand.  Many XPts (ie. all the bottom-XPts) don't have any
   // children anyway.
   xpt->max_children = 0;
   xpt->children     = NULL;

   return xpt;
}
static void add_child_xpt(XPt* parent, XPt* child)
{
   // Expand 'children' if necessary.
   tl_assert(parent->n_children <= parent->max_children);
   if (parent->n_children == parent->max_children) {
      if (0 == parent->max_children) {
         parent->max_children = 4;
         parent->children = VG_(malloc)( "ms.main.acx.1",
                                         parent->max_children * sizeof(XPt*) );
         n_xpt_init_expansions++;
      } else {
         parent->max_children *= 2;    // Double size
         parent->children = VG_(realloc)( "ms.main.acx.2",
                                          parent->children,
                                          parent->max_children * sizeof(XPt*) );
         n_xpt_later_expansions++;
      }
   }

   // Insert new child XPt in parent's children list.
   parent->children[ parent->n_children++ ] = child;
}
// Reverse comparison for a reverse sort -- biggest to smallest.
static Int SXPt_revcmp_szB(const void* n1, const void* n2)
{
   const SXPt* sxpt1 = *(const SXPt *const *)n1;
   const SXPt* sxpt2 = *(const SXPt *const *)n2;
   return ( sxpt1->szB < sxpt2->szB ?  1
          : sxpt1->szB > sxpt2->szB ? -1
          :                            0 );
}
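
// Nb (illustrative note, not in the original source): a comparator with this
// (const void*, const void*) signature is the shape VG_(ssort) expects, so an
// array of SXPt* children could be ordered biggest-first with something like
//    VG_(ssort)(children, n_children, sizeof(SXPt*), SXPt_revcmp_szB);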
//------------------------------------------------------------//
//--- XTree Operations                                     ---//
//------------------------------------------------------------//
// Duplicates an XTree as an SXTree.
static SXPt* dup_XTree(XPt* xpt, SizeT total_szB)
{
   Int   i, j, n_sig_children, n_insig_children, n_child_sxpts;
   SizeT sig_child_threshold_szB;
   SXPt* sxpt;

   // Number of XPt children     Action for SXPT
   // -----------------------    ---------------
   // 0 sig, 0 insig             alloc 0 children
   // N sig, 0 insig             alloc N children, dup all
   // N sig, M insig             alloc N+1, dup first N, aggregate remaining M
   // 0 sig, M insig             alloc 1, aggregate M

   // Work out how big a child must be to be significant.  If the current
   // total_szB is zero, then we set it to 1, which means everything will be
   // judged insignificant -- this is sensible, as there's no point showing
   // any detail for this case.  Unless they used --threshold=0, in which
   // case we show them everything because that's what they asked for.
   //
   // Nb: We do this once now, rather than once per child, because if we do
   // that the cost of all the divisions adds up to something significant.
   if (0 == total_szB && 0 != clo_threshold) {
      sig_child_threshold_szB = 1;
   } else {
      sig_child_threshold_szB = (SizeT)((total_szB * clo_threshold) / 100);
   }

   // How many children are significant?  And do we need an aggregate SXPt?
   n_sig_children = 0;
   for (i = 0; i < xpt->n_children; i++) {
      if (xpt->children[i]->szB >= sig_child_threshold_szB) {
         n_sig_children++;
      }
   }
   n_insig_children = xpt->n_children - n_sig_children;
   n_child_sxpts    = n_sig_children + ( n_insig_children > 0 ? 1 : 0 );

   // Duplicate the XPt.
   sxpt = VG_(malloc)("ms.main.dX.1", sizeof(SXPt));
   sxpt->szB            = xpt->szB;
   sxpt->Sig.ip         = xpt->ip;
   sxpt->Sig.n_children = n_child_sxpts;

   // Create the SXPt's children.
   if (n_child_sxpts > 0) {
      SizeT sig_children_szB = 0, insig_children_szB = 0;
      sxpt->Sig.children = VG_(malloc)("ms.main.dX.2",
                                       n_child_sxpts * sizeof(SXPt*));

      // Duplicate the significant children.  (Nb: sig_children_szB +
      // insig_children_szB doesn't necessarily equal xpt->szB.)
      j = 0;
      for (i = 0; i < xpt->n_children; i++) {
         if (xpt->children[i]->szB >= sig_child_threshold_szB) {
            sxpt->Sig.children[j++] = dup_XTree(xpt->children[i], total_szB);
            sig_children_szB   += xpt->children[i]->szB;
         } else {
            insig_children_szB += xpt->children[i]->szB;
         }
      }

      // Create the SXPt for the insignificant children, if any, and put it
      // in the last child entry.
      if (n_insig_children > 0) {
         // Nb: We bump 'n_sxpt_allocs' here because creating an Insig SXPt
         // doesn't involve a call to dup_XTree().
         SXPt* insig_sxpt = VG_(malloc)("ms.main.dX.3", sizeof(SXPt));
         insig_sxpt->tag          = InsigSXPt;
         insig_sxpt->szB          = insig_children_szB;
         insig_sxpt->Insig.n_xpts = n_insig_children;
         sxpt->Sig.children[n_sig_children] = insig_sxpt;
      }
   } else {
      sxpt->Sig.children = NULL;
   }

   return sxpt;
}
static void free_SXTree(SXPt* sxpt)
{
   Int i;
   tl_assert(sxpt != NULL);

   switch (sxpt->tag) {
    case SigSXPt:
      // Free all children SXPts, then the children array.
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         free_SXTree(sxpt->Sig.children[i]);
         sxpt->Sig.children[i] = NULL;
      }
      VG_(free)(sxpt->Sig.children);  sxpt->Sig.children = NULL;
      break;

    default: tl_assert2(0, "free_SXTree: unknown SXPt tag");
   }

   // Free the SXPt itself.
   VG_(free)(sxpt);  sxpt = NULL;
}
// Sanity checking: we periodically check the heap XTree with
// ms_expensive_sanity_check.
static void sanity_check_XTree(XPt* xpt, XPt* parent)
{
   tl_assert(xpt != NULL);

   // Check back-pointer.
   tl_assert2(xpt->parent == parent,
              "xpt->parent = %p, parent = %p\n", xpt->parent, parent);

   // Check children counts look sane.
   tl_assert(xpt->n_children <= xpt->max_children);

   // Unfortunately, xpt's size is not necessarily equal to the sum of xpt's
   // children's sizes.  See comment at the bottom of get_XCon.
}
// Sanity checking: we check SXTrees (which are in snapshots) after
// snapshots are created, before they are deleted, and before they are
// printed.
static void sanity_check_SXTree(SXPt* sxpt)
{
   Int i;

   tl_assert(sxpt != NULL);

   // Check the sum of any children szBs equals the SXPt's szB.  Check the
   // children at the same time.
   switch (sxpt->tag) {
    case SigSXPt:
      if (sxpt->Sig.n_children > 0) {
         for (i = 0; i < sxpt->Sig.n_children; i++) {
            sanity_check_SXTree(sxpt->Sig.children[i]);
         }
      }
      break;

    default: tl_assert2(0, "sanity_check_SXTree: unknown SXPt tag");
   }
}
//------------------------------------------------------------//
//--- XCon Operations                                      ---//
//------------------------------------------------------------//

// This is the limit on the number of removed alloc-fns that can be in a
// single XCon.
#define MAX_OVERESTIMATE   50
#define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)
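// With the values above, MAX_IPS works out to 200 + 50 = 250 ips[] slots.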
// Determine if the given IP belongs to a function that should be ignored.
static Bool fn_should_be_ignored(Addr ip)
{
   const HChar *buf;
   return
      ( VG_(get_fnname)(ip, &buf) && is_member_fn(ignore_fns, buf)
      ? True : False );
}
// Get the stack trace for an XCon, filtering out uninteresting entries:
// alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
//   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
//   becomes:  a / b / main
// Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
// as an alloc-fn.  This is ok.
static
Int get_IPs( ThreadId tid, Bool exclude_first_entry, Addr ips[])
{
   Int  n_ips, i, n_alloc_fns_removed;
   Int  overestimate;
   Bool redo;

   // We ask for a few more IPs than clo_depth suggests we need.  Then we
   // remove every entry that is an alloc-fn.  Depending on the
   // circumstances, we may need to redo it all, asking for more IPs.
   //
   // - If the original stack trace is smaller than asked-for, redo=False
   // - Else if after filtering we have >= clo_depth IPs, redo=False
   //
   // In other words, to redo, we'd have to get a stack trace as big as we
   // asked for and remove more than 'overestimate' alloc-fns.

   redo = True;      // Assume this to begin with.
   for (overestimate = 3; redo; overestimate += 6) {
      // This should never happen -- would require MAX_OVERESTIMATE
      // alloc-fns to be removed from the stack trace.
      if (overestimate > MAX_OVERESTIMATE)
         VG_(tool_panic)("get_IPs: ips[] too small, inc. MAX_OVERESTIMATE?");

      // Ask for more IPs than clo_depth suggests we need.
      n_ips = VG_(get_StackTrace)( tid, ips, clo_depth + overestimate,
                                   NULL/*array to dump SP values in*/,
                                   NULL/*array to dump FP values in*/,
                                   0/*first_ip_delta*/ );
      tl_assert(n_ips > 0);

      // If the original stack trace is smaller than asked-for, redo=False.
      if (n_ips < clo_depth + overestimate) { redo = False; }

      // Filter out alloc fns.  If requested, we automatically remove the
      // first entry (which presumably will be something like malloc or
      // __builtin_new that we're sure to filter out) without looking at it,
      // because VG_(get_fnname) is expensive.
      n_alloc_fns_removed = ( exclude_first_entry ? 1 : 0 );
      for (i = n_alloc_fns_removed; i < n_ips; i++) {
         const HChar *buf;
         if (VG_(get_fnname)(ips[i], &buf)) {
            if (is_member_fn(alloc_fns, buf)) {
               n_alloc_fns_removed++;
            } else {
               break;
            }
         }
      }

      // Remove the alloc fns by shuffling the rest down over them.
      n_ips -= n_alloc_fns_removed;
      for (i = 0; i < n_ips; i++) {
         ips[i] = ips[i + n_alloc_fns_removed];
      }

      // If after filtering we have >= clo_depth IPs, redo=False
      if (n_ips >= clo_depth) {
         redo = False;
         n_ips = clo_depth;      // Ignore any IPs below --depth.
      }
   }

   return n_ips;
}
// Gets an XCon and puts it in the tree.  Returns the XCon's bottom-XPt.
// Unless the allocation should be ignored, in which case we return NULL.
static XPt* get_XCon( ThreadId tid, Bool exclude_first_entry )
{
   static Addr ips[MAX_IPS];
   Int  i;
   XPt* xpt = alloc_xpt;

   // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
   Int n_ips = get_IPs(tid, exclude_first_entry, ips);

   // Should we ignore this allocation?  (Nb: n_ips can be zero, eg. if
   // 'main' is marked as an alloc-fn.)
   if (n_ips > 0 && fn_should_be_ignored(ips[0])) {
      return NULL;
   }

   // Now do the search/insertion of the XCon.
   for (i = 0; i < n_ips; i++) {
      Addr ip = ips[i];
      Int  ch;
      // Look for IP in xpt's children.
      // Linear search, ugh -- about 10% of time for konqueror startup.  Tried
      // caching last result, only hit about 4% for konqueror.
      // Nb: this search hits about 98% of the time for konqueror
      for (ch = 0; True; ch++) {
         if (ch == xpt->n_children) {
            // IP not found in the children.
            // Create and add new child XPt, then stop.
            XPt* new_child_xpt = new_XPt(ip, xpt);
            add_child_xpt(xpt, new_child_xpt);
            xpt = new_child_xpt;
            break;

         } else if (ip == xpt->children[ch]->ip) {
            // Found the IP in the children, stop.
            xpt = xpt->children[ch];
            break;
         }
      }
   }

   // [Note: several comments refer to this comment.  Do not delete it
   //  without updating them.]
   //
   // A complication... If all stack traces were well-formed, then the
   // returned xpt would always be a bottom-XPt.  As a consequence, an XPt's
   // size would always be equal to the sum of its children's sizes, which
   // is an excellent sanity check.
   //
   // Unfortunately, stack traces occasionally are malformed, ie. truncated.
   // This allows a stack trace to be a sub-trace of another, eg. a/b/c is a
   // sub-trace of a/b/c/d.  So we can't assume this xpt is a bottom-XPt;
   // nor can we sanity-check an XPt's size against its children's sizes.
   // This is annoying, but must be dealt with.  (Older versions of Massif
   // had this assertion in, and it was reported to fail by real users a
   // couple of times.)  Even more annoyingly, I can't come up with a simple
   // test case that exhibits such a malformed stack trace, so I can't
   // regression test it.  Sigh.
   //
   // However, we can print a warning, so that if it happens (unexpectedly)
   // in existing regression tests we'll know.  Also, it warns users that
   // the output snapshots may not add up the way they might expect.
   //
   //tl_assert(0 == xpt->n_children); // Must be bottom-XPt
   if (0 != xpt->n_children) {
      static Int n_moans = 0;
      if (n_moans < 3) {
         n_moans++;
         VG_(umsg)(
            "Warning: Malformed stack trace detected.  In Massif's output,\n");
         VG_(umsg)(
            "         the size of an entry's child entries may not sum up\n");
         VG_(umsg)(
            "         to the entry's size as they normally do.\n");
         if (3 == n_moans)
            VG_(umsg)(
            "         (And Massif now won't warn about this again.)\n");
      }
   }
   return xpt;
}
// Update 'szB' of every XPt in the XCon, by percolating upwards.
static void update_XCon(XPt* xpt, SSizeT space_delta)
{
   tl_assert(NULL != xpt);

   if (0 == space_delta)
      return;

   while (xpt != alloc_xpt) {
      if (space_delta < 0) tl_assert(xpt->szB >= -space_delta);
      xpt->szB += space_delta;
      xpt = xpt->parent;
   }
   if (space_delta < 0) tl_assert(alloc_xpt->szB >= -space_delta);
   alloc_xpt->szB += space_delta;
}
//------------------------------------------------------------//
//--- Snapshots                                            ---//
//------------------------------------------------------------//

// Snapshots are done in a way so that we always have a reasonable number of
// them.  We start by taking them quickly.  Once we hit our limit, we cull
// some (eg. half), and start taking them more slowly.  Once we hit the
// limit again, we again cull and then take them even more slowly, and so
// on.

// Time is measured either in i or ms or bytes, depending on the --time-unit
// option.  It's a Long because it can exceed 32-bits reasonably easily, and
// because we need to allow negative values to represent unset times.
typedef Long Time;

#define UNUSED_SNAPSHOT_TIME  -333   // A conspicuous negative number.
typedef
   struct {
      SnapshotKind kind;
      Time  time;
      SizeT heap_szB;
      SizeT heap_extra_szB;   // Heap slop + admin bytes.
      SizeT stacks_szB;
      SXPt* alloc_sxpt;       // Heap XTree root, if a detailed snapshot,
   }                          // otherwise NULL.
   Snapshot;
static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
static Snapshot* snapshots;            // Array of snapshots.
static Bool is_snapshot_in_use(Snapshot* snapshot)
{
   if (Unused == snapshot->kind) {
      // If snapshot is unused, check all the fields are unset.
      tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
      tl_assert(snapshot->heap_extra_szB == 0);
      tl_assert(snapshot->heap_szB       == 0);
      tl_assert(snapshot->stacks_szB     == 0);
      tl_assert(snapshot->alloc_sxpt     == NULL);
      return False;
   } else {
      tl_assert(snapshot->time           != UNUSED_SNAPSHOT_TIME);
      return True;
   }
}
static Bool is_detailed_snapshot(Snapshot* snapshot)
{
   return (snapshot->alloc_sxpt ? True : False);
}
static Bool is_uncullable_snapshot(Snapshot* snapshot)
{
   return &snapshots[0] == snapshot                   // First snapshot
       || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
       || snapshot->kind == Peak;                     // Peak snapshot
}
static void sanity_check_snapshot(Snapshot* snapshot)
{
   if (snapshot->alloc_sxpt) {
      sanity_check_SXTree(snapshot->alloc_sxpt);
   }
}
1067 static void sanity_check_snapshots_array(void)
1070 for (i
= 0; i
< next_snapshot_i
; i
++) {
1071 tl_assert( is_snapshot_in_use( & snapshots
[i
] ));
1073 for ( ; i
< clo_max_snapshots
; i
++) {
1074 tl_assert(!is_snapshot_in_use( & snapshots
[i
] ));
// This zeroes all the fields in the snapshot, but does not free the heap
// XTree if present.  It also does a sanity check unless asked not to;  we
// can't sanity check at startup when clearing the initial snapshots because
// they're full of junk.
static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
{
   if (do_sanity_check) sanity_check_snapshot(snapshot);
   snapshot->kind           = Unused;
   snapshot->time           = UNUSED_SNAPSHOT_TIME;
   snapshot->heap_extra_szB = 0;
   snapshot->heap_szB       = 0;
   snapshot->stacks_szB     = 0;
   snapshot->alloc_sxpt     = NULL;
}
// This zeroes all the fields in the snapshot, and frees the heap XTree if
// present.
static void delete_snapshot(Snapshot* snapshot)
{
   // Nb: if there's an XTree, we free it after calling clear_snapshot,
   // because clear_snapshot does a sanity check which includes checking the
   // XTree.
   SXPt* tmp_sxpt = snapshot->alloc_sxpt;
   clear_snapshot(snapshot, /*do_sanity_check*/True);
   if (tmp_sxpt) {
      free_SXTree(tmp_sxpt);
   }
}
static void VERB_snapshot(Int verbosity, const HChar* prefix, Int i)
{
   Snapshot* snapshot = &snapshots[i];
   const HChar* suffix;
   switch (snapshot->kind) {
   case Peak:   suffix = "p";                                            break;
   case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
   case Unused: suffix = "u";                                            break;
   default:
      tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
   }
   VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
      prefix, suffix, i,
      snapshot->time,
      snapshot->heap_szB,
      snapshot->heap_extra_szB,
      snapshot->stacks_szB
   );
}
1128 // time-spans, because that gives us the most even distribution of snapshots
1129 // over time. (It's possible to lose interesting spikes, however.)
1131 // Algorithm for N snapshots: We find the snapshot representing the smallest
1132 // timeframe, and remove it. We repeat this until (N/2) snapshots are gone.
1133 // We have to do this one snapshot at a time, rather than finding the (N/2)
1134 // smallest snapshots in one hit, because when a snapshot is removed, its
1135 // neighbours immediately cover greater timespans. So it's O(N^2), but N is
1136 // small, and it's not done very often.
1138 // Once we're done, we return the new smallest interval between snapshots.
1139 // That becomes our minimum time interval.
1140 static UInt
cull_snapshots(void)
1142 Int i
, jp
, j
, jn
, min_timespan_i
;
1148 // Sets j to the index of the first not-yet-removed snapshot at or after i
1149 #define FIND_SNAPSHOT(i, j) \
1151 j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
1154 VERB(2, "Culling...\n");
1156 // First we remove enough snapshots by clearing them in-place. Once
1157 // that's done, we can slide the remaining ones down.
1158 for (i
= 0; i
< clo_max_snapshots
/2; i
++) {
1159 // Find the snapshot representing the smallest timespan. The timespan
1160 // for snapshot n = d(N-1,N)+d(N,N+1), where d(A,B) is the time between
1161 // snapshot A and B. We don't consider the first and last snapshots for
1163 Snapshot
* min_snapshot
;
1166 // Initial triple: (prev, curr, next) == (jp, j, jn)
1167 // Initial min_timespan is the first one.
1169 FIND_SNAPSHOT(1, j
);
1170 FIND_SNAPSHOT(j
+1, jn
);
1171 min_timespan
= 0x7fffffffffffffffLL
;
1173 while (jn
< clo_max_snapshots
) {
1174 Time timespan
= snapshots
[jn
].time
- snapshots
[jp
].time
;
1175 tl_assert(timespan
>= 0);
1176 // Nb: We never cull the peak snapshot.
1177 if (Peak
!= snapshots
[j
].kind
&& timespan
< min_timespan
) {
1178 min_timespan
= timespan
;
1181 // Move on to next triple
1184 FIND_SNAPSHOT(jn
+1, jn
);
1186 // We've found the least important snapshot, now delete it. First
1187 // print it if necessary.
1188 tl_assert(-1 != min_j
); // Check we found a minimum.
1189 min_snapshot
= & snapshots
[ min_j
];
1190 if (VG_(clo_verbosity
) > 1) {
1191 HChar buf
[64]; // large enough
1192 VG_(snprintf
)(buf
, 64, " %3d (t-span = %lld)", i
, min_timespan
);
1193 VERB_snapshot(2, buf
, min_j
);
1195 delete_snapshot(min_snapshot
);
1199 // Slide down the remaining snapshots over the removed ones. First set i
1200 // to point to the first empty slot, and j to the first full slot after
1201 // i. Then slide everything down.
1202 for (i
= 0; is_snapshot_in_use( &snapshots
[i
] ); i
++) { }
1203 for (j
= i
; !is_snapshot_in_use( &snapshots
[j
] ); j
++) { }
1204 for ( ; j
< clo_max_snapshots
; j
++) {
1205 if (is_snapshot_in_use( &snapshots
[j
] )) {
1206 snapshots
[i
++] = snapshots
[j
];
1207 clear_snapshot(&snapshots
[j
], /*do_sanity_check*/True
);
1210 next_snapshot_i
= i
;
1212 // Check snapshots array looks ok after changes.
1213 sanity_check_snapshots_array();
1215 // Find the minimum timespan remaining; that will be our new minimum
1216 // time interval. Note that above we were finding timespans by measuring
1217 // two intervals around a snapshot that was under consideration for
1218 // deletion. Here we only measure single intervals because all the
1219 // deletions have occurred.
1221 // But we have to be careful -- some snapshots (eg. snapshot 0, and the
1222 // peak snapshot) are uncullable. If two uncullable snapshots end up
1223 // next to each other, they'll never be culled (assuming the peak doesn't
1224 // change), and the time gap between them will not change. However, the
1225 // time between the remaining cullable snapshots will grow ever larger.
1226 // This means that the min_timespan found will always be that between the
1227 // two uncullable snapshots, and it will be much smaller than it should
1228 // be. To avoid this problem, when computing the minimum timespan, we
1229 // ignore any timespans between two uncullable snapshots.
1230 tl_assert(next_snapshot_i
> 1);
1231 min_timespan
= 0x7fffffffffffffffLL
;
1232 min_timespan_i
= -1;
1233 for (i
= 1; i
< next_snapshot_i
; i
++) {
1234 if (is_uncullable_snapshot(&snapshots
[i
]) &&
1235 is_uncullable_snapshot(&snapshots
[i
-1]))
1237 VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i
-1, i
);
1239 Time timespan
= snapshots
[i
].time
- snapshots
[i
-1].time
;
1240 tl_assert(timespan
>= 0);
1241 if (timespan
< min_timespan
) {
1242 min_timespan
= timespan
;
1247 tl_assert(-1 != min_timespan_i
); // Check we found a minimum.
1249 // Print remaining snapshots, if necessary.
1250 if (VG_(clo_verbosity
) > 1) {
1251 VERB(2, "Finished culling (%3d of %3d deleted)\n",
1252 n_deleted
, clo_max_snapshots
);
1253 for (i
= 0; i
< next_snapshot_i
; i
++) {
1254 VERB_snapshot(2, " post-cull", i
);
1256 VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
1257 min_timespan
, min_timespan_i
-1, min_timespan_i
);
1260 return min_timespan
;
static Time get_time(void)
{
   // Get current time, in whatever time unit we're using.
   if (clo_time_unit == TimeI) {
      return guest_instrs_executed;
   } else if (clo_time_unit == TimeMS) {
      // Some stuff happens between the millisecond timer being initialised
      // to zero and us taking our first snapshot.  We determine that time
      // gap so we can subtract it from all subsequent times so that our
      // first snapshot is considered to be at t = 0ms.  Unfortunately, a
      // bunch of symbols get read after the first snapshot is taken but
      // before the second one (which is triggered by the first allocation),
      // so when the time-unit is 'ms' we always have a big gap between the
      // first two snapshots.  But at least users won't have to wonder why
      // the first snapshot isn't at t=0.
      static Bool is_first_get_time = True;
      static Time start_time_ms;
      if (is_first_get_time) {
         start_time_ms = VG_(read_millisecond_timer)();
         is_first_get_time = False;
         return 0;
      } else {
         return VG_(read_millisecond_timer)() - start_time_ms;
      }
   } else if (clo_time_unit == TimeB) {
      return total_allocs_deallocs_szB;
   } else {
      tl_assert2(0, "bad --time-unit value");
   }
}
// Take a snapshot, and only that -- decisions on whether to take a
// snapshot, or what kind of snapshot, are made elsewhere.
// Nb: we call the arg "my_time" because "time" shadows a global declaration
// in /usr/include/time.h on Darwin.
static void
take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
              Bool is_detailed)
{
   tl_assert(!is_snapshot_in_use(snapshot));
   if (!clo_pages_as_heap) {
      tl_assert(have_started_executing_code);
   }

   // Heap and heap admin.
   snapshot->heap_szB = heap_szB;
   if (is_detailed) {
      SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
      snapshot->alloc_sxpt = dup_XTree(alloc_xpt, total_szB);
      tl_assert(           alloc_xpt->szB == heap_szB);
      tl_assert(snapshot->alloc_sxpt->szB == heap_szB);
   }
   snapshot->heap_extra_szB = heap_extra_szB;

   // Stacks.
   snapshot->stacks_szB = stacks_szB;

   // Rest of snapshot.
   snapshot->kind = kind;
   snapshot->time = my_time;
   sanity_check_snapshot(snapshot);

   if (Peak == kind)  n_peak_snapshots++;
   if (is_detailed)   n_detailed_snapshots++;
}
1338 maybe_take_snapshot(SnapshotKind kind
, const HChar
* what
)
1340 // 'min_time_interval' is the minimum time interval between snapshots.
1341 // If we try to take a snapshot and less than this much time has passed,
1342 // we don't take it. It gets larger as the program runs longer. It's
1343 // initialised to zero so that we begin by taking snapshots as quickly as
1345 static Time min_time_interval
= 0;
1346 // Zero allows startup snapshot.
1347 static Time earliest_possible_time_of_next_snapshot
= 0;
1348 static Int n_snapshots_since_last_detailed
= 0;
1349 static Int n_skipped_snapshots_since_last_snapshot
= 0;
1353 // Nb: we call this variable "my_time" because "time" shadows a global
1354 // declaration in /usr/include/time.h on Darwin.
1355 Time my_time
= get_time();
1359 // Only do a snapshot if it's time.
1360 if (my_time
< earliest_possible_time_of_next_snapshot
) {
1361 n_skipped_snapshots
++;
1362 n_skipped_snapshots_since_last_snapshot
++;
1365 is_detailed
= (clo_detailed_freq
-1 == n_snapshots_since_last_detailed
);
1369 // Because we're about to do a deallocation, we're coming down from a
1370 // local peak. If it is (a) actually a global peak, and (b) a certain
1371 // amount bigger than the previous peak, then we take a peak snapshot.
1372 // By not taking a snapshot for every peak, we save a lot of effort --
1373 // because many peaks remain peak only for a short time.
1374 SizeT total_szB
= heap_szB
+ heap_extra_szB
+ stacks_szB
;
1375 SizeT excess_szB_for_new_peak
=
1376 (SizeT
)((peak_snapshot_total_szB
* clo_peak_inaccuracy
) / 100);
1377 if (total_szB
<= peak_snapshot_total_szB
+ excess_szB_for_new_peak
) {
1385 tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
1388 // Take the snapshot.
1389 snapshot
= & snapshots
[next_snapshot_i
];
1390 take_snapshot(snapshot
, kind
, my_time
, is_detailed
);
1392 // Record if it was detailed.
1394 n_snapshots_since_last_detailed
= 0;
1396 n_snapshots_since_last_detailed
++;
1399 // Update peak data, if it's a Peak snapshot.
1401 Int i
, number_of_peaks_snapshots_found
= 0;
1403 // Sanity check the size, then update our recorded peak.
1404 SizeT snapshot_total_szB
=
1405 snapshot
->heap_szB
+ snapshot
->heap_extra_szB
+ snapshot
->stacks_szB
;
1406 tl_assert2(snapshot_total_szB
> peak_snapshot_total_szB
,
1407 "%ld, %ld\n", snapshot_total_szB
, peak_snapshot_total_szB
);
1408 peak_snapshot_total_szB
= snapshot_total_szB
;
1410 // Find the old peak snapshot, if it exists, and mark it as normal.
1411 for (i
= 0; i
< next_snapshot_i
; i
++) {
1412 if (Peak
== snapshots
[i
].kind
) {
1413 snapshots
[i
].kind
= Normal
;
1414 number_of_peaks_snapshots_found
++;
1417 tl_assert(number_of_peaks_snapshots_found
<= 1);
1420 // Finish up verbosity and stats stuff.
1421 if (n_skipped_snapshots_since_last_snapshot
> 0) {
1422 VERB(2, " (skipped %d snapshot%s)\n",
1423 n_skipped_snapshots_since_last_snapshot
,
1424 ( 1 == n_skipped_snapshots_since_last_snapshot
? "" : "s") );
1426 VERB_snapshot(2, what
, next_snapshot_i
);
1427 n_skipped_snapshots_since_last_snapshot
= 0;
1429 // Cull the entries, if our snapshot table is full.
1431 if (clo_max_snapshots
== next_snapshot_i
) {
1432 min_time_interval
= cull_snapshots();
1435 // Work out the earliest time when the next snapshot can happen.
1436 earliest_possible_time_of_next_snapshot
= my_time
+ min_time_interval
;
//------------------------------------------------------------//
//--- Sanity checking                                      ---//
//------------------------------------------------------------//

static Bool ms_cheap_sanity_check ( void )
{
   return True;   // Nothing useful we can cheaply check.
}

static Bool ms_expensive_sanity_check ( void )
{
   sanity_check_XTree(alloc_xpt, /*parent*/NULL);
   sanity_check_snapshots_array();
   return True;
}
//------------------------------------------------------------//
//--- Heap management                                      ---//
//------------------------------------------------------------//

// Metadata for heap blocks.  Each one contains a pointer to a bottom-XPt,
// which is a foothold into the XCon at which it was allocated.  From
// HP_Chunks, XPt 'space' fields are incremented (at allocation) and
// decremented (at deallocation).
//
// Nb: first two fields must match core's VgHashNode.
typedef
   struct _HP_Chunk {
      struct _HP_Chunk* next;
      Addr              data;       // Ptr to actual block
      SizeT             req_szB;    // Size requested
      SizeT             slop_szB;   // Extra bytes given above those requested
      XPt*              where;      // Where allocated; bottom-XPt
   }
   HP_Chunk;

static VgHashTable *malloc_list = NULL;   // HP_Chunks
static void update_alloc_stats(SSizeT szB_delta)
{
   // Update total_allocs_deallocs_szB.
   if (szB_delta < 0) szB_delta = -szB_delta;
   total_allocs_deallocs_szB += szB_delta;
}
static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
{
   if (heap_szB_delta < 0)
      tl_assert(heap_szB >= -heap_szB_delta);
   if (heap_extra_szB_delta < 0)
      tl_assert(heap_extra_szB >= -heap_extra_szB_delta);

   heap_extra_szB += heap_extra_szB_delta;
   heap_szB       += heap_szB_delta;

   update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
}
static
void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
                    Bool exclude_first_entry, Bool maybe_snapshot )
{
   // Make new HP_Chunk node, add to malloc_list
   HP_Chunk* hc = VG_(malloc)("ms.main.rb.1", sizeof(HP_Chunk));
   hc->req_szB  = req_szB;
   hc->slop_szB = slop_szB;
   hc->data     = (Addr)p;
   hc->where    = NULL;
   VG_(HT_add_node)(malloc_list, hc);

   VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);

   hc->where = get_XCon( tid, exclude_first_entry );

   if (hc->where) {
      // Update statistics.
      n_heap_allocs++;

      // Update heap stats.
      update_heap_stats(req_szB, clo_heap_admin + slop_szB);

      // Update XTree.
      update_XCon(hc->where, req_szB);

      // Maybe take a snapshot.
      if (maybe_snapshot) {
         maybe_take_snapshot(Normal, "  alloc");
      }
   } else {
      // Ignored allocation.
      n_ignored_heap_allocs++;

      VERB(3, "(ignored)\n");
   }

   return p;
}
, SizeT req_szB
, SizeT req_alignB
,
1548 SizeT actual_szB
, slop_szB
;
1551 if ((SSizeT
)req_szB
< 0) return NULL
;
1553 // Allocate and zero if necessary.
1554 p
= VG_(cli_malloc
)( req_alignB
, req_szB
);
1558 if (is_zeroed
) VG_(memset
)(p
, 0, req_szB
);
1559 actual_szB
= VG_(cli_malloc_usable_size
)(p
);
1560 tl_assert(actual_szB
>= req_szB
);
1561 slop_szB
= actual_szB
- req_szB
;
1564 record_block(tid
, p
, req_szB
, slop_szB
, /*exclude_first_entry*/True
,
1565 /*maybe_snapshot*/True
);
1571 void unrecord_block ( void* p
, Bool maybe_snapshot
)
1573 // Remove HP_Chunk from malloc_list
1574 HP_Chunk
* hc
= VG_(HT_remove
)(malloc_list
, (UWord
)p
);
1576 return; // must have been a bogus free()
1580 VERB(3, "<<< unrecord_block\n");
1583 // Update statistics.
1586 // Maybe take a peak snapshot, since it's a deallocation.
1587 if (maybe_snapshot
) {
1588 maybe_take_snapshot(Peak
, "de-PEAK");
1591 // Update heap stats.
1592 update_heap_stats(-hc
->req_szB
, -clo_heap_admin
- hc
->slop_szB
);
1595 update_XCon(hc
->where
, -hc
->req_szB
);
1597 // Maybe take a snapshot.
1598 if (maybe_snapshot
) {
1599 maybe_take_snapshot(Normal
, "dealloc");
1603 n_ignored_heap_frees
++;
1605 VERB(3, "(ignored)\n");
1608 VERB(3, ">>> (-%lu, -%lu)\n", hc
->req_szB
, hc
->slop_szB
);
1611 // Actually free the chunk, and the heap block (if necessary)
1612 VG_(free
)( hc
); hc
= NULL
;
// Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
// ignored, but the realloc is not requested to be ignored, and we are
// shrinking the block, then we have to ignore the realloc -- otherwise we
// could end up with negative heap sizes.  This isn't a danger if we are
// growing such a block, but for consistency (it also simplifies things) we
// ignore such reallocs as well.
static
void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
   HP_Chunk* hc;
   void*     p_new;
   SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   XPt      *old_where, *new_where;
   Bool      is_ignored = False;

   // Remove the old block
   hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   if (hc == NULL) {
      return NULL;   // must have been a bogus realloc()
   }

   old_req_szB  = hc->req_szB;
   old_slop_szB = hc->slop_szB;

   tl_assert(!clo_pages_as_heap);  // Shouldn't be here if --pages-as-heap=yes.

   VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);

   if (hc->where) {
      // Update statistics.
      n_heap_reallocs++;

      // Maybe take a peak snapshot, if it's (effectively) a deallocation.
      if (new_req_szB < old_req_szB) {
         maybe_take_snapshot(Peak, "re-PEAK");
      }
   } else {
      // The original malloc was ignored, so we have to ignore the
      // realloc as well.
      is_ignored = True;
   }

   // Actually do the allocation, if necessary.
   if (new_req_szB <= old_req_szB + old_slop_szB) {
      // New size is smaller or same;  block not moved.
      p_new = p_old;
      new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);

   } else {
      // New size is bigger;  make new block, copy shared contents, free old.
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }
      VG_(memcpy)(p_new, p_old, old_req_szB + old_slop_szB);
      VG_(cli_free)(p_old);
      new_actual_szB = VG_(cli_malloc_usable_size)(p_new);
      tl_assert(new_actual_szB >= new_req_szB);
      new_slop_szB = new_actual_szB - new_req_szB;
   }

   // Update the metadata.
   hc->data     = (Addr)p_new;
   hc->req_szB  = new_req_szB;
   hc->slop_szB = new_slop_szB;
   old_where    = hc->where;

   new_where = get_XCon( tid, /*exclude_first_entry*/True);
   if (!is_ignored && new_where) {
      hc->where = new_where;
      update_XCon(old_where, -old_req_szB);
      update_XCon(new_where,  new_req_szB);
   } else {
      // The realloc itself is ignored.
      is_ignored = True;

      // Update statistics.
      n_ignored_heap_reallocs++;
   }

   // Now insert the new hc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added hc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)(malloc_list, hc);

   if (!is_ignored) {
      // Update heap stats.
      update_heap_stats(new_req_szB - old_req_szB,
                        new_slop_szB - old_slop_szB);

      // Maybe take a snapshot.
      maybe_take_snapshot(Normal, "realloc");
   } else {
      VERB(3, "(ignored)\n");
   }

   VERB(3, ">>> (%ld, %ld)\n",
        new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);

   return p_new;
}
//------------------------------------------------------------//
//--- malloc() et al replacement wrappers                  ---//
//------------------------------------------------------------//

static void* ms_malloc ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment),
                                  /*is_zeroed*/False );
}

static void* ms___builtin_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment),
                                  /*is_zeroed*/False );
}

static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment),
                                  /*is_zeroed*/False );
}

static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
{
   return alloc_and_record_block( tid, m*szB, VG_(clo_alignment),
                                  /*is_zeroed*/True );
}

static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
{
   return alloc_and_record_block( tid, szB, alignB, False );
}

static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
}

static void ms___builtin_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
}

static void ms___builtin_vec_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
}

static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
{
   return realloc_block(tid, p_old, new_szB);
}

static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
{
   HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );

   return ( hc ? hc->req_szB + hc->slop_szB : 0 );
}
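// Example (informal, sizes illustrative): if the client calls malloc(100)
// and the underlying allocator hands back a 104-byte usable block, Massif
// records req_szB=100 and slop_szB=4, so ms_malloc_usable_size() reports 104
// for that pointer, and 0 for any pointer it has no record of.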
//------------------------------------------------------------//
//--- Page handling                                        ---//
//------------------------------------------------------------//

static
void ms_record_page_mem ( Addr a, SizeT len )
{
   ThreadId tid = VG_(get_running_tid)();
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   // Record the first N-1 pages as blocks, but don't do any snapshots.
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                    /*exclude_first_entry*/False, /*maybe_snapshot*/False );
   }
   // Record the last page as a block, and maybe do a snapshot afterwards.
   record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                 /*exclude_first_entry*/False, /*maybe_snapshot*/True );
}
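// Example (sizes illustrative): for a 3-page region starting at address A,
// the loop above records the pages at A and A+VKI_PAGE_SIZE with
// maybe_snapshot=False, and the final record_block() call records the page
// at A+2*VKI_PAGE_SIZE with maybe_snapshot=True, so at most one snapshot is
// considered per recorded region.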
static
void ms_unrecord_page_mem( Addr a, SizeT len )
{
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      unrecord_block((void*)a, /*maybe_snapshot*/False);
   }
   unrecord_block((void*)a, /*maybe_snapshot*/True);
}
//------------------------------------------------------------//

static
void ms_new_mem_mmap ( Addr a, SizeT len,
                       Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_record_page_mem(a, len);
}

static
void ms_new_mem_startup( Addr a, SizeT len,
                         Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   // Startup maps are always page-sized, except the trampoline page is
   // marked by the core as only being the size of the trampoline itself,
   // which is something like 57 bytes.  Round it up to page size.
   len = VG_PGROUNDUP(len);
   ms_record_page_mem(a, len);
}
static
void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
{
   // The brk limit is not necessarily aligned on a page boundary.
   // If the new memory being brk-ed implies allocating a new page,
   // call ms_record_page_mem with page-aligned parameters;
   // otherwise just ignore it.
   Addr old_bottom_page = VG_PGROUNDDN(a - 1);
   Addr new_top_page = VG_PGROUNDDN(a + len - 1);
   if (old_bottom_page != new_top_page)
      ms_record_page_mem(VG_PGROUNDDN(a),
                         (new_top_page - old_bottom_page));
}
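// Worked example (page size assumed to be 0x1000 purely for illustration):
// if the brk segment grows by len=0x20 starting at a=0x804a123, both
// old_bottom_page and new_top_page round down to 0x804a000, so nothing is
// recorded.  If instead len=0x1000, new_top_page becomes 0x804b000, which
// differs from old_bottom_page, and one page's worth of memory starting at
// VG_PGROUNDDN(a) is recorded.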
static
void ms_copy_mem_remap( Addr from, Addr to, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(from, len);
   ms_record_page_mem(to, len);
}

static
void ms_die_mem_munmap( Addr a, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(a, len);
}

static
void ms_die_mem_brk( Addr a, SizeT len )
{
   // Call ms_unrecord_page_mem only if one or more pages are de-allocated.
   // See ms_new_mem_brk for more details.
   Addr new_bottom_page = VG_PGROUNDDN(a - 1);
   Addr old_top_page = VG_PGROUNDDN(a + len - 1);
   if (old_top_page != new_bottom_page)
      ms_unrecord_page_mem(VG_PGROUNDDN(a),
                           (old_top_page - new_bottom_page));
}
//------------------------------------------------------------//
//--- Stacks                                               ---//
//------------------------------------------------------------//

// We really want the inlining to occur...
#define INLINE    inline __attribute__((always_inline))

static void update_stack_stats(SSizeT stack_szB_delta)
{
   if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
   stacks_szB += stack_szB_delta;

   update_alloc_stats(stack_szB_delta);
}

static INLINE void new_mem_stack_2(SizeT len, const HChar* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< new_mem_stack (%ld)\n", len);
      n_stack_allocs++;
      update_stack_stats(len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static INLINE void die_mem_stack_2(SizeT len, const HChar* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< die_mem_stack (%ld)\n", -len);
      n_stack_frees++;
      maybe_take_snapshot(Peak,   "stkPEAK");
      update_stack_stats(-len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static void new_mem_stack(Addr a, SizeT len)
{
   new_mem_stack_2(len, "stk-new");
}

static void die_mem_stack(Addr a, SizeT len)
{
   die_mem_stack_2(len, "stk-die");
}

static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
{
   new_mem_stack_2(len, "sig-new");
}

static void die_mem_stack_signal(Addr a, SizeT len)
{
   die_mem_stack_2(len, "sig-die");
}
//------------------------------------------------------------//
//--- Client Requests                                      ---//
//------------------------------------------------------------//

static void print_monitor_help ( void )
{
   VG_(gdb_printf) ("\n");
   VG_(gdb_printf) ("massif monitor commands:\n");
   VG_(gdb_printf) ("  snapshot [<filename>]\n");
   VG_(gdb_printf) ("  detailed_snapshot [<filename>]\n");
   VG_(gdb_printf) ("      takes a snapshot (or a detailed snapshot)\n");
   VG_(gdb_printf) ("      and saves it in <filename>\n");
   VG_(gdb_printf) ("      default <filename> is massif.vgdb.out\n");
   VG_(gdb_printf) ("  all_snapshots [<filename>]\n");
   VG_(gdb_printf) ("      saves all snapshot(s) taken so far in <filename>\n");
   VG_(gdb_printf) ("      default <filename> is massif.vgdb.out\n");
   VG_(gdb_printf) ("\n");
}
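// Example of driving these commands from gdb via vgdb (the file name is
// illustrative; see print_monitor_help above for the accepted syntax):
//
//    (gdb) monitor snapshot
//    (gdb) monitor detailed_snapshot massif.vgdb.out
//    (gdb) monitor all_snapshots massif.vgdb.out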
/* Forward declaration.
   Returns True if the request is recognised, False otherwise. */
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req);

static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
{
   switch (argv[0]) {
   case VG_USERREQ__MALLOCLIKE_BLOCK: {
      void* p   = (void*)argv[1];
      SizeT szB =        argv[2];
      record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
                    /*maybe_snapshot*/True );
      *ret = 0;
      return True;
   }
   case VG_USERREQ__RESIZEINPLACE_BLOCK: {
      void* p        = (void*)argv[1];
      SizeT newSizeB =        argv[3];

      unrecord_block(p, /*maybe_snapshot*/True);
      record_block(tid, p, newSizeB, /*slop_szB*/0,
                   /*exclude_first_entry*/False, /*maybe_snapshot*/True);
      *ret = 0;
      return True;
   }
   case VG_USERREQ__FREELIKE_BLOCK: {
      void* p = (void*)argv[1];
      unrecord_block(p, /*maybe_snapshot*/True);
      *ret = 0;
      return True;
   }
   case VG_USERREQ__GDB_MONITOR_COMMAND: {
      Bool handled = handle_gdb_monitor_command (tid, (HChar*)argv[1]);
      if (handled)
         *ret = 1;
      else
         *ret = 0;
      return handled;
   }
   default:
      *ret = 0;
      return False;
   }
}
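// For reference, these requests normally reach us via the client-side macros
// in valgrind.h (sketch; argument values are illustrative, and only the
// address and size arguments are used by the cases above):
//
//    VALGRIND_MALLOCLIKE_BLOCK(p, szB, /*rzB*/0, /*is_zeroed*/0);
//    VALGRIND_RESIZEINPLACE_BLOCK(p, oldSzB, newSzB, /*rzB*/0);
//    VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);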
//------------------------------------------------------------//
//--- Instrumentation                                      ---//
//------------------------------------------------------------//

static void add_counter_update(IRSB* sbOut, Int n)
{
   #if defined(VG_BIGENDIAN)
   # define END Iend_BE
   #elif defined(VG_LITTLEENDIAN)
   # define END Iend_LE
   #else
   # error "Unknown endianness"
   #endif
   // Add code to increment 'guest_instrs_executed' by 'n', like this:
   //   WrTmp(t1, Load64(&guest_instrs_executed))
   //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
   //   Store(&guest_instrs_executed, t2)
   IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );

   IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
   IRStmt* st2 =
      IRStmt_WrTmp(t2,
                   IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
                                IRExpr_Const(IRConst_U64(n))));
   IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));

   addStmtToIRSB( sbOut, st1 );
   addStmtToIRSB( sbOut, st2 );
   addStmtToIRSB( sbOut, st3 );
   #undef END
}
static IRSB* ms_instrument2( IRSB* sbIn )
{
   Int   i, n = 0;
   IRSB* sbOut;

   // We increment the instruction count in two places:
   // - just before any Ist_Exit statements;
   // - just before the IRSB's end.
   // In the former case, we zero 'n' and then continue instrumenting.

   sbOut = deepCopyIRSBExceptStmts(sbIn);

   for (i = 0; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];

      if (!st || st->tag == Ist_NoOp) continue;

      if (st->tag == Ist_IMark) {
         n++;
      } else if (st->tag == Ist_Exit) {
         if (n > 0) {
            // Add an increment before the Exit statement, then reset 'n'.
            add_counter_update(sbOut, n);
            n = 0;
         }
      }
      addStmtToIRSB( sbOut, st );
   }

   if (n > 0) {
      // Add an increment before the SB end.
      add_counter_update(sbOut, n);
   }

   return sbOut;
}
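// Example (illustrative): for a superblock whose statements are
//    IMark, IMark, Exit, IMark
// the code above emits add_counter_update(sbOut, 2) just before the Exit and
// add_counter_update(sbOut, 1) just before the superblock's end, so
// guest_instrs_executed advances by however many guest instructions were
// actually reached, whether or not the side-exit is taken.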
static
IRSB* ms_instrument ( VgCallbackClosure* closure,
                      IRSB* sbIn,
                      const VexGuestLayout* layout,
                      const VexGuestExtents* vge,
                      const VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   if (! have_started_executing_code) {
      // Do an initial sample to guarantee that we have at least one.
      // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
      // 'maybe_take_snapshot's internal static variables are initialised.
      have_started_executing_code = True;
      maybe_take_snapshot(Normal, "startup");
   }

   if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
   else if (clo_time_unit == TimeMS) { return sbIn; }
   else if (clo_time_unit == TimeB)  { return sbIn; }
   else    { tl_assert2(0, "bad --time-unit value"); }
}
//------------------------------------------------------------//
//--- Writing snapshots                                    ---//
//------------------------------------------------------------//

#define FP(format, args...) ({ VG_(fprintf)(fp, format, ##args); })
static void pp_snapshot_SXPt(VgFile *fp, SXPt* sxpt, Int depth,
                             HChar* depth_str, Int depth_str_len,
                             SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
{
   Int   i, j, n_insig_children_sxpts;
   SXPt* child = NULL;

   // Used for printing function names.  Is made static to keep it out
   // of the stack frame -- this function is recursive.  Obviously this
   // now means its contents are trashed across the recursive call.
   const HChar* ip_desc;

   switch (sxpt->tag) {
    case SigSXPt:
      // Print the SXPt itself.
      if (0 == depth) {
         if (clo_heap) {
            ip_desc =
               ( clo_pages_as_heap
               ? "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
               : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc."
               );
         } else {
            // XXX: --alloc-fns?
            // Nick thinks this case cannot happen.  ip_desc would be
            // conceptually uninitialised here.  Therefore:
            tl_assert2(0, "pp_snapshot_SXPt: unexpected");
         }
      } else {
         // If it's main-or-below-main, we (if appropriate) ignore everything
         // below it by pretending it has no children.
         if ( ! VG_(clo_show_below_main) ) {
            Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(sxpt->Sig.ip);
            if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
               sxpt->Sig.n_children = 0;
            }
         }

         // We need the -1 to get the line number right, but I'm not sure why.
         ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, NULL);
      }

      // Do the non-ip_desc part first...
      FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);

      // For ip_descs beginning with "0xABCD...:" addresses, we first
      // measure the length of the "0xabcd: " address at the start of the
      // ip_desc.
      if ('0' == ip_desc[0] && 'x' == ip_desc[1]) {
         j = 2;
         while (True) {
            if (ip_desc[j]) {
               if (':' == ip_desc[j]) break;
               j++;
            } else {
               tl_assert2(0, "ip_desc has unexpected form: %s\n", ip_desc);
            }
         }
      }
      // It used to be that ip_desc was truncated at the end.
      // But there does not seem to be a good reason for that.  Besides,
      // the string was truncated at the right, which is less than ideal.
      // Truncation at the beginning of the string would have been preferable.
      // Think several nested namespaces in C++....
      // Anyhow, we spit out the full-length string now.
      FP("%s\n", ip_desc);

      // Indent.
      tl_assert(depth+1 < depth_str_len-1);    // -1 for end NUL char
      depth_str[depth+0] = ' ';
      depth_str[depth+1] = '\0';

      // Sort SXPt's children by szB (reverse order: biggest to smallest).
      // Nb: we sort them here, rather than earlier (eg. in dup_XTree), for
      // two reasons.  First, if we do it during dup_XTree, it can get
      // expensive (eg. 15% of execution time for konqueror
      // startup/shutdown).  Second, this way we get the Insig SXPt (if one
      // is present) in its sorted position, not at the end.
      VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
                 SXPt_revcmp_szB);

      // Print the SXPt's children.  They should already be in sorted order.
      n_insig_children_sxpts = 0;
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         child = sxpt->Sig.children[i];

         if (InsigSXPt == child->tag)
            n_insig_children_sxpts++;

         // Ok, print the child.  NB: contents of ip_desc will be
         // trashed by this recursive call.  Doesn't matter currently,
         // but worth noting.
         pp_snapshot_SXPt(fp, child, depth+1, depth_str, depth_str_len,
                          snapshot_heap_szB, snapshot_total_szB);
      }

      // Unindent.
      depth_str[depth+0] = '\0';
      depth_str[depth+1] = '\0';

      // There should be 0 or 1 Insig children SXPts.
      tl_assert(n_insig_children_sxpts <= 1);
      break;

    case InsigSXPt: {
      const HChar* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
      FP("%sn0: %lu in %d place%s below massif's threshold (%.2f%%)\n",
         depth_str, sxpt->szB, sxpt->Insig.n_xpts, s, clo_threshold);
      break;
    }

    default:
      tl_assert2(0, "pp_snapshot_SXPt: unrecognised SXPt tag");
   }
}
static void pp_snapshot(VgFile *fp, Snapshot* snapshot, Int snapshot_n)
{
   sanity_check_snapshot(snapshot);

   FP("#-----------\n");
   FP("snapshot=%d\n", snapshot_n);
   FP("#-----------\n");
   FP("time=%lld\n",            snapshot->time);
   FP("mem_heap_B=%lu\n",       snapshot->heap_szB);
   FP("mem_heap_extra_B=%lu\n", snapshot->heap_extra_szB);
   FP("mem_stacks_B=%lu\n",     snapshot->stacks_szB);

   if (is_detailed_snapshot(snapshot)) {
      // Detailed snapshot -- print heap tree.
      Int   depth_str_len = clo_depth + 3;
      HChar* depth_str = VG_(malloc)("ms.main.pps.1",
                                     sizeof(HChar) * depth_str_len);
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      depth_str[0] = '\0';   // Initialise depth_str to "".

      FP("heap_tree=%s\n", ( Peak == snapshot->kind ? "peak" : "detailed" ));
      pp_snapshot_SXPt(fp, snapshot->alloc_sxpt, 0, depth_str,
                       depth_str_len, snapshot->heap_szB,
                       snapshot_total_szB);

      VG_(free)(depth_str);
   } else {
      FP("heap_tree=empty\n");
   }
}
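// For reference, a non-detailed snapshot record produced by the FP() calls
// above looks like this in the output file (values illustrative):
//
//    #-----------
//    snapshot=3
//    #-----------
//    time=1234567
//    mem_heap_B=4096
//    mem_heap_extra_B=48
//    mem_stacks_B=1024
//    heap_tree=empty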
static void write_snapshots_to_file(const HChar* massif_out_file,
                                    Snapshot snapshots_array[],
                                    Int nr_elements)
{
   Int i;
   VgFile *fp;

   fp = VG_(fopen)(massif_out_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
                                    VKI_S_IRUSR|VKI_S_IWUSR);
   if (fp == NULL) {
      // If the file can't be opened for whatever reason (conflict
      // between multiple Massif-controlled processes?), give up now.
      VG_(umsg)("error: can't open output file '%s'\n", massif_out_file );
      VG_(umsg)("       ... so profiling results will be missing.\n");
      return;
   }

   // Print massif-specific options that were used.
   // XXX: is it worth having a "desc:" line?  Could just call it "options:"
   // -- this file format isn't as generic as Cachegrind's, so the
   // implied genericity of "desc:" is bogus.
   FP("desc:");
   for (i = 0; i < VG_(sizeXA)(args_for_massif); i++) {
      HChar* arg = *(HChar**)VG_(indexXA)(args_for_massif, i);
      FP(" %s", arg);
   }
   if (0 == i) FP(" (none)");
   FP("\n");

   // Print "cmd:" line.
   FP("cmd: ");
   FP("%s", VG_(args_the_exename));
   for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
      HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
      FP(" %s", arg);
   }
   FP("\n");

   FP("time_unit: %s\n", TimeUnit_to_string(clo_time_unit));

   for (i = 0; i < nr_elements; i++) {
      Snapshot* snapshot = & snapshots_array[i];
      pp_snapshot(fp, snapshot, i);     // Detailed snapshot!
   }

   VG_(fclose) (fp);
}

static void write_snapshots_array_to_file(void)
{
   // Setup output filename.  Nb: it's important to do this now, ie. as late
   // as possible.  If we do it at start-up and the program forks and the
   // output file format string contains a %p (pid) specifier, both the
   // parent and child will incorrectly write to the same file; this
   // happened in 3.3.0.
   HChar* massif_out_file =
      VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);
   write_snapshots_to_file (massif_out_file, snapshots, next_snapshot_i);
   VG_(free)(massif_out_file);
}
static void handle_snapshot_monitor_command (const HChar *filename,
                                             Bool detailed)
{
   Snapshot snapshot;

   if (!clo_pages_as_heap && !have_started_executing_code) {
      // See comments of variable have_started_executing_code.
      VG_(gdb_printf)
         ("error: cannot take snapshot before execution has started\n");
      return;
   }

   clear_snapshot(&snapshot, /* do_sanity_check */ False);
   take_snapshot(&snapshot, Normal, get_time(), detailed);
   write_snapshots_to_file ((filename == NULL) ?
                            "massif.vgdb.out" : filename,
                            &snapshot,
                            1);
   delete_snapshot(&snapshot);
}

static void handle_all_snapshots_monitor_command (const HChar *filename)
{
   if (!clo_pages_as_heap && !have_started_executing_code) {
      // See comments of variable have_started_executing_code.
      VG_(gdb_printf)
         ("error: cannot take snapshot before execution has started\n");
      return;
   }

   write_snapshots_to_file ((filename == NULL) ?
                            "massif.vgdb.out" : filename,
                            snapshots, next_snapshot_i);
}
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
{
   HChar* wcmd;
   HChar s[VG_(strlen(req)) + 1]; /* copy for strtok_r */
   HChar* ssaveptr;

   VG_(strcpy) (s, req);

   wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
   switch (VG_(keyword_id) ("help snapshot detailed_snapshot all_snapshots",
                            wcmd, kwd_report_duplicated_matches)) {
   case -2: /* multiple matches */
      return True;
   case -1: /* not found */
      return False;
   case  0: /* help */
      print_monitor_help();
      return True;
   case  1: { /* snapshot */
      HChar* filename;
      filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      handle_snapshot_monitor_command (filename, False /* detailed */);
      return True;
   }
   case  2: { /* detailed_snapshot */
      HChar* filename;
      filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      handle_snapshot_monitor_command (filename, True /* detailed */);
      return True;
   }
   case  3: { /* all_snapshots */
      HChar* filename;
      filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      handle_all_snapshots_monitor_command (filename);
      return True;
   }
   default:
      tl_assert(0);
      return False;
   }
}
static void ms_print_stats (void)
{
#define STATS(format, args...) \
      VG_(dmsg)("Massif: " format, ##args)

   STATS("heap allocs:           %u\n", n_heap_allocs);
   STATS("heap reallocs:         %u\n", n_heap_reallocs);
   STATS("heap frees:            %u\n", n_heap_frees);
   STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
   STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
   STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
   STATS("stack allocs:          %u\n", n_stack_allocs);
   STATS("stack frees:           %u\n", n_stack_frees);
   STATS("XPts:                  %u\n", n_xpts);
   STATS("top-XPts:              %u (%d%%)\n",
         alloc_xpt->n_children,
         ( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
   STATS("XPt init expansions:   %u\n", n_xpt_init_expansions);
   STATS("XPt later expansions:  %u\n", n_xpt_later_expansions);
   STATS("SXPt allocs:           %u\n", n_sxpt_allocs);
   STATS("SXPt frees:            %u\n", n_sxpt_frees);
   STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
   STATS("real snapshots:        %u\n", n_real_snapshots);
   STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
   STATS("peak snapshots:        %u\n", n_peak_snapshots);
   STATS("cullings:              %u\n", n_cullings);
   STATS("XCon redos:            %u\n", n_XCon_redos);

#undef STATS
}
//------------------------------------------------------------//
//--- Finalisation                                         ---//
//------------------------------------------------------------//

static void ms_fini(Int exit_status)
{
   // Output.
   write_snapshots_array_to_file();

   // Stats.
   tl_assert(n_xpts > 0);  // always have alloc_xpt
}
//------------------------------------------------------------//
//--- Initialisation                                       ---//
//------------------------------------------------------------//

static void ms_post_clo_init(void)
{
   Int i;
   HChar* LD_PRELOAD_val;
   HChar* s;
   HChar* s2;

   if (clo_pages_as_heap) {
      if (clo_stacks) {
         VG_(fmsg_bad_option)("--pages-as-heap=yes",
            "Cannot be used together with --stacks=yes");
      }
   }
   if (!clo_heap) {
      clo_pages_as_heap = False;
   }

   // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
   // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
   // platform-equivalent).  We replace it entirely with spaces because then
   // the linker doesn't complain (it does complain if we just change the name
   // to a bogus file).  This is a bit of a hack, but LD_PRELOAD is set up well
   // before tool initialisation, so this seems the best way to do it.
   if (clo_pages_as_heap) {
      clo_heap_admin = 0;     // No heap admin on pages.

      LD_PRELOAD_val = VG_(getenv)( VG_(LD_PRELOAD_var_name) );
      tl_assert(LD_PRELOAD_val);

      // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
      tl_assert(s2);

      // Now find the vgpreload_massif-$PLATFORM entry.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
      tl_assert(s2);

      // Blank out everything to the previous ':', which must be there because
      // of the preceding vgpreload_core-$PLATFORM entry.
      for (s = s2; *s != ':'; s--) {
         *s = ' ';
      }

      // Blank out everything to the end of the entry, which will be '\0' if
      // LD_PRELOAD was empty before Valgrind started, or ':' otherwise.
      for (s = s2; *s != ':' && *s != '\0'; s++) {
         *s = ' ';
      }
   }
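   // Example (paths illustrative): the blanking above turns an LD_PRELOAD
   // value such as
   //    ".../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so"
   // into
   //    ".../vgpreload_core-amd64-linux.so                                     "
   // i.e. the vgpreload_massif entry and its leading ':' are overwritten with
   // spaces, so nothing extra gets preloaded into the client.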
   // Print alloc-fns and ignore-fns, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(1, "alloc-fns:\n");
      for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
         HChar** fn_ptr = VG_(indexXA)(alloc_fns, i);
         VERB(1, " %s\n", *fn_ptr);
      }

      VERB(1, "ignore-fns:\n");
      if (0 == VG_(sizeXA)(ignore_fns)) {
         VERB(1, " <empty>\n");
      }
      for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
         HChar** fn_ptr = VG_(indexXA)(ignore_fns, i);
         VERB(1, " %d: %s\n", i, *fn_ptr);
      }
   }

   if (clo_stacks) {
      VG_(track_new_mem_stack)        ( new_mem_stack        );
      VG_(track_die_mem_stack)        ( die_mem_stack        );
      VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
      VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
   }

   if (clo_pages_as_heap) {
      VG_(track_new_mem_startup) ( ms_new_mem_startup );
      VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
      VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );

      VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );

      VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
      VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
   }

   // Initialise snapshot array, and sanity-check it.
   snapshots = VG_(malloc)("ms.main.mpoci.1",
                           sizeof(Snapshot) * clo_max_snapshots);
   // We don't want to do snapshot sanity checks here, because they're
   // currently uninitialised.
   for (i = 0; i < clo_max_snapshots; i++) {
      clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
   }
   sanity_check_snapshots_array();
}
static void ms_pre_clo_init(void)
{
   VG_(details_name)            ("Massif");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a heap profiler");
   VG_(details_copyright_author)(
      "Copyright (C) 2003-2013, and GNU GPL'd, by Nicholas Nethercote");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   VG_(details_avg_translation_sizeB) ( 330 );

   VG_(clo_vex_control).iropt_register_updates_default
      = VG_(clo_px_file_backed)
      = VexRegUpdSpAtMemAccess; // overridable by the user.

   // Basic functions.
   VG_(basic_tool_funcs)          (ms_post_clo_init,
                                   ms_instrument,
                                   ms_fini);

   // Needs.
   VG_(needs_libc_freeres)();
   VG_(needs_command_line_options)(ms_process_cmd_line_option,
                                   ms_print_usage,
                                   ms_print_debug_usage);
   VG_(needs_client_requests)     (ms_handle_client_request);
   VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
                                   ms_expensive_sanity_check);
   VG_(needs_print_stats)         (ms_print_stats);
   VG_(needs_malloc_replacement)  (ms_malloc,
                                   ms___builtin_new,
                                   ms___builtin_vec_new,
                                   ms_memalign,
                                   ms_calloc,
                                   ms_free,
                                   ms___builtin_delete,
                                   ms___builtin_vec_delete,
                                   ms_realloc,
                                   ms_malloc_usable_size,
                                   0 );

   malloc_list = VG_(HT_construct)( "Massif's malloc list" );

   // Dummy node at top of the context structure.
   alloc_xpt = new_XPt(/*ip*/0, /*parent*/NULL);

   // Initialise alloc_fns and ignore_fns.

   // Initialise args_for_massif.
   args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
                                VG_(free), sizeof(HChar*));
}

VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)
//--------------------------------------------------------------------//
//--- end                                                          ---//
//--------------------------------------------------------------------//