2 /*--------------------------------------------------------------------*/
3 /*--- The leak checker. mc_leakcheck.c ---*/
4 /*--------------------------------------------------------------------*/
6 /*
7 This file is part of MemCheck, a heavyweight Valgrind tool for
8 detecting memory errors.
10 Copyright (C) 2000-2017 Julian Seward
11 jseward@acm.org
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
29 #include "pub_tool_basics.h"
30 #include "pub_tool_vki.h"
31 #include "pub_tool_aspacehl.h"
32 #include "pub_tool_aspacemgr.h"
33 #include "pub_tool_execontext.h"
34 #include "pub_tool_hashtable.h"
35 #include "pub_tool_libcbase.h"
36 #include "pub_tool_libcassert.h"
37 #include "pub_tool_libcprint.h"
38 #include "pub_tool_libcsignal.h"
39 #include "pub_tool_machine.h"
40 #include "pub_tool_mallocfree.h"
41 #include "pub_tool_options.h"
42 #include "pub_tool_oset.h"
43 #include "pub_tool_poolalloc.h"
44 #include "pub_tool_signals.h" // Needed for mc_include.h
45 #include "pub_tool_libcsetjmp.h" // setjmp facilities
46 #include "pub_tool_tooliface.h" // Needed for mc_include.h
47 #include "pub_tool_xarray.h"
48 #include "pub_tool_xtree.h"
50 #include "mc_include.h"
52 /*------------------------------------------------------------*/
53 /*--- An overview of leak checking. ---*/
54 /*------------------------------------------------------------*/
56 // Leak-checking is a directed-graph traversal problem. The graph has
57 // two kinds of nodes:
58 // - root-set nodes:
59 // - GP registers of all threads;
60 // - valid, aligned, pointer-sized data words in valid client memory,
61 // including stacks, but excluding words within client heap-allocated
62 // blocks (they are excluded so that later on we can differentiate
63 // between heap blocks that are indirectly leaked vs. directly leaked).
64 // - heap-allocated blocks. A block is a mempool chunk or a malloc chunk
65 // that doesn't contain a mempool chunk. Nb: the terms "blocks" and
66 // "chunks" are used interchangeably below.
68 // There are two kinds of edges:
69 // - start-pointers, i.e. pointers to the start of a block;
70 // - interior-pointers, i.e. pointers to the interior of a block.
72 // We use "pointers" rather than "edges" below.
74 // Root set nodes only point to blocks. Blocks only point to blocks;
75 // a block can point to itself.
77 // The aim is to traverse the graph and determine the status of each block.
79 // There are 9 distinct cases. See memcheck/docs/mc-manual.xml for details.
80 // Presenting all nine categories to the user is probably too much.
81 // Currently we do this:
82 // - definitely lost: case 3
83 // - indirectly lost: case 4, 9
84 // - possibly lost: cases 5..8
85 // - still reachable: cases 1, 2
86 //
87 // It's far from clear that this is the best possible categorisation; it's
88 // accreted over time without any central guiding principle.
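//
// As a purely illustrative example (not taken from these sources; loosely in
// the spirit of memcheck/tests/leak-cases.c), a client program producing one
// block in each of the user-visible categories above could look like this:
//
//    #include <stdlib.h>
//
//    static int*  still_reachable;    // start-pointer in the root set
//    static char* interior_only;      // interior-pointer in the root set
//
//    int main(void)
//    {
//       still_reachable = malloc(16);             // "still reachable"
//       interior_only   = (char*)malloc(16) + 4;  // "possibly lost"
//
//       int** dl = malloc(sizeof *dl);            // "definitely lost" below
//       *dl = malloc(16);                         // "indirectly lost" below
//       dl = NULL;             // drop the only start-pointer chain to both
//       return 0;
//    }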
90 /*------------------------------------------------------------*/
91 /*--- XXX: Thoughts for improvement. ---*/
92 /*------------------------------------------------------------*/
94 // From the user's point of view:
95 // - If they aren't using interior-pointers, they just have to fix the
96 // directly lost blocks, and the indirectly lost ones will be fixed as
97 // part of that. Any possibly lost blocks will just be due to random
98 // pointer garbage and can be ignored.
99 //
100 // - If they are using interior-pointers, the fact that they currently are not
101 // being told which ones might be directly lost vs. indirectly lost makes
102 // it hard to know where to begin.
104 // All this makes me wonder if a new option is warranted:
105 // --follow-interior-pointers. By default it would be off, the leak checker
106 // wouldn't follow interior-pointers and there would only be 3 categories:
107 // R, DL, IL.
109 // If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
110 // IR/IL/DL, IL/DL). That output is harder to understand but it's your own
111 // damn fault for using interior-pointers...
113 // ----
115 // Also, why are two blank lines printed between each loss record?
116 // [bug 197930]
118 // ----
120 // Also, --show-reachable is a bad name because it also turns on the showing
121 // of indirectly leaked blocks(!) It would be better named --show-all or
122 // --show-all-heap-blocks, because that's the end result.
123 // We now have the option --show-leak-kinds=... which allows specifying =all.
125 // ----
127 // Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
128 // names. VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
129 // better.
131 // ----
133 // Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
134 // they combine direct leaks and indirect leaks into one. New, more precise
135 // ones (they'll need new names) would be good. If more categories are
136 // used, as per the --follow-interior-pointers option, they should be
137 // updated accordingly. And they should use a struct to return the values.
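//
// For reference, a rough sketch of how the existing client requests are
// typically used (macros from memcheck.h; the variable names are illustrative):
//
//    #include <valgrind/memcheck.h>
//
//    unsigned long leaked, dubious, reachable, suppressed;
//    VALGRIND_DO_LEAK_CHECK;    // run a full leak check now
//    VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
//    // As noted above, 'leaked' lumps direct and indirect losses together;
//    // more precise replacements would need to separate them.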
139 // ----
141 // Also, for this case:
143 // (4) p4 BBB ---> AAA
145 // BBB is definitely directly lost. AAA is definitely indirectly lost.
146 // Here's the relevant loss records printed for a full check (each block is
147 // 16 bytes):
149 // ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
150 // ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
151 // ==20397== by 0x400521: mk (leak-cases.c:49)
152 // ==20397== by 0x400578: main (leak-cases.c:72)
154 // ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
155 // lost in loss record 14 of 15
156 // ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
157 // ==20397== by 0x400521: mk (leak-cases.c:49)
158 // ==20397== by 0x400580: main (leak-cases.c:72)
160 // The first one is fine -- it describes AAA.
162 // The second one is for BBB. It's correct in that 16 bytes in 1 block are
163 // directly lost. It's also correct that 16 are indirectly lost as a result,
164 // but it means that AAA is being counted twice in the loss records. (It's
165 // not, thankfully, counted twice in the summary counts). Argh.
167 // This would be less confusing for the second one:
169 // ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
170 // of 15 (and 16 bytes in 1 block are indirectly lost as a result; they
171 // are mentioned elsewhere (if --show-reachable=yes or indirect is given
172 // in --show-leak-kinds=... !))
173 // ==20397== at 0x4C2694E: malloc (vg_replace_malloc.c:177)
174 // ==20397== by 0x400521: mk (leak-cases.c:49)
175 // ==20397== by 0x400580: main (leak-cases.c:72)
177 // But ideally we'd present the loss record for the directly lost block and
178 // then the resultant indirectly lost blocks and make it clear the
179 // dependence. Double argh.
181 /*------------------------------------------------------------*/
182 /*--- The actual algorithm. ---*/
183 /*------------------------------------------------------------*/
185 // - Find all the blocks (a.k.a. chunks) to check. Mempool chunks require
186 // some special treatment because they can be within malloc'd blocks.
187 // - Scan every word in the root set (GP registers and valid
188 // non-heap memory words).
189 // - First, we skip if it doesn't point to valid memory.
190 // - Then, we see if it points to the start or interior of a block. If
191 // so, we push the block onto the mark stack and mark it as having been
192 // reached.
193 // - Then, we process the mark stack, repeating the scanning for each block;
194 // this can push more blocks onto the mark stack. We repeat until the
195 // mark stack is empty. Each block is marked as definitely or possibly
196 // reachable, depending on whether interior-pointers were required to
197 // reach it.
198 // - At this point we know for every block if it's reachable or not.
199 // - We then push each unreached block onto the mark stack, using the block
200 // number as the "clique" number.
201 // - We process the mark stack again, this time grouping blocks into cliques
202 // in order to facilitate the directly/indirectly lost categorisation.
203 // - We group blocks by their ExeContexts and categorisation, and print them
204 // if --leak-check=full. We also print summary numbers.
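//
// A rough sketch (simplified; not the actual code of MC_(detect_memory_leaks))
// of how these phases map onto the helpers defined later in this file, where
// root_start/root_len stand for each scanned root range:
//
//    lc_chunks = get_sorted_array_of_active_chunks(&lc_n_chunks);
//    // ... allocate and zero lc_extras[] and lc_markstack[] ...
//
//    // Phase 1: scan the root set (registers via
//    // lc_push_if_a_chunk_ptr_register, memory via lc_scan_memory),
//    // then drain the mark stack; no cliques yet (clique == -1).
//    lc_scan_memory(root_start, root_len, /*is_prior_definite*/True,
//                   /*clique*/-1, /*cur_clique*/-1, /*searched*/0, 0);
//    lc_process_markstack(/*clique*/-1);
//
//    // Phase 2: every block still Unreached becomes a clique leader and is
//    // rescanned so the blocks it reaches get marked IndirectLeak.
//    for (i = 0; i < lc_n_chunks; i++) {
//       if (lc_extras[i].state == Unreached) {
//          lc_push(i, lc_chunks[i]);
//          lc_process_markstack(/*clique*/i);
//       }
//    }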
206 // A note on "cliques":
207 // - A directly lost block is one with no pointers to it. An indirectly
208 // lost block is one that is pointed to by a directly or indirectly lost
209 // block.
210 // - Each directly lost block has zero or more indirectly lost blocks
211 // hanging off it. All these blocks together form a "clique". The
212 // directly lost block is called the "clique leader". The clique number
213 // is the number (in lc_chunks[]) of the clique leader.
214 // - Actually, a directly lost block may be pointed to if it's part of a
215 // cycle. In that case, there may be more than one choice for the clique
216 // leader, and the choice is arbitrary. Eg. if you have A-->B and B-->A
217 // either A or B could be the clique leader.
218 // - Cliques cannot overlap, and will be truncated to avoid this. Eg. if we
219 // have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
220 // {B,C} (again the choice is arbitrary). This is because we don't want
221 // to count a block as indirectly lost more than once.
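//
// Worked example (illustrative numbers): if A (16 bytes) --> B (16 bytes)
// --> C (16 bytes) are all unreached and A ends up as the clique leader,
// then after the clique pass
//    lc_extras[A].IorC.indirect_szB == 32   (B's 16 + C's 16)
//    lc_extras[B].state == lc_extras[C].state == IndirectLeak
// and A's loss record is reported along the lines of
//    "48 (16 direct, 32 indirect) bytes in 1 blocks are definitely lost".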
223 // A note on 'is_prior_definite':
224 // - This is a boolean used in various places that indicates if the chain
225 // up to the prior node (prior to the one being considered) is definite.
226 // - In the clique == -1 case:
227 // - if True it means that the prior node is a root-set node, or that the
228 // prior node is a block which is reachable from the root-set via
229 // start-pointers.
230 // - if False it means that the prior node is a block that is only
231 // reachable from the root-set via a path including at least one
232 // interior-pointer.
233 // - In the clique != -1 case, currently it's always True because we treat
234 // start-pointers and interior-pointers the same for direct/indirect leak
235 // checking. If we added a PossibleIndirectLeak state then this would
236 // change.
239 // Define to debug the memory-leak-detector.
240 #define VG_DEBUG_FIND_CHUNK 0
241 #define VG_DEBUG_LEAKCHECK 0
242 #define VG_DEBUG_CLIQUE 0
245 /*------------------------------------------------------------*/
246 /*--- Getting the initial chunks, and searching them. ---*/
247 /*------------------------------------------------------------*/
249 // Compare the MC_Chunks by 'data' (i.e. the address of the block).
250 static Int compare_MC_Chunks(const void* n1, const void* n2)
252 const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
253 const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
254 if (mc1->data < mc2->data) return -1;
255 if (mc1->data > mc2->data) return 1;
256 return 0;
259 #if VG_DEBUG_FIND_CHUNK
260 // Used to sanity-check the fast binary-search mechanism.
261 static
262 Int find_chunk_for_OLD ( Addr ptr,
263 MC_Chunk** chunks,
264 Int n_chunks )
267 Int i;
268 Addr a_lo, a_hi;
269 PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
270 for (i = 0; i < n_chunks; i++) {
271 PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD_LOOP);
272 a_lo = chunks[i]->data;
273 a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
274 if (a_lo == a_hi)
275 a_hi++; // Special case for szB 0. See find_chunk_for.
276 if (a_lo <= ptr && ptr < a_hi)
277 return i;
279 return -1;
281 #endif
283 // Find the i such that ptr points at or inside the block described by
284 // chunks[i]. Return -1 if none found. This assumes that chunks[]
285 // has been sorted on the 'data' field.
286 static
287 Int find_chunk_for ( Addr ptr,
288 MC_Chunk** chunks,
289 Int n_chunks )
291 Addr a_mid_lo, a_mid_hi;
292 Int lo, mid, hi, retVal;
293 // VG_(printf)("find chunk for %p = ", ptr);
294 retVal = -1;
295 lo = 0;
296 hi = n_chunks-1;
297 while (True) {
298 // Invariant: current unsearched space is from lo to hi, inclusive.
299 if (lo > hi) break; // not found
301 mid = (lo + hi) / 2;
302 a_mid_lo = chunks[mid]->data;
303 a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
304 // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
305 // Special-case zero-sized blocks - treat them as if they had
306 // size 1. Not doing so causes them to not cover any address
307 // range at all and so will never be identified as the target of
308 // any pointer, which causes them to be incorrectly reported as
309 // definitely leaked.
310 if (chunks[mid]->szB == 0)
311 a_mid_hi++;
313 if (ptr < a_mid_lo) {
314 hi = mid-1;
315 continue;
317 if (ptr >= a_mid_hi) {
318 lo = mid+1;
319 continue;
321 tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
322 retVal = mid;
323 break;
326 # if VG_DEBUG_FIND_CHUNK
327 tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
328 # endif
329 // VG_(printf)("%d\n", retVal);
330 return retVal;
334 static MC_Chunk**
335 get_sorted_array_of_active_chunks(Int* pn_chunks)
337 UInt n_mallocs;
338 MC_Chunk **mallocs;
340 // First we collect all the malloc chunks into an array and sort it.
341 // We do this because we want to query the chunks by interior
342 // pointers, requiring binary search.
343 mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
344 if (n_mallocs == 0) {
345 tl_assert(mallocs == NULL);
346 *pn_chunks = 0;
347 return NULL;
349 VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);
351 // If there are no mempools (for most users, this is the case),
352 // n_mallocs and mallocs are the final result;
353 // otherwise we need to do special handling for mempools.
355 if (VG_(HT_count_nodes)(MC_(mempool_list)) > 0) {
356 // Our goal is to construct a set of chunks that includes every
357 // mempool chunk, and every malloc region that *doesn't* contain a
358 // mempool chunk.
359 MC_Mempool *mp;
360 MC_Chunk **chunks, *mc;
361 UInt n_chunks, m, s;
362 Bool *malloc_chunk_holds_a_pool_chunk;
364 // We build an array containing a Bool for each malloc chunk,
365 // indicating whether it contains any mempool chunks.
366 malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
367 n_mallocs, sizeof(Bool) );
368 n_chunks = n_mallocs;
370 // Then we loop over the mempool tables. For each chunk in each
371 // pool, we set the entry in the Bool array corresponding to the
372 // malloc chunk containing the mempool chunk.
373 VG_(HT_ResetIter)(MC_(mempool_list));
374 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
375 VG_(HT_ResetIter)(mp->chunks);
376 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
378 // We'll need to record this chunk.
379 n_chunks++;
381 // Possibly invalidate the malloc holding the beginning of this chunk.
382 m = find_chunk_for(mc->data, mallocs, n_mallocs);
383 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
384 tl_assert(n_chunks > 0);
385 n_chunks--;
386 malloc_chunk_holds_a_pool_chunk[m] = True;
389 // Possibly invalidate the malloc holding the end of this chunk.
390 if (mc->szB > 1) {
391 m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
392 if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
393 tl_assert(n_chunks > 0);
394 n_chunks--;
395 malloc_chunk_holds_a_pool_chunk[m] = True;
400 tl_assert(n_chunks > 0);
402 // Create final chunk array.
403 chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
404 s = 0;
406 // Copy the mempool chunks and the non-marked malloc chunks into a
407 // combined array of chunks.
408 VG_(HT_ResetIter)(MC_(mempool_list));
409 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
410 VG_(HT_ResetIter)(mp->chunks);
411 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
412 tl_assert(s < n_chunks);
413 chunks[s++] = mc;
416 for (m = 0; m < n_mallocs; ++m) {
417 if (!malloc_chunk_holds_a_pool_chunk[m]) {
418 tl_assert(s < n_chunks);
419 chunks[s++] = mallocs[m];
422 tl_assert(s == n_chunks);
424 // Free temporaries.
425 VG_(free)(mallocs);
426 VG_(free)(malloc_chunk_holds_a_pool_chunk);
428 *pn_chunks = n_chunks;
430 // Sort the array so blocks are in ascending order in memory.
431 VG_(ssort)(chunks, n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);
433 // Sanity check -- make sure they're in order.
434 for (int i = 0; i < n_chunks-1; i++) {
435 tl_assert( chunks[i]->data <= chunks[i+1]->data);
438 return chunks;
439 } else {
440 *pn_chunks = n_mallocs;
441 return mallocs;
445 /*------------------------------------------------------------*/
446 /*--- The leak detector proper. ---*/
447 /*------------------------------------------------------------*/
449 // Holds extra info about each block during leak checking.
450 typedef
451 struct {
452 UInt state:2; // Reachedness.
453 UInt pending:1; // Scan pending.
454 UInt heuristic: (sizeof(UInt)*8)-3;
455 // Heuristic with which this block was considered reachable.
456 // LchNone if state != Reachable or no heuristic needed to
457 // consider it reachable.
459 union {
460 SizeT indirect_szB;
461 // If Unreached, how many bytes are unreachable from here.
462 SizeT clique;
463 // if IndirectLeak, clique leader to which it belongs.
464 } IorC;
466 LC_Extra;
468 // An array holding pointers to every chunk we're checking. Sorted by address.
469 // lc_chunks is initialised during leak search. It is kept after leak search
470 // to support printing the list of blocks belonging to a loss record.
471 // The lc_chunks array can only be used validly until the next "free" operation
472 // (as a free operation potentially destroys one or more chunks).
473 // To detect whether lc_chunks is still valid, we store the number of free
474 // operations done when lc_chunks was built: lc_chunks (and lc_extras) stay
475 // valid as long as no free operation has been done since lc_chunks was built.
476 static MC_Chunk** lc_chunks;
477 // How many chunks we're dealing with.
478 static Int lc_n_chunks;
479 static SizeT lc_chunks_n_frees_marker;
480 // This has the same number of entries as lc_chunks, and each entry
481 // in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
482 // lc_extras[i] describe the same block).
483 static LC_Extra* lc_extras;
485 // Chunks are converted and merged into loss records, maintained in lr_table.
486 // lr_table elements are kept from one leak search to another to implement
487 // the "print new/changed leaks" client request.
488 static OSet* lr_table;
489 // Array of sorted loss records (produced during the last leak search).
490 static LossRecord** lr_array;
492 // Value of the heuristics parameter used in the current (or last) leak check.
493 static UInt detect_memory_leaks_last_heuristics;
495 // DeltaMode used the last time we called detect_memory_leaks.
496 // The recorded leak errors are output using a logic based on this delta_mode.
497 // The below avoids replicating the delta_mode in each LossRecord.
498 LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);
500 // Each leak search run increments the below generation counter.
501 // A suppression used during a leak search will contain this
502 // generation number.
503 UInt MC_(leak_search_gen);
505 // Records chunks that are currently being processed. Each element in the
506 // stack is an index into lc_chunks and lc_extras. Its size is
507 // 'lc_n_chunks' because in the worst case that's how many chunks could be
508 // pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
509 // be conservative).
510 static Int* lc_markstack;
511 // The index of the top element of the stack; -1 if the stack is empty, 0 if
512 // the stack has one element, 1 if it has two, etc.
513 static Int lc_markstack_top;
515 // Keeps track of how many bytes of memory we've scanned, for printing.
516 // (Nb: We don't keep track of how many register bytes we've scanned.)
517 static SizeT lc_scanned_szB;
518 // Keeps track of how many bytes we have not scanned due to read errors that
519 // caused a signal such as SIGSEGV.
520 static SizeT lc_sig_skipped_szB;
523 SizeT MC_(bytes_leaked) = 0;
524 SizeT MC_(bytes_indirect) = 0;
525 SizeT MC_(bytes_dubious) = 0;
526 SizeT MC_(bytes_reachable) = 0;
527 SizeT MC_(bytes_suppressed) = 0;
529 SizeT MC_(blocks_leaked) = 0;
530 SizeT MC_(blocks_indirect) = 0;
531 SizeT MC_(blocks_dubious) = 0;
532 SizeT MC_(blocks_reachable) = 0;
533 SizeT MC_(blocks_suppressed) = 0;
535 // Subset of MC_(bytes_reachable) and MC_(blocks_reachable) which
536 // are considered reachable due to the corresponding heuristic.
537 static SizeT MC_(bytes_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
538 = {0,0,0,0};
539 static SizeT MC_(blocks_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
540 = {0,0,0,0};
542 // Determines if a pointer is to a chunk. Returns the chunk number et al
543 // via call-by-reference.
544 static Bool
545 lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
547 Int ch_no;
548 MC_Chunk* ch;
549 LC_Extra* ex;
551 // Quick filter. Note: implemented with am, not with get_vabits2
552 // as ptr might be random data pointing anywhere. On 64 bit
553 // platforms, getting va bits for random data can be quite costly
554 // due to the secondary map.
555 if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
556 return False;
557 } else {
558 ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
559 tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);
561 if (ch_no == -1) {
562 return False;
563 } else {
564 // Ok, we've found a pointer to a chunk. Get the MC_Chunk and its
565 // LC_Extra.
566 ch = lc_chunks[ch_no];
567 ex = &(lc_extras[ch_no]);
569 tl_assert(ptr >= ch->data);
570 tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));
572 if (VG_DEBUG_LEAKCHECK)
573 VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);
575 *pch_no = ch_no;
576 *pch = ch;
577 *pex = ex;
579 return True;
584 // Push a chunk (well, just its index) onto the mark stack.
585 static void lc_push(Int ch_no, MC_Chunk* ch)
587 if (!lc_extras[ch_no].pending) {
588 if (0) {
589 VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
591 lc_markstack_top++;
592 tl_assert(lc_markstack_top < lc_n_chunks);
593 lc_markstack[lc_markstack_top] = ch_no;
594 tl_assert(!lc_extras[ch_no].pending);
595 lc_extras[ch_no].pending = True;
599 // Pop the index of the chunk on the top of the mark stack into *ret.
600 // Returns False if the stack is empty, True otherwise.
601 static Bool lc_pop(Int* ret)
603 if (-1 == lc_markstack_top) {
604 return False;
605 } else {
606 tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
607 *ret = lc_markstack[lc_markstack_top];
608 lc_markstack_top--;
609 tl_assert(lc_extras[*ret].pending);
610 lc_extras[*ret].pending = False;
611 return True;
615 static const HChar* pp_heuristic(LeakCheckHeuristic h)
617 switch(h) {
618 case LchNone: return "none";
619 case LchStdString: return "stdstring";
620 case LchLength64: return "length64";
621 case LchNewArray: return "newarray";
622 case LchMultipleInheritance: return "multipleinheritance";
623 default: return "???invalid heuristic???";
627 // True if ptr looks like the address of a vtable, i.e. if ptr
628 // points to an array of pointers to functions.
629 // It is assumed the only caller of this function is heuristic_reachedness
630 // which must check that ptr is aligned and above page 0.
631 // Checking that ptr is above page 0 is an optimisation: it is assumed
632 // that no vtable is located in page 0. So, all small integer values
633 // encountered during the scan will not incur the cost of calling this
634 // function.
635 static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
637 // ??? If performance problem:
638 // ??? maybe implement a cache (array indexed by ptr % primenr)
639 // ??? of "I am a vtable ptr" ???
641 // ??? Maybe the debug info could (efficiently?) be used to detect vtables ?
643 // We consider ptr as a vtable ptr if it points to a table
644 // where we find only NULL pointers or pointers pointing at an
645 // executable region. We must find at least 2 non NULL pointers
646 // before considering ptr as a vtable pointer.
647 // We scan a maximum of VTABLE_MAX_CHECK words for these 2 non NULL
648 // pointers.
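//
// Illustration (an assumed-plausible candidate, not a definition used by the
// code): the check below would accept e.g.
//    ptr -> [ NULL, &fn_a, &fn_b, ... ]
// provided &fn_a and &fn_b land in file-mapped executable (SkFileC, hasT)
// segments, and gives up after scanning VTABLE_MAX_CHECK words without
// finding 2 such entries.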
649 #define VTABLE_MAX_CHECK 20
651 NSegment const *seg;
652 UInt nr_fn_ptrs = 0;
653 Addr scan;
654 Addr scan_max;
656 // First verify ptr points inside a client mapped file section.
657 // ??? is a vtable always in a file mapped readable section ?
658 seg = VG_(am_find_nsegment) (ptr);
659 if (seg == NULL
660 || seg->kind != SkFileC
661 || !seg->hasR)
662 return False;
664 // Check potential function pointers, up to a maximum of VTABLE_MAX_CHECK.
665 scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
666 // If ptr is near the end of seg, avoid scan_max exceeding the end of seg:
667 if (scan_max > seg->end - sizeof(Addr))
668 scan_max = seg->end - sizeof(Addr);
669 for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
670 Addr pot_fn = *((Addr *)scan);
671 if (pot_fn == 0)
672 continue; // NULL fn pointer. Seems it can happen in vtable.
673 seg = VG_(am_find_nsegment) (pot_fn);
674 #if defined(VGA_ppc64be)
675 // ppc64BE uses a thunk table (function descriptors), so we have one
676 // more level of indirection to follow.
677 if (seg == NULL
678 || seg->kind != SkFileC
679 || !seg->hasR
680 || !seg->hasW)
681 return False; // ptr to nowhere, or not a ptr to thunks.
682 pot_fn = *((Addr *)pot_fn);
683 if (pot_fn == 0)
684 continue; // NULL fn pointer. Seems it can happen in vtable.
685 seg = VG_(am_find_nsegment) (pot_fn);
686 #endif
687 if (seg == NULL
688 || seg->kind != SkFileC
689 || !seg->hasT)
690 return False; // ptr to nowhere, or not a fn ptr.
691 nr_fn_ptrs++;
692 if (nr_fn_ptrs == 2)
693 return True;
696 return False;
699 // True if 'a' is properly aligned and points to 64 bits of valid memory.
700 static Bool is_valid_aligned_ULong ( Addr a )
702 if (sizeof(Word) == 8)
703 return MC_(is_valid_aligned_word)(a);
705 return MC_(is_valid_aligned_word)(a)
706 && MC_(is_valid_aligned_word)(a + 4);
709 /* The below leak_search_fault_catcher is used to catch memory access
710 errors happening during leak_search. During the scan, we check
711 with aspacemgr and/or VA bits that each page or dereferenced location is
712 readable and belongs to the client. However, we still protect
713 against SIGSEGV and SIGBUS e.g. in case aspacemgr is desynchronised
714 with the real page mappings. Such a desynchronisation could happen
715 due to an aspacemgr bug. Note that if the application is using
716 mprotect(NONE), then a page can be unreadable but have addressable
717 and defined VA bits (see mc_main.c function mc_new_mem_mprotect).
718 Currently, 2 functions are dereferencing client memory during leak search:
719 heuristic_reachedness and lc_scan_memory.
720 Each such function has its own fault catcher, that will call
721 leak_search_fault_catcher with the proper 'who' and jmpbuf parameters. */
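//
// The pattern used by both callers is roughly the following sketch, where
// 'my_jmpbuf' and 'my_fault_catcher' stand for the per-caller jmpbuf and
// catcher defined below:
//
//    fault_catcher_t prev_catcher;
//    prev_catcher = VG_(set_fault_catcher)(my_fault_catcher);
//    if (VG_MINIMAL_SETJMP(my_jmpbuf) != 0) {
//       // longjmp-ed back from leak_search_fault_catcher: give up
//       // (heuristic_reachedness) or skip past bad_scanned_addr
//       // (lc_scan_memory) and carry on.
//    }
//    ... dereference client memory ...
//    VG_(set_fault_catcher)(prev_catcher);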
722 static volatile Addr bad_scanned_addr;
723 static
724 void leak_search_fault_catcher ( Int sigNo, Addr addr,
725 const HChar *who, VG_MINIMAL_JMP_BUF(jmpbuf) )
727 vki_sigset_t sigmask;
729 if (0)
730 VG_(printf)("OUCH! sig=%d addr=%#lx who=%s\n", sigNo, addr, who);
732 /* Signal handler runs with the signal masked.
733 Unmask the handled signal before longjmp-ing or return-ing.
734 Note that during leak search, we expect only SIGSEGV or SIGBUS
735 and we do not expect another occurrence until we have longjmp-ed/return-ed
736 to resume the leak search. So, it is safe to unmask the signal
737 here. */
738 /* First get current mask (by passing NULL as first arg) */
739 VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
740 /* Then set a new sigmask, with this signal removed from the mask. */
741 VG_(sigdelset)(&sigmask, sigNo);
742 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
744 if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS) {
745 bad_scanned_addr = addr;
746 VG_MINIMAL_LONGJMP(jmpbuf);
747 } else {
748 /* ??? During leak search, we are not supposed to receive any
749 other sync signal than these 2.
750 In theory, we should not call VG_(umsg) in a signal handler,
751 but better (try to) report this unexpected behaviour. */
752 VG_(umsg)("leak_search_fault_catcher:"
753 " unexpected signal %d, catcher %s ???\n",
754 sigNo, who);
758 // jmpbuf and fault_catcher used during heuristic_reachedness
759 static VG_MINIMAL_JMP_BUF(heuristic_reachedness_jmpbuf);
760 static
761 void heuristic_reachedness_fault_catcher ( Int sigNo, Addr addr )
763 leak_search_fault_catcher (sigNo, addr,
764 "heuristic_reachedness_fault_catcher",
765 heuristic_reachedness_jmpbuf);
768 // If ch is heuristically reachable via a heuristic member of heur_set,
769 // returns this heuristic.
770 // If ch cannot be considered reachable using one of these heuristics,
771 // return LchNone.
772 // This should only be called when ptr is an interior ptr to ch.
773 // The StdString/NewArray/MultipleInheritance heuristics are directly
774 // inspired by DrMemory:
775 // see http://www.burningcutlery.com/derek/docs/drmem-CGO11.pdf [section VI,C]
776 // and bug 280271.
777 static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
778 MC_Chunk *ch, LC_Extra *ex,
779 UInt heur_set)
782 fault_catcher_t prev_catcher;
784 prev_catcher = VG_(set_fault_catcher)(heuristic_reachedness_fault_catcher);
786 // See leak_search_fault_catcher
787 if (VG_MINIMAL_SETJMP(heuristic_reachedness_jmpbuf) != 0) {
788 VG_(set_fault_catcher) (prev_catcher);
789 return LchNone;
792 if (HiS(LchStdString, heur_set)) {
793 // Detects inner pointers to std::string for the layout
794 //    length capacity refcount char_array[] \0
795 // where ptr points to the beginning of the char_array.
796 // Note: we check definedness for length and capacity but
797 // not for refcount, as the refcount size might be smaller than
798 // a SizeT, giving an uninitialised hole in the first 3 SizeTs.
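//
// Illustration only (an assumption about the string representation being
// matched, not a type used by this code):
//
//    struct rep {           // ch->data points here
//       SizeT length;       // checked: length <= capacity
//       SizeT capacity;     // checked: 3*sizeof(SizeT) + capacity + 1 == ch->szB
//       SizeT refcount;     // possibly narrower; hence not checked for definedness
//       char  chars[];      // ptr points here: ch->data + 3*sizeof(SizeT)
//    };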
799 if ( ptr == ch->data + 3 * sizeof(SizeT)
800 && MC_(is_valid_aligned_word)(ch->data + sizeof(SizeT))) {
801 const SizeT capacity = *((SizeT*)(ch->data + sizeof(SizeT)));
802 if (3 * sizeof(SizeT) + capacity + 1 == ch->szB
803 && MC_(is_valid_aligned_word)(ch->data)) {
804 const SizeT length = *((SizeT*)ch->data);
805 if (length <= capacity) {
806 // ??? could check there is no null byte from ptr to ptr+length-1
807 // ??? and that there is a null byte at ptr+length.
808 // ???
809 // ??? could check that ch->allockind is MC_AllocNew ???
810 // ??? probably not a good idea, as I guess stdstring
811 // ??? allocator can be done via custom allocator
812 // ??? or even a call to malloc ????
813 VG_(set_fault_catcher) (prev_catcher);
814 return LchStdString;
820 if (HiS(LchLength64, heur_set)) {
821 // Detects inner pointers that point at a 64bit offset (8 bytes) into a
822 // block whose first 8 bytes hold the length of the remainder as a 64bit
823 // number (= total block size - 8).
824 // This is used e.g. by sqlite for tracking the total size of allocated
825 // memory.
826 // Note that on 64bit platforms, a block matching LchLength64 will
827 // also be matched by LchNewArray.
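//
// Illustration only (assumed layout matched by the checks below):
//
//    struct blk {           // ch->data points here
//       ULong size;         // checked: size == ch->szB - sizeof(ULong)
//       char  payload[];    // ptr points here: ch->data + sizeof(ULong)
//    };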
828 if ( ptr == ch->data + sizeof(ULong)
829 && is_valid_aligned_ULong(ch->data)) {
830 const ULong size = *((ULong*)ch->data);
831 if (size > 0 && (ch->szB - sizeof(ULong)) == size) {
832 VG_(set_fault_catcher) (prev_catcher);
833 return LchLength64;
838 if (HiS(LchNewArray, heur_set)) {
839 // Detects inner pointers at the second word of a new[] array, following
840 // a plausible number of elements.
841 // Such inner pointers are used for arrays of elements
842 // having a destructor, as the delete[] of the array must know
843 // how many elements to destroy.
845 // We have a strange/wrong case for 'ptr = new MyClass[0];' :
846 // For such a case, the returned ptr points just outside the
847 // allocated chunk. This chunk is then seen as a definite
848 // leak by Valgrind, as it is not considered an interior pointer.
849 // It is the C++ equivalent of bug 99923 (malloc(0) wrongly considered
850 // as definitely leaked). See the trick in find_chunk_for handling
851 // 0-sized blocks. This trick does not work for 'new MyClass[0]'
852 // because a word-sized chunk is allocated to store the (0) number
853 // of elements.
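//
// Illustration only (assumed new[] cookie layout matched by the checks
// below; 'T' stands for the element type):
//
//    struct arr {           // ch->data points here
//       SizeT nr_elts;      // checked: (ch->szB - sizeof(SizeT)) % nr_elts == 0
//       T     elts[];       // ptr points here: ch->data + sizeof(SizeT)
//    };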
854 if ( ptr == ch->data + sizeof(SizeT)
855 && MC_(is_valid_aligned_word)(ch->data)) {
856 const SizeT nr_elts = *((SizeT*)ch->data);
857 if (nr_elts > 0 && (ch->szB - sizeof(SizeT)) % nr_elts == 0) {
858 // ??? could check that ch->allockind is MC_AllocNewVec ???
859 VG_(set_fault_catcher) (prev_catcher);
860 return LchNewArray;
865 if (HiS(LchMultipleInheritance, heur_set)) {
866 // Detect an inner pointer used for multiple inheritance.
867 // Assumption is that the vtable pointers are before the object.
868 if (VG_IS_WORD_ALIGNED(ptr)
869 && MC_(is_valid_aligned_word)(ptr)) {
870 Addr first_addr;
871 Addr inner_addr;
873 // Avoid the call to is_vtable_addr when the addr is not
874 // aligned or points into page 0, as it is unlikely that
875 // a vtable is located in this page. This last optimisation
876 // avoids calling aligned_ptr_above_page0_is_vtable_addr
877 // for all small integers.
878 // Note: we could possibly also avoid calling this function
879 // for small negative integers, as no vtable should be located
880 // in the last page.
881 inner_addr = *((Addr*)ptr);
882 if (VG_IS_WORD_ALIGNED(inner_addr)
883 && inner_addr >= (Addr)VKI_PAGE_SIZE
884 && MC_(is_valid_aligned_word)(ch->data)) {
885 first_addr = *((Addr*)ch->data);
886 if (VG_IS_WORD_ALIGNED(first_addr)
887 && first_addr >= (Addr)VKI_PAGE_SIZE
888 && aligned_ptr_above_page0_is_vtable_addr(inner_addr)
889 && aligned_ptr_above_page0_is_vtable_addr(first_addr)) {
890 // ??? could check that ch->allockind is MC_AllocNew ???
891 VG_(set_fault_catcher) (prev_catcher);
892 return LchMultipleInheritance;
898 VG_(set_fault_catcher) (prev_catcher);
899 return LchNone;
903 // If 'ptr' is pointing to a heap-allocated block which hasn't been seen
904 // before, push it onto the mark stack.
905 static void
906 lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
908 Int ch_no;
909 MC_Chunk* ch;
910 LC_Extra* ex;
911 Reachedness ch_via_ptr; // Is ch reachable via ptr, and how ?
913 if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
914 return;
916 if (ex->state == Reachable) {
917 if (ex->heuristic && ptr == ch->data)
918 // If the block was considered reachable via a heuristic, and it is now
919 // directly reachable via ptr, clear the heuristic field.
920 ex->heuristic = LchNone;
921 return;
924 // Possibly upgrade the state, ie. one of:
925 // - Unreached --> Possible
926 // - Unreached --> Reachable
927 // - Possible --> Reachable
929 if (ptr == ch->data)
930 ch_via_ptr = Reachable;
931 else if (detect_memory_leaks_last_heuristics) {
932 ex->heuristic
933 = heuristic_reachedness (ptr, ch, ex,
934 detect_memory_leaks_last_heuristics);
935 if (ex->heuristic)
936 ch_via_ptr = Reachable;
937 else
938 ch_via_ptr = Possible;
939 } else
940 ch_via_ptr = Possible;
942 if (ch_via_ptr == Reachable && is_prior_definite) {
943 // 'ptr' points to the start of the block or is to be considered as
944 // pointing to the start of the block, and the prior node is
945 // definite, which means that this block is definitely reachable.
946 ex->state = Reachable;
948 // State has changed to Reachable so (re)scan the block to make
949 // sure any blocks it points to are correctly marked.
950 lc_push(ch_no, ch);
952 } else if (ex->state == Unreached) {
953 // Either 'ptr' is an interior-pointer, or the prior node isn't definite,
954 // which means that we can only mark this block as possibly reachable.
955 ex->state = Possible;
957 // State has changed to Possible so (re)scan the block to make
958 // sure any blocks it points to are correctly marked.
959 lc_push(ch_no, ch);
963 static void
964 lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
966 lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
969 // If ptr is pointing to a heap-allocated block which hasn't been seen
970 // before, push it onto the mark stack. Clique is the index of the
971 // clique leader.
972 static void
973 lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
975 Int ch_no;
976 MC_Chunk* ch;
977 LC_Extra* ex;
979 tl_assert(0 <= clique && clique < lc_n_chunks);
981 if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
982 return;
984 // If it's not Unreached, it's already been handled so ignore it.
985 // If ch_no==clique, it's the clique leader, which means this is a cyclic
986 // structure; again ignore it because it's already been handled.
987 if (ex->state == Unreached && ch_no != clique) {
988 // Note that, unlike reachable blocks, we currently don't distinguish
989 // between start-pointers and interior-pointers here. We probably
990 // should, though.
991 lc_push(ch_no, ch);
993 // Add the block to the clique, and add its size to the
994 // clique-leader's indirect size. Also, if the new block was
995 // itself a clique leader, it isn't any more, so add its
996 // indirect_szB to the new clique leader.
997 if (VG_DEBUG_CLIQUE) {
998 if (ex->IorC.indirect_szB > 0)
999 VG_(printf)(" clique %d joining clique %d adding %lu+%lu\n",
1000 ch_no, clique, (SizeT)ch->szB, ex->IorC.indirect_szB);
1001 else
1002 VG_(printf)(" block %d joining clique %d adding %lu\n",
1003 ch_no, clique, (SizeT)ch->szB);
1006 lc_extras[clique].IorC.indirect_szB += ch->szB;
1007 lc_extras[clique].IorC.indirect_szB += ex->IorC.indirect_szB;
1008 ex->state = IndirectLeak;
1009 ex->IorC.clique = (SizeT) cur_clique;
1013 static void
1014 lc_push_if_a_chunk_ptr(Addr ptr,
1015 Int clique, Int cur_clique, Bool is_prior_definite)
1017 if (-1 == clique)
1018 lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
1019 else
1020 lc_push_with_clique_if_a_chunk_ptr(ptr, clique, cur_clique);
1024 static VG_MINIMAL_JMP_BUF(lc_scan_memory_jmpbuf);
1025 static
1026 void lc_scan_memory_fault_catcher ( Int sigNo, Addr addr )
1028 leak_search_fault_catcher (sigNo, addr,
1029 "lc_scan_memory_fault_catcher",
1030 lc_scan_memory_jmpbuf);
1033 // lc_scan_memory has 2 modes:
1035 // 1. Leak check mode (searched == 0).
1036 // -----------------------------------
1037 // Scan a block of memory between [start, start+len). This range may
1038 // be bogus, inaccessible, or otherwise strange; we deal with it. For each
1039 // valid aligned word we check whether it is a pointer to a chunk and push
1040 // the chunk onto the mark stack if so.
1041 // clique is the "highest level clique" in which indirectly leaked blocks have
1042 // to be collected. cur_clique is the current "lower" level clique through which
1043 // the memory to be scanned has been found.
1044 // Example: in the below tree if A is leaked, the top level clique will
1045 // be A, while lower level cliques will be B and C.
//              A
//            /   \
//           B     C
//          / \   / \
//         D   E F   G
1053 // Proper handling of the top and lowest level cliques allows the block_list
1054 // of a loss record to describe the hierarchy of indirectly leaked blocks.
1056 // 2. Search ptr mode (searched != 0).
1057 // -----------------------------------
1058 // In this mode, lc_scan_memory searches for pointers to a specific address
1059 // range: it just scans [start..start+len[ for pointers
1060 // to searched and outputs the places where searched is found.
1061 // It does not recursively scan the found memory.
1062 static void
1063 lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
1064 Int clique, Int cur_clique,
1065 Addr searched, SizeT szB)
1067 /* memory scan is based on the assumption that valid pointers are aligned
1068 on a multiple of sizeof(Addr). So, we can (and must) skip the beginning and
1069 end portions of the block if they are not aligned on sizeof(Addr):
1070 these cannot be valid pointers, and calls to MC_(is_valid_aligned_word)
1071 will assert for a non-aligned address. */
1072 #if defined(VGA_s390x)
1073 // Define ptr as volatile, as on this platform, the value of ptr
1074 // is read in code executed via a longjmp.
1075 volatile
1076 #endif
1077 Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
1078 const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
1079 fault_catcher_t prev_catcher;
1081 if (VG_DEBUG_LEAKCHECK)
1082 VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);
1084 prev_catcher = VG_(set_fault_catcher)(lc_scan_memory_fault_catcher);
1086 /* Optimisation: the loop below will check for each begin
1087 of SM chunk if the chunk is fully unaddressable. The idea is to
1088 skip efficiently such fully unaddressable SM chunks.
1089 So, we preferably start the loop on a chunk boundary.
1090 If the chunk is not fully unaddressable, we might be in
1091 an unaddressable page. Again, the idea is to skip such an
1092 unaddressable page efficiently: this is the "else" part.
1093 We use an "else" so that two consecutive fully unaddressable
1094 SM chunks will be skipped efficiently: first one is skipped
1095 by this piece of code. The next SM chunk will be skipped inside
1096 the loop. */
1097 if ( ! MC_(is_within_valid_secondary)(ptr) ) {
1098 // Skip an invalid SM chunk till the beginning of the next SM Chunk.
1099 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
1100 } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
1101 // else we are in a (at least partially) valid SM chunk.
1102 // We might be in the middle of an unreadable page.
1103 // Do a cheap check to see if it's valid;
1104 // if not, skip onto the next page.
1105 ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it.
1107 /* The above optimisation and below loop is based on some relationships
1108 between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
1109 MC_(detect_memory_leaks). */
1111 // See leak_search_fault_catcher
1112 if (VG_MINIMAL_SETJMP(lc_scan_memory_jmpbuf) != 0) {
1113 // Catch read error ...
1114 # if defined(VGA_s390x)
1115 // For a SIGSEGV, s390 delivers the page address of the bad address.
1116 // For a SIGBUS, old s390 kernels deliver a NULL address.
1117 // bad_scanned_addr can thus not be used.
1118 // So, on this platform, we always skip a full page from ptr.
1119 // This requires ptr to be marked volatile, as we read its value
1120 // after a longjmp to here.
1121 lc_sig_skipped_szB += VKI_PAGE_SIZE;
1122 ptr = ptr + VKI_PAGE_SIZE; // Unaddressable, - skip it.
1123 # else
1124 // On other platforms, just skip one Addr.
1125 lc_sig_skipped_szB += sizeof(Addr);
1126 // PJF asserts are always on
1127 // coverity[ASSERT_SIDE_EFFECT:FALSE]
1128 tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
1129 // coverity[ASSERT_SIDE_EFFECT:FALSE]
1130 tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
1131 ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable, - skip it.
1132 #endif
1134 while (ptr < end) {
1135 Addr addr;
1137 // Skip invalid chunks.
1138 if (UNLIKELY((ptr % SM_SIZE) == 0)) {
1139 if (! MC_(is_within_valid_secondary)(ptr) ) {
1140 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
1141 continue;
1145 // Look to see if this page seems reasonable.
1146 if (UNLIKELY((ptr % VKI_PAGE_SIZE) == 0)) {
1147 if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
1148 ptr += VKI_PAGE_SIZE; // Bad page - skip it.
1149 continue;
1153 if ( MC_(is_valid_aligned_word)(ptr) ) {
1154 lc_scanned_szB += sizeof(Addr);
1155 // If the below read fails, we will longjmp to the loop begin.
1156 addr = *(Addr *)ptr;
1157 // If we get here, the scanned word is in valid memory. Now
1158 // let's see if its contents point to a chunk.
1159 if (UNLIKELY(searched)) {
1160 if (addr >= searched && addr < searched + szB) {
1161 const DiEpoch cur_ep = VG_(current_DiEpoch)();
1162 // The found addr is 'live', so use cur_ep to describe it.
1163 if (addr == searched) {
1164 VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
1165 MC_(pp_describe_addr) (cur_ep, ptr);
1166 } else {
1167 Int ch_no;
1168 MC_Chunk *ch;
1169 LC_Extra *ex;
1170 VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
1171 ptr, (long unsigned) addr - searched, searched);
1172 MC_(pp_describe_addr) (cur_ep, ptr);
1173 if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
1174 Int h;
1175 for (h = LchStdString; h < N_LEAK_CHECK_HEURISTICS; h++) {
1176 if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
1177 VG_(umsg)("block at %#lx considered reachable "
1178 "by ptr %#lx using %s heuristic\n",
1179 ch->data, addr, pp_heuristic(h));
1182 // Verify the loop above has properly scanned all
1183 // heuristics. If the below fails, it probably means the
1184 // LeakCheckHeuristic enum is not in sync anymore with the
1185 // above loop and/or with N_LEAK_CHECK_HEURISTICS.
1186 tl_assert (h == N_LEAK_CHECK_HEURISTICS);
1190 } else {
1191 lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
1193 } else if (0 && VG_DEBUG_LEAKCHECK) {
1194 VG_(printf)("%#lx not valid\n", ptr);
1196 ptr += sizeof(Addr);
1199 VG_(set_fault_catcher)(prev_catcher);
1203 // Process the mark stack until empty.
1204 static void lc_process_markstack(Int clique)
1206 Int top = -1; // shut gcc up
1207 Bool is_prior_definite;
1209 while (lc_pop(&top)) {
1210 tl_assert(top >= 0 && top < lc_n_chunks);
1212 // See comment about 'is_prior_definite' at the top to understand this.
1213 is_prior_definite = ( Possible != lc_extras[top].state );
1215 lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
1216 is_prior_definite, clique, (clique == -1 ? -1 : top),
1217 /*searched*/ 0, 0);
1221 static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
1223 const LossRecordKey* a = key;
1224 const LossRecordKey* b = &(((const LossRecord*)elem)->key);
1226 // Compare on states first because that's fast.
1227 if (a->state < b->state) return -1;
1228 if (a->state > b->state) return 1;
1229 // Ok, the states are equal. Now compare the locations, which is slower.
1230 if (VG_(eq_ExeContext)(
1231 MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
1232 return 0;
1233 // Different locations. Ordering is arbitrary, just use the ec pointer.
1234 if (a->allocated_at < b->allocated_at) return -1;
1235 if (a->allocated_at > b->allocated_at) return 1;
1236 VG_(tool_panic)("bad LossRecord comparison");
1239 static Int cmp_LossRecords(const void* va, const void* vb)
1241 const LossRecord* lr_a = *(const LossRecord *const *)va;
1242 const LossRecord* lr_b = *(const LossRecord *const *)vb;
1243 SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
1244 SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;
1246 // First compare by sizes.
1247 if (total_szB_a < total_szB_b) return -1;
1248 if (total_szB_a > total_szB_b) return 1;
1249 // If sizes are equal, compare by states.
1250 if (lr_a->key.state < lr_b->key.state) return -1;
1251 if (lr_a->key.state > lr_b->key.state) return 1;
1252 // If they're still equal here, it doesn't matter that much, but we keep
1253 // comparing other things so that regtests are as deterministic as
1254 // possible. So: compare num_blocks.
1255 if (lr_a->num_blocks < lr_b->num_blocks) return -1;
1256 if (lr_a->num_blocks > lr_b->num_blocks) return 1;
1257 // Finally, compare ExeContext addresses... older ones are likely to have
1258 // lower addresses.
1259 if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
1260 if (lr_a->key.allocated_at > lr_b->key.allocated_at) return 1;
1261 return 0;
1264 // Allocates or reallocates lr_array, and sets its elements to the loss
1265 // records contained in lr_table.
1266 static UInt get_lr_array_from_lr_table(void) {
1267 UInt i, n_lossrecords;
1268 LossRecord* lr;
1270 n_lossrecords = VG_(OSetGen_Size)(lr_table);
1272 // (re-)create the array of pointers to the loss records.
1273 // lr_array is kept to allow producing the block list from gdbserver.
1274 if (lr_array != NULL)
1275 VG_(free)(lr_array);
1276 lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
1277 i = 0;
1278 VG_(OSetGen_ResetIter)(lr_table);
1279 while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
1280 lr_array[i++] = lr;
1282 tl_assert(i == n_lossrecords);
1283 return n_lossrecords;
1287 static void get_printing_rules(LeakCheckParams* lcp,
1288 LossRecord* lr,
1289 Bool* count_as_error,
1290 Bool* print_record)
1292 // Rules for printing:
1293 // - We don't show suppressed loss records ever (and that's controlled
1294 // within the error manager).
1295 // - We show non-suppressed loss records that are specified in
1296 // --show-leak-kinds=... if --leak-check=yes.
1298 Bool delta_considered;
1300 switch (lcp->deltamode) {
1301 case LCD_Any:
1302 delta_considered = lr->num_blocks > 0;
1303 break;
1304 case LCD_Increased:
1305 delta_considered
1306 = lr->szB > lr->old_szB
1307 || lr->indirect_szB > lr->old_indirect_szB
1308 || lr->num_blocks > lr->old_num_blocks;
1309 break;
1310 case LCD_Changed:
1311 delta_considered = lr->szB != lr->old_szB
1312 || lr->indirect_szB != lr->old_indirect_szB
1313 || lr->num_blocks != lr->old_num_blocks;
1314 break;
1315 case LCD_New:
1316 delta_considered
1317 = lr->num_blocks > 0 && lr->old_num_blocks == 0;
1318 break;
1319 default:
1320 tl_assert(0);
1323 *print_record = lcp->mode == LC_Full && delta_considered
1324 && RiS(lr->key.state,lcp->show_leak_kinds);
1325 // We don't count leaks as errors with lcp->mode==LC_Summary,
1326 // otherwise you can get high error counts with few or no error
1327 // messages, which can be confusing. With lcp->mode==LC_Full, we count
1328 // as errors the leak kinds requested by --errors-for-leak-kinds=...
1329 *count_as_error = lcp->mode == LC_Full && delta_considered
1330 && RiS(lr->key.state,lcp->errors_for_leak_kinds);
1334 // Types and functions for xtree leak report.
1337 static XTree* leak_xt;
1339 /* Sizes and delta sizes for a loss record output in an xtree.
1340 As the output format can only show positive values, we need values for
1341 the increase and decrease cases. */
1342 typedef
1343 struct _XT_BIBK {
1344 ULong szB; // Current values
1345 ULong indirect_szB;
1346 ULong num_blocks;
1347 } XT_BIBK; // Bytes, Indirect bytes, BlocKs
1349 typedef
1350 enum {
1351 XT_Value =0,
1352 XT_Increase =1,
1353 XT_Decrease =2
1355 XT_VID; // Value or Increase or Decrease
1357 typedef
1358 struct _XT_lr {
1359 XT_BIBK vid[3]; // indexed by XT_VID
1360 } XT_lr;
1362 typedef
1363 struct _XT_Leak {
1364 XT_lr xt_lr[4]; // indexed by Reachedness
1365 } XT_Leak;
1367 static void MC_(XT_Leak_init)(void* xtl)
1369 VG_(memset) (xtl, 0, sizeof(XT_Leak));
1371 static void MC_(XT_Leak_add) (void* to, const void* xtleak)
1373 XT_Leak* xto = to;
1374 const XT_Leak* xtl = xtleak;
1376 for (int r = Reachable; r <= Unreached; r++)
1377 for (int d = 0; d < 3; d++) {
1378 xto->xt_lr[r].vid[d].szB += xtl->xt_lr[r].vid[d].szB;
1379 xto->xt_lr[r].vid[d].indirect_szB += xtl->xt_lr[r].vid[d].indirect_szB;
1380 xto->xt_lr[r].vid[d].num_blocks += xtl->xt_lr[r].vid[d].num_blocks;
1383 static void XT_insert_lr (LossRecord* lr)
1385 XT_Leak xtl;
1386 Reachedness i = lr->key.state;
1388 MC_(XT_Leak_init)(&xtl);
1390 xtl.xt_lr[i].vid[XT_Value].szB = lr->szB;
1391 xtl.xt_lr[i].vid[XT_Value].indirect_szB = lr->indirect_szB;
1392 xtl.xt_lr[i].vid[XT_Value].num_blocks = lr->num_blocks;
1394 if (lr->szB > lr->old_szB)
1395 xtl.xt_lr[i].vid[XT_Increase].szB = lr->szB - lr->old_szB;
1396 else
1397 xtl.xt_lr[i].vid[XT_Decrease].szB = lr->old_szB - lr->szB;
1398 if (lr->indirect_szB > lr->old_indirect_szB)
1399 xtl.xt_lr[i].vid[XT_Increase].indirect_szB
1400 = lr->indirect_szB - lr->old_indirect_szB;
1401 else
1402 xtl.xt_lr[i].vid[XT_Decrease].indirect_szB
1403 = lr->old_indirect_szB - lr->indirect_szB;
1404 if (lr->num_blocks > lr->old_num_blocks)
1405 xtl.xt_lr[i].vid[XT_Increase].num_blocks
1406 = lr->num_blocks - lr->old_num_blocks;
1407 else
1408 xtl.xt_lr[i].vid[XT_Decrease].num_blocks
1409 = lr->old_num_blocks - lr->num_blocks;
1411 VG_(XT_add_to_ec)(leak_xt, lr->key.allocated_at, &xtl);
1414 static void MC_(XT_Leak_sub) (void* from, const void* xtleak)
1416 tl_assert(0); // Should not be called.
1418 static const HChar* MC_(XT_Leak_img) (const void* xtleak)
1420 static XT_Leak zero;
1421 static HChar buf[600];
1422 UInt off = 0;
1424 const XT_Leak* xtl = xtleak;
1426 if (VG_(memcmp)(xtl, &zero, sizeof(XT_Leak)) != 0) {
1427 for (UInt d = XT_Value; d <= XT_Decrease; d++) {
1428 // print szB. We add indirect_szB so that Unreached shows
1429 // the total bytes lost, including the indirect loss. This is similar
1430 // to the textual and xml reports.
1431 for (UInt r = Reachable; r <= Unreached; r++)
1432 off += VG_(sprintf) (buf + off, " %llu",
1433 xtl->xt_lr[r].vid[d].szB
1434 + xtl->xt_lr[r].vid[d].indirect_szB);
1435 // print indirect_szB, only for the reachedness kinds that have such values
1436 for (UInt r = Reachable; r <= Unreached; r++)
1437 if (r == Unreached)
1438 off += VG_(sprintf) (buf + off, " %llu",
1439 xtl->xt_lr[r].vid[d].indirect_szB);
1440 // print num_blocks
1441 for (UInt r = Reachable; r <= Unreached; r++)
1442 off += VG_(sprintf) (buf + off, " %llu",
1443 xtl->xt_lr[r].vid[d].num_blocks);
1445 return buf + 1; // + 1 to skip the useless first space
1446 } else {
1447 return NULL;
1451 /* The short event name is made of 2 or 3 or 4 letters:
1452 an optional delta indication: i = increase d = decrease
1453 a loss kind: R = Reachable P = Possibly I = Indirectly D = Definitely
1454 an optional i to indicate this loss record has indirectly lost bytes
1455 B = Bytes or Bk = Blocks.
1456 Note that indirectly lost bytes/blocks can thus be counted in 2
1457 loss records: the loss records for their "own" allocation stack trace,
1458 and the loss record of the 'main' Definitely or Possibly loss record
1459 in the indirectly lost count for these loss records. */
1460 static const HChar* XT_Leak_events =
1461 ////// XT_Value szB
1462 "RB : Reachable Bytes" ","
1463 "PB : Possibly lost Bytes" ","
1464 "IB : Indirectly lost Bytes" ","
1465 "DB : Definitely lost Bytes (direct plus indirect)" ","
1467 ////// XT_Value indirect_szB
1468 // no RIB
1469 // no PIB
1470 // no IIB
1471 "DIB : Definitely Indirectly lost Bytes (subset of DB)" ","
1473 ////// XT_Value num_blocks
1474 "RBk : reachable Blocks" ","
1475 "PBk : Possibly lost Blocks" ","
1476 "IBk : Indirectly lost Blocks" ","
1477 "DBk : Definitely lost Blocks" ","
1479 ////// XT_Increase szB
1480 "iRB : increase Reachable Bytes" ","
1481 "iPB : increase Possibly lost Bytes" ","
1482 "iIB : increase Indirectly lost Bytes" ","
1483 "iDB : increase Definitely lost Bytes" ","
1485 ////// XT_Increase indirect_szB
1486 // no iRIB
1487 // no iPIB
1488 // no iIIB
1489 "iDIB : increase Definitely Indirectly lost Bytes" ","
1491 ////// XT_Increase num_blocks
1492 "iRBk : increase reachable Blocks" ","
1493 "iPBk : increase Possibly lost Blocks" ","
1494 "iIBk : increase Indirectly lost Blocks" ","
1495 "iDBk : increase Definitely lost Blocks" ","
1498 ////// XT_Decrease szB
1499 "dRB : decrease Reachable Bytes" ","
1500 "dPB : decrease Possibly lost Bytes" ","
1501 "dIB : decrease Indirectly lost Bytes" ","
1502 "dDB : decrease Definitely lost Bytes" ","
1504 ////// XT_Decrease indirect_szB
1505 // no dRIB
1506 // no dPIB
1507 // no dIIB
1508 "dDIB : decrease Definitely Indirectly lost Bytes" ","
1510 ////// XT_Decrease num_blocks
1511 "dRBk : decrease reachable Blocks" ","
1512 "dPBk : decrease Possibly lost Blocks" ","
1513 "dIBk : decrease Indirectly lost Blocks" ","
1514 "dDBk : decrease Definitely lost Blocks";
1516 static void print_results(ThreadId tid, LeakCheckParams* lcp)
1518 Int i, n_lossrecords, start_lr_output_scan;
1519 LossRecord* lr;
1520 Bool is_suppressed;
1521 /* old_* variables are used to report delta in summary. */
1522 SizeT old_bytes_leaked = MC_(bytes_leaked);
1523 SizeT old_bytes_indirect = MC_(bytes_indirect);
1524 SizeT old_bytes_dubious = MC_(bytes_dubious);
1525 SizeT old_bytes_reachable = MC_(bytes_reachable);
1526 SizeT old_bytes_suppressed = MC_(bytes_suppressed);
1527 SizeT old_blocks_leaked = MC_(blocks_leaked);
1528 SizeT old_blocks_indirect = MC_(blocks_indirect);
1529 SizeT old_blocks_dubious = MC_(blocks_dubious);
1530 SizeT old_blocks_reachable = MC_(blocks_reachable);
1531 SizeT old_blocks_suppressed = MC_(blocks_suppressed);
1533 SizeT old_bytes_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1534 SizeT old_blocks_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1536 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++) {
1537 old_bytes_heuristically_reachable[i]
1538 = MC_(bytes_heuristically_reachable)[i];
1539 MC_(bytes_heuristically_reachable)[i] = 0;
1540 old_blocks_heuristically_reachable[i]
1541 = MC_(blocks_heuristically_reachable)[i];
1542 MC_(blocks_heuristically_reachable)[i] = 0;
1545 if (lr_table == NULL)
1546 // Create the lr_table, which holds the loss records.
1547 // If the lr_table already exists, it means it contains
1548 // loss records from the previous leak search. The old_*
1549 // values in these records are used to implement the
1550 // leak check delta mode.
1551 lr_table =
1552 VG_(OSetGen_Create)(offsetof(LossRecord, key),
1553 cmp_LossRecordKey_LossRecord,
1554 VG_(malloc), "mc.pr.1",
1555 VG_(free));
1557 // If we have loss records from a previous search, reset values to have
1558 // proper printing of the deltas between previous search and this search.
1559 n_lossrecords = get_lr_array_from_lr_table();
1560 for (i = 0; i < n_lossrecords; i++) {
1561 if (lr_array[i]->num_blocks == 0) {
1562 // remove from lr_table the old loss records with 0 blocks found
1563 VG_(OSetGen_Remove) (lr_table, &lr_array[i]->key);
1564 VG_(OSetGen_FreeNode)(lr_table, lr_array[i]);
1565 } else {
1566 // move the leak sizes to old_* and zero the current sizes
1567 // for next leak search
1568 lr_array[i]->old_szB = lr_array[i]->szB;
1569 lr_array[i]->old_indirect_szB = lr_array[i]->indirect_szB;
1570 lr_array[i]->old_num_blocks = lr_array[i]->num_blocks;
1571 lr_array[i]->szB = 0;
1572 lr_array[i]->indirect_szB = 0;
1573 lr_array[i]->num_blocks = 0;
1576 // lr_array now contains "invalid" loss records => free it.
1577 // lr_array will be re-created below with the kept and new loss records.
1578 VG_(free) (lr_array);
1579 lr_array = NULL;
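/* A minimal standalone sketch (hypothetical type and field names, modelled
   on LossRecord) of the delta bookkeeping above: the previous search's
   counters are rotated into old_* fields and zeroed, so that after the new
   search the per-record delta is simply current minus old. */
#if 0
#include <stdio.h>

typedef struct { unsigned long szB, old_szB; } ExampleRecord;

static void start_new_search(ExampleRecord *r)
{
   r->old_szB = r->szB;   /* remember the previous result ... */
   r->szB     = 0;        /* ... and accumulate afresh        */
}

int main(void)
{
   ExampleRecord r = { .szB = 120, .old_szB = 0 };
   start_new_search(&r);
   r.szB = 200;           /* pretend the new search found 200 bytes */
   printf("delta: %+ld bytes\n", (long)r.szB - (long)r.old_szB); /* "+80" */
   return 0;
}
#endif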
1581 // Convert the chunks into loss records, merging them where appropriate.
1582 for (i = 0; i < lc_n_chunks; i++) {
1583 MC_Chunk* ch = lc_chunks[i];
1584 LC_Extra* ex = &(lc_extras)[i];
1585 LossRecord* old_lr;
1586 LossRecordKey lrkey;
1587 lrkey.state = ex->state;
1588 lrkey.allocated_at = MC_(allocated_at)(ch);
1590 if (ex->heuristic) {
1591 MC_(bytes_heuristically_reachable)[ex->heuristic] += ch->szB;
1592 MC_(blocks_heuristically_reachable)[ex->heuristic]++;
1593 if (VG_DEBUG_LEAKCHECK)
1594 VG_(printf)("heuristic %s %#lx len %lu\n",
1595 pp_heuristic(ex->heuristic),
1596 ch->data, (SizeT)ch->szB);
1599 old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
1600 if (old_lr) {
1601 // We found an existing loss record matching this chunk. Update the
1602 // loss record's details in-situ. This is safe because we don't
1603 // change the elements used as the OSet key.
1604 old_lr->szB += ch->szB;
1605 if (ex->state == Unreached)
1606 old_lr->indirect_szB += ex->IorC.indirect_szB;
1607 old_lr->num_blocks++;
1608 } else {
1609 // No existing loss record matches this chunk. Create a new loss
1610 // record, initialise it from the chunk, and insert it into lr_table.
1611 lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
1612 lr->key = lrkey;
1613 lr->szB = ch->szB;
1614 if (ex->state == Unreached)
1615 lr->indirect_szB = ex->IorC.indirect_szB;
1616 else
1617 lr->indirect_szB = 0;
1618 lr->num_blocks = 1;
1619 lr->old_szB = 0;
1620 lr->old_indirect_szB = 0;
1621 lr->old_num_blocks = 0;
1622 VG_(OSetGen_Insert)(lr_table, lr);
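/* A standalone sketch (hypothetical names) of the aggregation pattern just
   above: chunks sharing the same key are merged into a single record whose
   size and block count accumulate. The real code uses an OSet keyed on
   (state, allocated_at); here a tiny linear table stands in for it. */
#if 0
#include <stdio.h>

typedef struct { int key; unsigned long szB; unsigned num_blocks; } Rec;

static Rec table[16];
static int n_recs = 0;

static void add_chunk(int key, unsigned long szB)
{
   for (int i = 0; i < n_recs; i++)
      if (table[i].key == key) {            /* existing record: accumulate */
         table[i].szB += szB;
         table[i].num_blocks++;
         return;
      }
   table[n_recs++] = (Rec){ key, szB, 1 };  /* no match: create a new record */
}

int main(void)
{
   add_chunk(1, 100); add_chunk(2, 40); add_chunk(1, 60);
   for (int i = 0; i < n_recs; i++)
      printf("key %d: %lu bytes in %u blocks\n",
             table[i].key, table[i].szB, table[i].num_blocks);
   return 0;
}
#endif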
1626 // (re-)create the array of pointers to the (new) loss records.
1627 n_lossrecords = get_lr_array_from_lr_table ();
1628 tl_assert(VG_(OSetGen_Size)(lr_table) == n_lossrecords);
1630 // Sort the array by loss record sizes.
1631 VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
1632 cmp_LossRecords);
1634 // Zero totals.
1635 MC_(blocks_leaked) = MC_(bytes_leaked) = 0;
1636 MC_(blocks_indirect) = MC_(bytes_indirect) = 0;
1637 MC_(blocks_dubious) = MC_(bytes_dubious) = 0;
1638 MC_(blocks_reachable) = MC_(bytes_reachable) = 0;
1639 MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;
1641 // If there is a maximum nr of loss records we can output, then first
1642 // compute from where the output scan has to start.
1643 // By default, start from the first loss record. Compute a higher
1644 // value if there is a maximum to respect. We need to print the last
1645 // records, as the ones with the biggest sizes are the most interesting.
1646 start_lr_output_scan = 0;
1647 if (lcp->mode == LC_Full && lcp->max_loss_records_output < n_lossrecords) {
1648 Int nr_printable_records = 0;
1649 for (i = n_lossrecords - 1; i >= 0 && start_lr_output_scan == 0; i--) {
1650 Bool count_as_error, print_record;
1651 lr = lr_array[i];
1652 get_printing_rules (lcp, lr, &count_as_error, &print_record);
1653 // Do not use get_printing_rules results for is_suppressed, as we
1654 // only want to check if the record would be suppressed.
1655 is_suppressed =
1656 MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr,
1657 False /* print_record */,
1658 False /* count_as_error */);
1659 if (print_record && !is_suppressed) {
1660 nr_printable_records++;
1661 if (nr_printable_records == lcp->max_loss_records_output)
1662 start_lr_output_scan = i;
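/* An illustrative sketch (hypothetical) of the backwards scan above:
   with records sorted in increasing size order, walking from the end and
   counting printable entries yields the index from which the forward
   printing loop can start so that at most 'max' records are emitted. */
#if 0
#include <stdio.h>

static int start_index(const int *printable, int n, int max)
{
   int start = 0, printed = 0;
   for (int i = n - 1; i >= 0 && start == 0; i--)
      if (printable[i] && ++printed == max)
         start = i;                   /* print from here to the end */
   return start;                      /* stays 0 if fewer than max  */
}

int main(void)
{
   int printable[] = { 1, 0, 1, 1, 1, 1 };   /* 6 records, 5 printable */
   printf("start at %d\n", start_index(printable, 6, 3));  /* prints 3 */
   return 0;
}
#endif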
1667 if (lcp->xt_filename != NULL)
1668 leak_xt = VG_(XT_create) (VG_(malloc),
1669 "mc_leakcheck.leak_xt",
1670 VG_(free),
1671 sizeof(XT_Leak),
1672 MC_(XT_Leak_init),
1673 MC_(XT_Leak_add),
1674 MC_(XT_Leak_sub),
1675 VG_(XT_filter_maybe_below_main));
1677 // Print the loss records (in size order) and collect summary stats.
1678 for (i = start_lr_output_scan; i < n_lossrecords; i++) {
1679 Bool count_as_error, print_record;
1680 lr = lr_array[i];
1681 get_printing_rules(lcp, lr, &count_as_error, &print_record);
1682 is_suppressed =
1683 MC_(record_leak_error)
1684 ( tid, i+1, n_lossrecords, lr,
1685 lcp->xt_filename == NULL ? print_record : False,
1686 count_as_error );
1687 if (lcp->xt_filename != NULL && !is_suppressed && print_record)
1688 XT_insert_lr (lr);
1690 if (is_suppressed) {
1691 MC_(blocks_suppressed) += lr->num_blocks;
1692 MC_(bytes_suppressed) += lr->szB;
1694 } else if (Unreached == lr->key.state) {
1695 MC_(blocks_leaked) += lr->num_blocks;
1696 MC_(bytes_leaked) += lr->szB;
1698 } else if (IndirectLeak == lr->key.state) {
1699 MC_(blocks_indirect) += lr->num_blocks;
1700 MC_(bytes_indirect) += lr->szB;
1702 } else if (Possible == lr->key.state) {
1703 MC_(blocks_dubious) += lr->num_blocks;
1704 MC_(bytes_dubious) += lr->szB;
1706 } else if (Reachable == lr->key.state) {
1707 MC_(blocks_reachable) += lr->num_blocks;
1708 MC_(bytes_reachable) += lr->szB;
1710 } else {
1711 VG_(tool_panic)("unknown loss mode");
1715 if (lcp->xt_filename != NULL) {
1716 VG_(XT_callgrind_print)(leak_xt,
1717 lcp->xt_filename,
1718 XT_Leak_events,
1719 MC_(XT_Leak_img));
1720 if (VG_(clo_verbosity) >= 1 || lcp->requested_by_monitor_command)
1721 VG_(umsg)("xtree leak report: %s\n", lcp->xt_filename);
1722 VG_(XT_delete)(leak_xt);
1725 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1726 HChar d_bytes[31];
1727 HChar d_blocks[31];
1728 # define DBY(new,old) \
1729 MC_(snprintf_delta) (d_bytes, sizeof(d_bytes), (new), (old), \
1730 lcp->deltamode)
1731 # define DBL(new,old) \
1732 MC_(snprintf_delta) (d_blocks, sizeof(d_blocks), (new), (old), \
1733 lcp->deltamode)
1735 VG_(umsg)("LEAK SUMMARY:\n");
1736 VG_(umsg)(" definitely lost: %'lu%s bytes in %'lu%s blocks\n",
1737 MC_(bytes_leaked),
1738 DBY (MC_(bytes_leaked), old_bytes_leaked),
1739 MC_(blocks_leaked),
1740 DBL (MC_(blocks_leaked), old_blocks_leaked));
1741 VG_(umsg)(" indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
1742 MC_(bytes_indirect),
1743 DBY (MC_(bytes_indirect), old_bytes_indirect),
1744 MC_(blocks_indirect),
1745 DBL (MC_(blocks_indirect), old_blocks_indirect));
1746 VG_(umsg)(" possibly lost: %'lu%s bytes in %'lu%s blocks\n",
1747 MC_(bytes_dubious),
1748 DBY (MC_(bytes_dubious), old_bytes_dubious),
1749 MC_(blocks_dubious),
1750 DBL (MC_(blocks_dubious), old_blocks_dubious));
1751 VG_(umsg)(" still reachable: %'lu%s bytes in %'lu%s blocks\n",
1752 MC_(bytes_reachable),
1753 DBY (MC_(bytes_reachable), old_bytes_reachable),
1754 MC_(blocks_reachable),
1755 DBL (MC_(blocks_reachable), old_blocks_reachable));
1756 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
1757 if (old_blocks_heuristically_reachable[i] > 0
1758 || MC_(blocks_heuristically_reachable)[i] > 0) {
1759 VG_(umsg)(" of which "
1760 "reachable via heuristic:\n");
1761 break;
1763 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
1764 if (old_blocks_heuristically_reachable[i] > 0
1765 || MC_(blocks_heuristically_reachable)[i] > 0)
1766 VG_(umsg)(" %-19s: "
1767 "%'lu%s bytes in %'lu%s blocks\n",
1768 pp_heuristic(i),
1769 MC_(bytes_heuristically_reachable)[i],
1770 DBY (MC_(bytes_heuristically_reachable)[i],
1771 old_bytes_heuristically_reachable[i]),
1772 MC_(blocks_heuristically_reachable)[i],
1773 DBL (MC_(blocks_heuristically_reachable)[i],
1774 old_blocks_heuristically_reachable[i]));
1775 VG_(umsg)(" suppressed: %'lu%s bytes in %'lu%s blocks\n",
1776 MC_(bytes_suppressed),
1777 DBY (MC_(bytes_suppressed), old_bytes_suppressed),
1778 MC_(blocks_suppressed),
1779 DBL (MC_(blocks_suppressed), old_blocks_suppressed));
1780 if (lcp->mode != LC_Full &&
1781 (MC_(blocks_leaked) + MC_(blocks_indirect) +
1782 MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
1783 if (lcp->requested_by_monitor_command)
1784 VG_(umsg)("To see details of leaked memory, "
1785 "give 'full' arg to leak_check\n");
1786 else
1787 VG_(umsg)("Rerun with --leak-check=full to see details "
1788 "of leaked memory\n");
1790 if (lcp->mode == LC_Full &&
1791 MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds)) {
1792 VG_(umsg)("Reachable blocks (those to which a pointer "
1793 "was found) are not shown.\n");
1794 if (lcp->requested_by_monitor_command)
1795 VG_(umsg)("To see them, add 'reachable any' args to leak_check\n");
1796 else
1797 VG_(umsg)("To see them, rerun with: --leak-check=full "
1798 "--show-leak-kinds=all\n");
1800 VG_(umsg)("\n");
1801 #undef DBL
1802 #undef DBY
1806 // Recursively print all indirectly leaked blocks collected in the clique.
1807 // Printing stops when *remaining reaches 0.
1808 static void print_clique (Int clique, UInt level, UInt *remaining)
1810 Int ind;
1811 UInt i, n_lossrecords;
1813 n_lossrecords = VG_(OSetGen_Size)(lr_table);
1815 for (ind = 0; ind < lc_n_chunks && *remaining > 0; ind++) {
1816 LC_Extra* ind_ex = &(lc_extras)[ind];
1817 if (ind_ex->state == IndirectLeak
1818 && ind_ex->IorC.clique == (SizeT) clique) {
1819 MC_Chunk* ind_ch = lc_chunks[ind];
1820 LossRecord* ind_lr;
1821 LossRecordKey ind_lrkey;
1822 UInt lr_i;
1823 ind_lrkey.state = ind_ex->state;
1824 ind_lrkey.allocated_at = MC_(allocated_at)(ind_ch);
1825 ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
1826 for (lr_i = 0; lr_i < n_lossrecords; lr_i++)
1827 if (ind_lr == lr_array[lr_i])
1828 break;
1829 for (i = 0; i < level; i++)
1830 VG_(umsg)(" ");
1831 VG_(umsg)("%p[%lu] indirect loss record %u\n",
1832 (void *)ind_ch->data, (SizeT)ind_ch->szB,
1833 lr_i+1); // lr_i+1 for user numbering.
1834 (*remaining)--;
1835 if (lr_i >= n_lossrecords)
1836 VG_(umsg)
1837 ("error: no indirect loss record found for %p[%lu]?????\n",
1838 (void *)ind_ch->data, (SizeT)ind_ch->szB);
1839 print_clique(ind, level+1, remaining);
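/* A small standalone sketch (a fixed parent[] array stands in for the real
   lc_chunks/lc_extras clique data) of the count-limited recursive printing
   done by print_clique: each indirectly leaked child is printed with one
   extra level of indentation, the remaining budget is decremented, and the
   child's own indirect blocks are then printed recursively. */
#if 0
#include <stdio.h>

#define N 5
static const int parent[N] = { -1, 0, 0, 2, -1 };  /* 1,2 under 0; 3 under 2 */

static void print_clique_sketch(int leader, unsigned level, unsigned *remaining)
{
   for (int i = 0; i < N && *remaining > 0; i++) {
      if (parent[i] != leader) continue;
      for (unsigned s = 0; s < level; s++) printf("  ");
      printf("block %d (indirectly lost under %d)\n", i, leader);
      (*remaining)--;
      print_clique_sketch(i, level + 1, remaining);  /* its own children */
   }
}

int main(void)
{
   unsigned remaining = 10;
   print_clique_sketch(0, 1, &remaining);
   return 0;
}
#endif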
1844 Bool MC_(print_block_list) ( UInt loss_record_nr_from,
1845 UInt loss_record_nr_to,
1846 UInt max_blocks,
1847 UInt heuristics)
1849 UInt loss_record_nr;
1850 UInt i, n_lossrecords;
1851 LossRecord* lr;
1852 Bool lr_printed;
1853 UInt remaining = max_blocks;
1855 if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
1856 VG_(umsg)("Can't print block list : no valid leak search result\n");
1857 return False;
1860 if (lc_chunks_n_frees_marker != MC_(get_cmalloc_n_frees)()) {
1861 VG_(umsg)("Can't print obsolete block list : redo a leak search first\n");
1862 return False;
1865 n_lossrecords = VG_(OSetGen_Size)(lr_table);
1866 if (loss_record_nr_from >= n_lossrecords)
1867 return False; // Invalid starting loss record nr.
1869 if (loss_record_nr_to >= n_lossrecords)
1870 loss_record_nr_to = n_lossrecords - 1;
1872 tl_assert (lr_array);
1874 for (loss_record_nr = loss_record_nr_from;
1875 loss_record_nr <= loss_record_nr_to && remaining > 0;
1876 loss_record_nr++) {
1877 lr = lr_array[loss_record_nr];
1878 lr_printed = False;
1880 /* If the user asks to print a specific loss record, we print
1881 its details even if no block will be shown for this lr.
1882 If the user asks to print a range of loss records, we only print
1883 the lr details when at least one block is shown. */
1884 if (loss_record_nr_from == loss_record_nr_to) {
1885 /* (+1 on loss_record_nr as user numbering for loss records
1886 starts at 1). */
1887 MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);
1888 lr_printed = True;
1891 // Match the chunks with loss records.
1892 for (i = 0; i < lc_n_chunks && remaining > 0; i++) {
1893 MC_Chunk* ch = lc_chunks[i];
1894 LC_Extra* ex = &(lc_extras)[i];
1895 LossRecord* old_lr;
1896 LossRecordKey lrkey;
1897 lrkey.state = ex->state;
1898 lrkey.allocated_at = MC_(allocated_at)(ch);
1900 old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
1901 if (old_lr) {
1902 // We found an existing loss record matching this chunk.
1903 // If this is the loss record we are looking for, output the
1904 // pointer.
1905 if (old_lr == lr_array[loss_record_nr]
1906 && (heuristics == 0 || HiS(ex->heuristic, heuristics))) {
1907 if (!lr_printed) {
1908 MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);
1909 lr_printed = True;
1912 if (ex->heuristic)
1913 VG_(umsg)("%p[%lu] (found via heuristic %s)\n",
1914 (void *)ch->data, (SizeT)ch->szB,
1915 pp_heuristic (ex->heuristic));
1916 else
1917 VG_(umsg)("%p[%lu]\n",
1918 (void *)ch->data, (SizeT)ch->szB);
1919 remaining--;
1920 if (ex->state != Reachable) {
1921 // We can print the clique in all states, except Reachable.
1922 // In Unreached state, lc_chunks[i] is the clique leader.
1923 // In IndirectLeak, lc_chunks[i] might have been a clique
1924 // leader which was later collected in another clique.
1925 // For Possible, lc_chunks[i] might be the top of a clique
1926 // or an intermediate clique.
1927 print_clique(i, 1, &remaining);
1930 } else {
1931 // No existing loss record matches this chunk ???
1932 VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
1933 (void *)ch->data, (SizeT)ch->szB);
1937 return True;
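/* Usage note (informal): MC_(print_block_list) backs memcheck's gdbserver
   monitor command "block_list", e.g.
      (gdb) monitor block_list 7
   which lists the blocks of loss record 7 from the last leak search. The
   full option syntax (ranges, block limits, heuristics filters) is described
   in the memcheck manual and may differ between Valgrind versions. */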
1940 // If searched == 0, scan the memory root set, pushing onto the mark stack
1941 // the blocks encountered.
1942 // Otherwise (searched != 0), scan the memory root set searching for pointers
1943 // pointing inside [searched, searched+szB).
1944 static void scan_memory_root_set(Addr searched, SizeT szB)
1946 Int i;
1947 Int n_seg_starts;
1948 Addr* seg_starts = VG_(get_segment_starts)( SkFileC | SkAnonC | SkShmC,
1949 &n_seg_starts );
1951 tl_assert(seg_starts && n_seg_starts > 0);
1953 lc_scanned_szB = 0;
1954 lc_sig_skipped_szB = 0;
1956 // VG_(am_show_nsegments)( 0, "leakcheck");
1957 for (i = 0; i < n_seg_starts; i++) {
1958 SizeT seg_size;
1959 NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
1960 tl_assert(seg);
1961 tl_assert(seg->kind == SkFileC || seg->kind == SkAnonC ||
1962 seg->kind == SkShmC);
1964 if (!(seg->hasR && seg->hasW)) continue;
1965 if (seg->isCH) continue;
1967 // Don't poke around in device segments as this may cause
1968 // hangs. Include /dev/zero just in case someone allocated
1969 // memory by explicitly mapping /dev/zero.
1970 if (seg->kind == SkFileC
1971 && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
1972 const HChar* dev_name = VG_(am_get_filename)( seg );
1973 if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
1974 // Don't skip /dev/zero.
1975 } else {
1976 // Skip this device mapping.
1977 continue;
1981 if (0)
1982 VG_(printf)("ACCEPT %2d %#lx %#lx\n", i, seg->start, seg->end);
1984 // Scan the segment. We use -1 for the clique number, because this
1985 // is a root-set.
1986 seg_size = seg->end - seg->start + 1;
1987 if (VG_(clo_verbosity) > 2) {
1988 VG_(message)(Vg_DebugMsg,
1989 " Scanning root segment: %#lx..%#lx (%lu)\n",
1990 seg->start, seg->end, seg_size);
1992 lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True,
1993 /*clique*/-1, /*cur_clique*/-1,
1994 searched, szB);
1996 VG_(free)(seg_starts);
1999 static MC_Mempool *find_mp_of_chunk (MC_Chunk* mc_search)
2001 MC_Mempool* mp;
2003 tl_assert( MC_(mempool_list) );
2005 VG_(HT_ResetIter)( MC_(mempool_list) );
2006 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
2007 MC_Chunk* mc;
2008 VG_(HT_ResetIter)(mp->chunks);
2009 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
2010 if (mc == mc_search)
2011 return mp;
2015 return NULL;
2018 /*------------------------------------------------------------*/
2019 /*--- Top-level entry point. ---*/
2020 /*------------------------------------------------------------*/
2022 void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
2024 Int i, j;
2026 tl_assert(lcp->mode != LC_Off);
2028 // Verify some assertions which are used in lc_scan_memory.
2029 tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
2030 tl_assert((SM_SIZE % sizeof(Addr)) == 0);
2031 // The above two assertions are critical, while the assertion below
2032 // ensures that the optimisation in the loop is done in the
2033 // correct order: the loop checks for (big) SM chunk skipping
2034 // before checking for (smaller) page skipping.
2035 tl_assert((SM_SIZE % VKI_PAGE_SIZE) == 0);
2037 MC_(leak_search_gen)++;
2038 MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;
2039 detect_memory_leaks_last_heuristics = lcp->heuristics;
2041 // Get the chunks, stop if there were none.
2042 if (lc_chunks) {
2043 VG_(free)(lc_chunks);
2044 lc_chunks = NULL;
2046 lc_chunks = get_sorted_array_of_active_chunks(&lc_n_chunks);
2047 lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
2048 if (lc_n_chunks == 0) {
2049 tl_assert(lc_chunks == NULL);
2050 if (lr_table != NULL) {
2051 // forget the previously recorded LossRecords, as the next leak search
2052 // can in any case only create new leaks.
2053 // Maybe it would be better to call print_results instead?
2054 // (at least when leak decreases are requested)
2055 // That would then output all LossRecords with a size decreasing to 0.
2056 VG_(OSetGen_Destroy) (lr_table);
2057 lr_table = NULL;
2059 if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
2060 VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
2061 VG_(umsg)("\n");
2063 return;
2066 // Sanity check -- make sure they don't overlap. One exception is that
2067 // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
2068 // This is for bug 100628. If this occurs, we ignore the malloc() block
2069 // for leak-checking purposes. This is a hack and probably should be done
2070 // better, but at least it's consistent with mempools (which are treated
2071 // like this in find_active_chunks). Mempools have a separate VgHashTable
2072 // for mempool chunks, but if custom-allocated blocks are put in a separate
2073 // table from normal heap blocks it makes free-mismatch checking more
2074 // difficult.
2075 // Another exception: Metapool memory blocks overlap by definition. The meta-
2076 // block is allocated (by a custom allocator), and chunks of that block are
2077 // allocated again for use by the application: Not an error.
2079 // If this check fails, it probably means that the application
2080 // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
2081 // requests, eg. has made overlapping requests (which are
2082 // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
2083 // again nonsensical.
2085 for (i = 0; i < lc_n_chunks-1; i++) {
2086 MC_Chunk* ch1 = lc_chunks[i];
2087 MC_Chunk* ch2 = lc_chunks[i+1];
2089 Addr start1 = ch1->data;
2090 Addr start2 = ch2->data;
2091 Addr end1 = ch1->data + ch1->szB - 1;
2092 Addr end2 = ch2->data + ch2->szB - 1;
2093 Bool isCustom1 = ch1->allockind == MC_AllocCustom;
2094 Bool isCustom2 = ch2->allockind == MC_AllocCustom;
2096 if (end1 < start2) {
2097 // Normal case - no overlap.
2099 // We used to allow exact duplicates, I'm not sure why. --njn
2100 //} else if (start1 == start2 && end1 == end2) {
2101 // Degenerate case: exact duplicates.
2103 } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
2104 // Block i is MALLOCLIKE and entirely within block i+1.
2105 // Remove block i+1.
2106 for (j = i+1; j < lc_n_chunks-1; j++) {
2107 lc_chunks[j] = lc_chunks[j+1];
2109 lc_n_chunks--;
2111 } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
2112 // Block i+1 is MALLOCLIKE and entirely within block i.
2113 // Remove block i.
2114 for (j = i; j < lc_n_chunks-1; j++) {
2115 lc_chunks[j] = lc_chunks[j+1];
2117 lc_n_chunks--;
2119 } else {
2120 // Overlap is allowed ONLY when one of the two candidates is a block
2121 // from a memory pool that has the metapool attribute set.
2122 // All other mixtures trigger the error + assert.
2123 MC_Mempool* mp;
2124 Bool ch1_is_meta = False, ch2_is_meta = False;
2125 Bool Inappropriate = False;
2127 if (MC_(is_mempool_block)(ch1)) {
2128 mp = find_mp_of_chunk(ch1);
2129 if (mp && mp->metapool) {
2130 ch1_is_meta = True;
2134 if (MC_(is_mempool_block)(ch2)) {
2135 mp = find_mp_of_chunk(ch2);
2136 if (mp && mp->metapool) {
2137 ch2_is_meta = True;
2141 // If one of the blocks is a meta block, the other must be entirely
2142 // within that meta block, or something is really wrong with the custom
2143 // allocator.
2144 if (ch1_is_meta != ch2_is_meta) {
2145 if ( (ch1_is_meta && (start2 < start1 || end2 > end1)) ||
2146 (ch2_is_meta && (start1 < start2 || end1 > end2)) ) {
2147 Inappropriate = True;
2151 if (ch1_is_meta == ch2_is_meta || Inappropriate) {
2152 VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
2153 start1, end1, start2, end2);
2154 VG_(umsg)("Blocks allocation contexts:\n"),
2155 VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
2156 VG_(umsg)("\n"),
2157 VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
2158 VG_(umsg)("This is usually caused by using ");
2159 VG_(umsg)("VALGRIND_MALLOCLIKE_BLOCK in an inappropriate way.\n");
2160 tl_assert (0);
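/* A standalone sketch (hypothetical interval type) of the adjacent-pair
   overlap test used in the loop above: with chunks sorted by start address,
   comparing each chunk only with its successor is sufficient, and
   "end1 < start2" (inclusive end addresses) means no overlap. */
#if 0
#include <stdio.h>

typedef struct { unsigned long start, end; } Iv;   /* [start, end], inclusive */

int main(void)
{
   Iv ivs[] = { {0x1000, 0x10ff}, {0x1100, 0x11ff}, {0x11f0, 0x12ff} };
   for (int i = 0; i + 1 < 3; i++) {
      if (ivs[i].end < ivs[i+1].start)
         printf("chunks %d and %d do not overlap\n", i, i+1);
      else
         printf("chunks %d and %d overlap\n", i, i+1);   /* 1 and 2 do */
   }
   return 0;
}
#endif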
2165 // Initialise lc_extras.
2166 if (lc_extras) {
2167 VG_(free)(lc_extras);
2168 lc_extras = NULL;
2170 lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
2171 for (i = 0; i < lc_n_chunks; i++) {
2172 lc_extras[i].state = Unreached;
2173 lc_extras[i].pending = False;
2174 lc_extras[i].heuristic = LchNone;
2175 lc_extras[i].IorC.indirect_szB = 0;
2178 // Initialise lc_markstack.
2179 lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
2180 for (i = 0; i < lc_n_chunks; i++) {
2181 lc_markstack[i] = -1;
2183 lc_markstack_top = -1;
2185 // Verbosity.
2186 if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
2187 VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
2188 lc_n_chunks );
2191 // Scan the memory root-set, pushing onto the mark stack any blocks
2192 // pointed to.
2193 scan_memory_root_set(/*searched*/0, 0);
2195 // Scan GP registers for chunk pointers.
2196 VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);
2198 // Process the pushed blocks. After this, every block that is reachable
2199 // from the root-set has been traced.
2200 lc_process_markstack(/*clique*/-1);
2202 if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
2203 VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
2204 if (lc_sig_skipped_szB > 0)
2205 VG_(umsg)("Skipped %'lu bytes due to read errors\n",
2206 lc_sig_skipped_szB);
2207 VG_(umsg)( "\n" );
2210 // Trace all the leaked blocks to determine which are directly leaked and
2211 // which are indirectly leaked. For each Unreached block, push it onto
2212 // the mark stack, and find all the as-yet-Unreached blocks reachable
2213 // from it. These form a clique and are marked IndirectLeak, and their
2214 // size is added to the clique leader's indirect size. If one of the
2215 // found blocks was itself a clique leader (from a previous clique), then
2216 // the cliques are merged.
2217 for (i = 0; i < lc_n_chunks; i++) {
2218 MC_Chunk* ch = lc_chunks[i];
2219 LC_Extra* ex = &(lc_extras[i]);
2221 if (VG_DEBUG_CLIQUE)
2222 VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
2223 i, ch->data, ex->state);
2225 tl_assert(lc_markstack_top == -1);
2227 if (ex->state == Unreached) {
2228 if (VG_DEBUG_CLIQUE)
2229 VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);
2231 // Push this Unreached block onto the stack and process it.
2232 lc_push(i, ch);
2233 lc_process_markstack(/*clique*/i);
2235 tl_assert(lc_markstack_top == -1);
2236 tl_assert(ex->state == Unreached);
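/* A standalone sketch (hypothetical data; points_to[] stands in for the real
   pointer scan of block contents) of the clique gathering performed by the
   loop above: starting from an Unreached leader, every still-Unreached block
   reachable from it becomes IndirectLeak and its size is added to the
   leader's indirect total, so the leader reports direct plus indirect loss. */
#if 0
#include <stdio.h>

enum { Unreached, IndirectLeak };
#define N 4
static int           state[N]     = { Unreached, Unreached, Unreached, Unreached };
static unsigned long szB[N]       = { 16, 32, 64, 8 };
static int           points_to[N] = { 1, 2, -1, -1 };  /* 0 -> 1 -> 2; 3 isolated */
static unsigned long indirect_szB[N];

static void gather_clique(int leader, int b)
{
   int next = points_to[b];
   if (next >= 0 && state[next] == Unreached) {
      state[next] = IndirectLeak;
      indirect_szB[leader] += szB[next];
      gather_clique(leader, next);
   }
}

int main(void)
{
   for (int i = 0; i < N; i++)
      if (state[i] == Unreached)
         gather_clique(i, i);
   /* Block 0: 16 bytes direct plus 96 bytes indirect; block 3: 8 bytes direct. */
   printf("leader 0: %lu direct, %lu indirect\n", szB[0], indirect_szB[0]);
   return 0;
}
#endif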
2240 print_results( tid, lcp);
2242 VG_(free) ( lc_markstack );
2243 lc_markstack = NULL;
2244 // lc_chunks, lc_extras, lr_array and lr_table are kept (needed if the user
2245 // calls MC_(print_block_list)). lr_table is also used for delta leak
2246 // reporting between this leak search and the next leak search.
2249 static Addr searched_wpa;
2250 static SizeT searched_szB;
2251 static void
2252 search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
2254 if (addr_in_reg >= searched_wpa
2255 && addr_in_reg < searched_wpa + searched_szB) {
2256 if (addr_in_reg == searched_wpa)
2257 VG_(umsg)
2258 ("tid %u register %s pointing at %#lx\n",
2259 tid, regname, searched_wpa);
2260 else
2261 VG_(umsg)
2262 ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
2263 tid, regname, (long unsigned) addr_in_reg - searched_wpa,
2264 searched_wpa);
2268 void MC_(who_points_at) ( Addr address, SizeT szB)
2270 MC_Chunk** chunks;
2271 Int n_chunks;
2272 Int i;
2274 if (szB == 1)
2275 VG_(umsg) ("Searching for pointers to %#lx\n", address);
2276 else
2277 VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
2278 szB, address);
2280 chunks = get_sorted_array_of_active_chunks(&n_chunks);
2282 // Scan memory root-set, searching for ptr pointing in address[szB]
2283 scan_memory_root_set(address, szB);
2285 // Scan active malloc-ed chunks
2286 for (i = 0; i < n_chunks; i++) {
2287 lc_scan_memory(chunks[i]->data, chunks[i]->szB,
2288 /*is_prior_definite*/True,
2289 /*clique*/-1, /*cur_clique*/-1,
2290 address, szB);
2292 VG_(free) ( chunks );
2294 // Scan GP registers for pointers to address range.
2295 searched_wpa = address;
2296 searched_szB = szB;
2297 VG_(apply_to_GP_regs)(search_address_in_GP_reg);
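/* Usage note (informal): MC_(who_points_at) implements memcheck's gdbserver
   monitor command "who_points_at", e.g.
      (gdb) monitor who_points_at 0x4028028
      (gdb) monitor who_points_at 0x4028028 16
   where the second form also reports interior pointers into the 16 bytes
   starting at the given (example) address. Exact syntax is documented in the
   memcheck manual and may vary across Valgrind versions. */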
2301 /*--------------------------------------------------------------------*/
2302 /*--- end ---*/
2303 /*--------------------------------------------------------------------*/