// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#include <stdio.h>    // for snprintf()
#include <string.h>   // for memset(), strlen(), memcmp(), memmove()
#include <unistd.h>   // for write()
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH    // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>   // for PRIxPTR
#endif
#include <string>
#include <map>
#include <algorithm>    // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"    // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
#if defined(TYPE_PROFILING)
static const char kTypeProfileStatsHeader[] = "type statistics:\n";
#endif  // defined(TYPE_PROFILING)
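
// A dumped profile (see FillOrderedProfile below) therefore looks roughly
// like this, with illustrative values only:
//
//   heap profile:   1234:  5678901 [ 2345:  6789012] @ heapprofile
//      10:     4096 [   12:     8192] @ 0x00401a2b 0x00402c3d
//      ...one line per bucket...
//
//   MAPPED_LIBRARIES:
//   ...contents of /proc/self/maps...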

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
// GCC requires this declaration, but MSVC does not allow it.
#if !defined(COMPILER_MSVC)
/*static*/ const int HeapProfileTable::kMaxStackDepth;
#endif

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      profile_mmap_(profile_mmap) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;   // advance before freeing the current bucket
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}
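
// A minimal usage sketch of the recording API above, assuming a hypothetical
// caller that owns a HeapProfileTable* named "table" (this mirrors how a
// malloc hook would typically drive it; not code from this file):
//
//   void* stack[HeapProfileTable::kMaxStackDepth];
//   int depth = table->GetCallerStackTrace(0, stack);
//   table->RecordAlloc(ptr, bytes, depth, stack);   // on allocation
//   ...
//   table->RecordFree(ptr);                         // on deallocation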

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  const MarkArgs args(mark, false);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

// We'd be happier using snprintfer, but we don't, to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
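
// For example, a bucket with 3 live objects totalling 768 bytes, out of
// 5 allocations totalling 1280 bytes, and a two-frame stack would be
// unparsed roughly as (illustrative values):
//
//      3:      768 [     5:     1280] @extra 0x0040112e 0x00401571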

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}
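
// Note: the returned array is carved out of alloc_, so the callers below
// (IterateOrderedAllocContexts and FillOrderedProfile) release it with
// dealloc_(list) once they are done iterating.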

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
    return;
  }

  AddressMap<TypeCount>* type_size_map;
  type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
  address_map_->Iterate(TallyTypesItererator, type_size_map);

  RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
  const DumpArgs args(fd, NULL);
  type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);

  RawClose(fd);

  type_size_map->~AddressMap<TypeCount>();
  dealloc_(type_size_map);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}
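
// A rough sketch of the buffer shuffling above (widths illustrative,
// not to scale):
//
//   after FillProcSelfMaps:   [ maps text    | unused                ]
//   after the first memmove:  [ unused                | maps text    ]
//   after bucket unparsing:   [ buckets... | gap      | maps text    ]
//   after the final memmove:  [ buckets... | maps text ]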

void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::TallyTypesItererator(
    const void* ptr,
    AllocValue* value,
    AddressMap<TypeCount>* type_size_map) {
  const std::type_info* type = LookupType(ptr);

  const void* key = NULL;
  if (type)
    key = type->name();

  TypeCount* count = type_size_map->FindMutable(key);
  if (count) {
    count->bytes += value->bytes;
    ++count->objects;
  } else {
    type_size_map->Insert(key, TypeCount(value->bytes, 1));
  }
}

void HeapProfileTable::DumpTypesIterator(const void* ptr,
                                         TypeCount* count,
                                         const DumpArgs& args) {
  char buf[1024];
  int len;
  const char* mangled_type_name = static_cast<const char*>(ptr);
  len = snprintf(buf, sizeof(buf), "%6d: %8" PRId64 " @ %s\n",
                 count->objects, count->bytes,
                 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
  RawWrite(args.fd, buf, len);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char addr[16];
  snprintf(addr, 16, "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, 1000, "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};
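
// Sorting an array of Entry with std::sort (as ReportLeaks does below)
// therefore places the buckets with the most leaked bytes first.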

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()]; // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
                     reinterpret_cast<uintptr_t>(pc),
                     symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n - 1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}