// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)

#include <config.h>

#include <stdio.h>      // for snprintf()
#include <stdlib.h>
#include <string.h>     // for memset(), memmove(), strlen(), memcmp()
#include <unistd.h>     // for write()
#include <fcntl.h>      // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH    // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>   // for PRIxPTR
#endif
#include <string>
#include <map>
#include <algorithm>    // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"       // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
#if defined(TYPE_PROFILING)
static const char kTypeProfileStatsHeader[] = "type statistics:\n";
#endif  // defined(TYPE_PROFILING)

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      profile_mmap_(profile_mmap) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

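// Returns the bucket for the given stack trace, creating it in the chained
// hash table (bucket_table_) if it is not already present.  The hash key is
// computed from the stack's frame addresses.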
HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
    int depth, const void* const key[]) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
  }

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

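// The skip count passed to MallocHook covers kStripFrames profiler-internal
// frames plus the caller-requested skip_count; the extra +1 presumably skips
// this wrapper's own frame.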
int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

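// Typical usage (illustrative sketch only, not code from this file): the
// allocation hooks pair these calls roughly as
//   void* stack[HeapProfileTable::kMaxStackDepth];
//   int depth = table->GetCallerStackTrace(0, stack);
//   table->RecordAlloc(ptr, size, depth, stack);
// with the matching deallocation hook calling table->RecordFree(ptr).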
void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  const MarkArgs args(mark, false);  // mark_all == false: only touch
                                     // allocations that are still UNMARKED
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

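// Each bucket is unparsed as one heap-profile line of the form
//   <in-use objects>: <in-use bytes> [<total allocs>: <total alloc bytes>] @<extra> <stack PCs...>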
// We'd be happier using snprintfer, but we don't, to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

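// Walks every hash chain in bucket_table_, collects all bucket pointers into
// a flat array, and sorts it by in-use space (largest first).  The caller is
// responsible for releasing the array with dealloc_().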
HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
    return;
  }

  AddressMap<TypeCount>* type_size_map;
  type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
  address_map_->Iterate(TallyTypesItererator, type_size_map);

  RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
  const DumpArgs args(fd, NULL);
  type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);
  RawClose(fd);

  type_size_map->~AddressMap<TypeCount>();
  dealloc_(type_size_map);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

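// Appends the formatted profile line for one mmap-region bucket to the
// caller-supplied buffer in args, advancing args->buflen.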
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::TallyTypesItererator(
    const void* ptr,
    AllocValue* value,
    AddressMap<TypeCount>* type_size_map) {
  const std::type_info* type = LookupType(ptr);

  const void* key = NULL;
  if (type != NULL)
    key = type->name();

  TypeCount* count = type_size_map->FindMutable(key);
  if (count != NULL) {
    count->bytes += value->bytes;
    ++count->objects;
  } else {
    type_size_map->Insert(key, TypeCount(value->bytes, 1));
  }
}

void HeapProfileTable::DumpTypesIterator(const void* ptr,
                                         TypeCount* count,
                                         const DumpArgs& args) {
  char buf[1024];
  int len;
  const char* mangled_type_name = static_cast<const char*>(ptr);
  len = snprintf(buf, sizeof(buf), "%6d: %8" PRId64 " @ %s\n",
                 count->objects, count->bytes,
                 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
  RawWrite(args.fd, buf, len);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

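// Like DumpNonLiveIterator, but only dumps allocations carrying the requested
// mark, and prefixes each line with the object's address.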
void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char addr[16];
  snprintf(addr, 16, "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds the entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

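// Deletes stale profile files whose names start with "<prefix>.<pid>." and
// end with kFileExt, found via glob() when it is available.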
void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, 1000, "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*, Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
                     reinterpret_cast<uintptr_t>(pc),
                     symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n - 1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  //       (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}