// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//
#include <unistd.h>                   // for write()
#include <fcntl.h>                    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH                  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>                 // for PRIxPTR
#endif
#include <string>
#include <map>
#include <algorithm>                  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"             // for the RawFD I/O commands
#include "base/sysinfo.h"
using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------
DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");
//----------------------------------------------------------------------
// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
//----------------------------------------------------------------------
const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------
// Size for alloc_table_ and mmap_table_.
static const int kHashTableSize = 179999;
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------
// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------
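
// A HeapProfileTable keeps two parallel structures: hash tables of Buckets
// keyed by allocation stack trace (one for malloc, one for mmap regions),
// and AddressMaps from each live object's address to its AllocValue.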

HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
    : alloc_(alloc), dealloc_(dealloc) {
  // Initialize the overall profile stats.
  memset(&total_, 0, sizeof(total_));

  // Make the malloc table.
  const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
  alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
  memset(alloc_table_, 0, alloc_table_bytes);
  num_alloc_buckets_ = 0;

  // Initialize the mmap table.
  mmap_table_ = NULL;
  num_available_mmap_buckets_ = 0;

  // Make malloc and mmap allocation maps.
  alloc_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
  mmap_address_map_ = NULL;
}

HeapProfileTable::~HeapProfileTable() {
  DeallocateBucketTable(alloc_table_);
  alloc_table_ = NULL;
  DeallocateBucketTable(mmap_table_);
  mmap_table_ = NULL;
  DeallocateAllocationMap(alloc_address_map_);
  alloc_address_map_ = NULL;
  DeallocateAllocationMap(mmap_address_map_);
  mmap_address_map_ = NULL;
}

void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) {
  if (allocation != NULL) {
    // Destroy and free the map that was passed in, not alloc_address_map_.
    allocation->~AllocationMap();
    dealloc_(allocation);
  }
}

void HeapProfileTable::DeallocateBucketTable(Bucket** table) {
  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = table[b]; x != 0; /**/) {

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
    int depth, const void* const key[], Bucket** table,
    int* bucket_count) {
  // Hash the stack trace.
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = table[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create a new bucket.
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = table[buck];
  table[buck] = b;
  if (bucket_count != NULL) {
    ++(*bucket_count);
  }
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_,
                        &num_alloc_buckets_);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  alloc_address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (alloc_address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

// We'd be happier using snprintfer, but we don't, to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed = snprintf(buf + buflen, bufsize - buflen,
                         "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
                         b.allocs - b.frees,
                         b.alloc_size - b.free_size,
                         b.allocs,
                         b.alloc_size,
                         extra);
  // If it looks like the snprintf failed, ignore the fact that we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) *
      (num_alloc_buckets_ + num_available_mmap_buckets_)));

  RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, "");

  int n = 0;
  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) {
      list[n++] = x;
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_, "");

  if (mmap_table_ != NULL) {
    for (int b = 0; b < kHashTableSize; b++) {
      for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) {
        list[n++] = x;
      }
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");

  sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
       ByAllocatedSpace);

  return list;
}

void HeapProfileTable::RefreshMMapData() {
  // Make the mmap bucket table on first use.
  static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
  if (mmap_table_ == NULL) {
    mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
    memset(mmap_table_, 0, mmap_table_bytes);
  }
  num_available_mmap_buckets_ = 0;

  mmap_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  MemoryRegionMap::LockHolder l;
  for (MemoryRegionMap::RegionIterator r =
           MemoryRegionMap::BeginRegionLocked();
       r != MemoryRegionMap::EndRegionLocked(); ++r) {
    Bucket* b =
        GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
    if (b->alloc_size == 0) {
      num_available_mmap_buckets_ += 1;
    }
    b->allocs += 1;
    b->alloc_size += r->end_addr - r->start_addr;

    AllocValue v;
    v.set_bucket(b);
    v.bytes = r->end_addr - r->start_addr;
    mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
  }
}

void HeapProfileTable::ClearMMapData() {
  if (mmap_address_map_ != NULL) {
    mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
    mmap_address_map_->~AllocationMap();
    dealloc_(mmap_address_map_);
    mmap_address_map_ = NULL;
  }
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_alloc_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gap that remains.
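  //
  // Schematically (a sketch of the buffer, sizes not to scale):
  //   [maps text][...............free...............]   after the first write
  //   [...............free...............][maps text]   after moving it to the end
  //   [bucket lines][.......gap.......][maps text]       after writing the buckets
  //   [bucket lines][maps text]                          after the final move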
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  Bucket total_with_mmap(total_);
  if (mmap_table_ != NULL) {
    total_with_mmap.alloc_size += MemoryRegionMap::MapSize();
    total_with_mmap.free_size += MemoryRegionMap::UnmapSize();
  }
  bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size,
                                " heapprofile", &stats);
  for (int i = 0; i < num_alloc_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  // Skip entries that are still live or explicitly ignored; dumping resets
  // the live mark set by MarkAsLive().
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];  // assumed size; holds one unparsed bucket line
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

inline void HeapProfileTable::ZeroBucketCountsIterator(
    const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) {
  Bucket* b = v->bucket();
  if (b != NULL) {
    b->allocs = 0;
    b->alloc_size = 0;
    b->frees = 0;
    b->free_size = 0;
  }
}

// Callback from NonLiveSnapshot; adds an entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd != kIllegalRawFD) {
    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
    char buf[512];  // assumed size; holds the one "heapprofile" totals line
    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                            NULL);
    RawWrite(fd, buf, len);
    const DumpArgs args(fd, NULL);
    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
    DumpProcSelfMaps(fd);
    RawClose(fd);
    return true;
  } else {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
    globfree(&g);
  }
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  alloc_address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
                     reinterpret_cast<uintptr_t>(pc),
                     symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }
  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n - 1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}