// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>     // for write()
#endif
#include <fcntl.h>      // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH    // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>   // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>    // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"   // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
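
// A full dump written by FillOrderedProfile() below is therefore laid
// out as (schematic):
//
//   heap profile: <total bucket line>
//   <one line per allocation bucket, by decreasing in-use space>
//
//   MAPPED_LIBRARIES:
//   <contents of /proc/self/maps>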

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

// Size for alloc_table_ and mmap_table_.
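// (179999 is prime, which helps the simple modulo hash in GetBucket()
// spread buckets evenly across the table.)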
static const int kHashTableSize = 179999;
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}
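// (Used with sort() in MakeSortedBucketList() below.)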

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
    : alloc_(alloc), dealloc_(dealloc) {
  // Initialize the overall profile stats.
  memset(&total_, 0, sizeof(total_));

  // Make the malloc table.
  const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
  alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
  memset(alloc_table_, 0, alloc_table_bytes);
  num_alloc_buckets_ = 0;

  // Initialize the mmap table.
  mmap_table_ = NULL;
  num_available_mmap_buckets_ = 0;

  // Make malloc and mmap allocation maps.
  alloc_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
  mmap_address_map_ = NULL;
}

HeapProfileTable::~HeapProfileTable() {
  DeallocateBucketTable(alloc_table_);
  alloc_table_ = NULL;
  DeallocateBucketTable(mmap_table_);
  mmap_table_ = NULL;
  DeallocateAllocationMap(alloc_address_map_);
  alloc_address_map_ = NULL;
  DeallocateAllocationMap(mmap_address_map_);
  mmap_address_map_ = NULL;
}

void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) {
  if (allocation != NULL) {
    // Destroy the argument map, not alloc_address_map_: this function is
    // also called for mmap_address_map_, after alloc_address_map_ has
    // already been torn down.
    allocation->~AllocationMap();
    dealloc_(allocation);
  }
}

void HeapProfileTable::DeallocateBucketTable(Bucket** table) {
  if (table != NULL) {
    for (int b = 0; b < kHashTableSize; b++) {
      for (Bucket* x = table[b]; x != 0; /**/) {
        Bucket* bucket = x;  // renamed from "b" to avoid shadowing the index
        x = x->next;
        dealloc_(bucket->stack);
        dealloc_(bucket);
      }
    }
    dealloc_(table);
  }
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
    int depth, const void* const key[], Bucket** table,
    int* bucket_count) {
  // Make hash-value
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
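  // (The steps above are a variant of Jenkins' one-at-a-time hash,
  // mixed over the stack PCs instead of over bytes.)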

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = table[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = table[buck];
  table[buck] = b;
  if (bucket_count != NULL) {
    ++(*bucket_count);
  }
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}
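
// (In the call above, the "+ 1" presumably accounts for
// GetCallerStackTrace's own frame, on top of the kStripFrames
// profiler-internal frames.)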

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_,
                        &num_alloc_buckets_);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  alloc_address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (alloc_address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}
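
// A typical caller (a sketch; the real hooks live in heap-profiler.cc,
// and "table" here is a hypothetical HeapProfileTable*) drives
// RecordAlloc()/RecordFree() from the malloc hooks roughly like this:
//
//   static void NewHook(const void* ptr, size_t size) {
//     void* stack[HeapProfileTable::kMaxStackDepth];
//     int depth = table->GetCallerStackTrace(0, stack);
//     table->RecordAlloc(ptr, size, depth, stack);
//   }
//   static void DeleteHook(const void* ptr) { table->RecordFree(ptr); }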

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

// We'd be happier using a snprintf-style helper class here, but we don't,
// to reduce dependencies.
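// Each bucket becomes one line of the form
//   <live objs>: <live bytes> [<total objs>: <total bytes>] @<extra> <pc>...
// e.g. (illustrative values):
//        3:     1024 [    10:     4096] @ 0x0040321f 0x004087a2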
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8"PRId64" [%6d: %8"PRId64"] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  // The list holds Bucket pointers, so sizeof(Bucket*) per slot is all
  // that is needed (sizeof(Bucket) would over-allocate).
  Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket*) *
      (num_alloc_buckets_ + num_available_mmap_buckets_)));

  RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, "");

  int n = 0;

  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) {
      list[n++] = x;
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_, "");

  if (mmap_table_ != NULL) {
    for (int b = 0; b < kHashTableSize; b++) {
      for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) {
        list[n++] = x;
      }
    }
  }
  RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");

  sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
       ByAllocatedSpace);

  return list;
}

void HeapProfileTable::RefreshMMapData() {
  // Make the table
  static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
  if (mmap_table_ == NULL) {
    mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
    memset(mmap_table_, 0, mmap_table_bytes);
  }
  num_available_mmap_buckets_ = 0;

  ClearMMapData();
  mmap_address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  MemoryRegionMap::LockHolder l;
  for (MemoryRegionMap::RegionIterator r =
           MemoryRegionMap::BeginRegionLocked();
       r != MemoryRegionMap::EndRegionLocked(); ++r) {
    Bucket* b =
        GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
    if (b->alloc_size == 0) {
      num_available_mmap_buckets_ += 1;
    }
    b->allocs += 1;
    b->alloc_size += r->end_addr - r->start_addr;

    AllocValue v;
    v.set_bucket(b);
    v.bytes = r->end_addr - r->start_addr;
    mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
  }
}

void HeapProfileTable::ClearMMapData() {
  if (mmap_address_map_ != NULL) {
    mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
    mmap_address_map_->~AllocationMap();
    dealloc_(mmap_address_map_);
    mmap_address_map_ = NULL;
  }
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_alloc_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.  Whew!
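  //
  // Schematically, buf evolves like this (widths illustrative):
  //   [maps text.........                     ]  after FillProcSelfMaps
  //   [                     ...maps text......]  after memmove to the end
  //   [bucket lines...  gap  ...maps text.....]  after UnparseBucket calls
  //   [bucket lines...maps text......         ]  after the final memmove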
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;  // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  Bucket total_with_mmap(total_);
  if (mmap_table_ != NULL) {
    total_with_mmap.alloc_size += MemoryRegionMap::MapSize();
    total_with_mmap.free_size += MemoryRegionMap::UnmapSize();
  }
  bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size,
                                " heapprofile", &stats);
  for (int i = 0; i < num_alloc_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}
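
// (Note that DumpNonLiveIterator also clears each live mark it encounters,
// so a single pass both dumps the non-live objects and resets liveness
// state for the next round of marking.)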

inline void HeapProfileTable::ZeroBucketCountsIterator(
    const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) {
  Bucket* b = v->bucket();
  if (b != NULL) {
    b->allocs = 0;
    b->alloc_size = 0;
    b->free_size = 0;
    b->frees = 0;
  }
}

// Callback from NonLiveSnapshot; adds entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd != kIllegalRawFD) {
    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
    char buf[512];
    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                            NULL);
    RawWrite(fd, buf, len);
    const DumpArgs args(fd, NULL);
    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
    DumpProcSelfMaps(fd);
    RawClose(fd);
    return true;
  } else {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  alloc_address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}
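
// Typical leak-check flow over this API (a sketch; the heap checker is the
// real client, and "/tmp/leaks.heap" is just an illustrative path):
//
//   HeapProfileTable::Snapshot* base = table->TakeSnapshot();
//   ... run the code under test, calling MarkAsLive() on every object
//   ... still reachable at the end ...
//   HeapProfileTable::Snapshot* leaks = table->NonLiveSnapshot(base);
//   leaks->ReportLeaks("my-checker", "/tmp/leaks.heap", true);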

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %"PRIuS" bytes "
          "in %"PRIuS" objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %"PRIxPTR" %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}