// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"   // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;  // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;  // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// Header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
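// A dump therefore begins with a line like the following (values are
// illustrative; the exact format comes from UnparseBucket() below):
//   heap profile:    520: 18038452 [  1521: 26744128] @ heapprofile
// followed by one line per allocation bucket and, at the end, the
// MAPPED_LIBRARIES section containing a copy of /proc/self/maps.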

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;  // Size for bucket_table_.
// GCC requires this declaration, but MSVC does not allow it.
#if !defined(COMPILER_MSVC)
/*static*/ const int HeapProfileTable::kMaxStackDepth;
#endif

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      bucket_table_(NULL),
      profile_mmap_(profile_mmap),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
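  // (The adds and xor-shifts above are a Jenkins-style one-at-a-time mix of
  // the frame addresses; the exact constants are not load-bearing, any
  // reasonable hash over the PCs would do.)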

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}
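
// Note on the skip count above: kStripFrames accounts for the profiler's own
// frames (more are stripped in debug builds, where less inlining happens),
// and the extra +1 is intended to skip GetCallerStackTrace itself, so that
// the recorded stack starts at the caller's allocation site.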

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  const MarkArgs args(mark, false);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

// We'd be happier using snprintfer, but we avoid it to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
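
// A bucket line produced above looks like (addresses illustrative):
//      4:     1024 [     6:     2048] @ 0x00402af3 0x00401b80 0x004010c2
// i.e. "live objects: live bytes [total allocs: total alloc bytes]"
// followed by the bucket's stack trace.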

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info. To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps. Whew!
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;  // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;  // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}
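
// Sketch of the buffer dance above (positions illustrative):
//   [maps....|unused............]   after FillProcSelfMaps at the front
//   [unused............|maps....]   after the first memmove
//   [header+buckets|gap|maps....]   after the UnparseBucket calls
//   [header+buckets|maps....]       after the final gap-closing memmove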

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char addr[16];
  snprintf(addr, 16, "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

inline
void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds the entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, 1000, "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;
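  // For example, with prefix "/tmp/myprog" and pid 123, the pattern built
  // above is "/tmp/myprog.00123..*.heap" (note the double dot: the snprintf
  // leaves a trailing '.' and the pattern appends ".*").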

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}
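
// The report logged above looks roughly like this (illustrative sizes and
// addresses, with a hypothetical checker name "_main_"):
//   Leak check _main_ detected leaks of 4096 bytes in 2 objects
//   The 2 largest leaks:
//   Leak of 2048 bytes in 1 objects allocated from:
//       @ 4005f0 main
//       @ 4003a2 __libc_start_main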

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}