// third_party/tcmalloc/chromium/src/heap-profile-table.cc

// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"    // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
#if defined(TYPE_PROFILING)
static const char kTypeProfileStatsHeader[] = "type statistics:\n";
#endif  // defined(TYPE_PROFILING)

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      bucket_table_(NULL),
      profile_mmap_(profile_mmap),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
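  // (The mixing above resembles Jenkins' one-at-a-time hash; its only job
  // here is to spread stack traces evenly across the fixed-size
  // bucket_table_ used below.)
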
  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

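// A minimal sketch of how these two calls are typically paired (hypothetical
// caller; "table", "ptr" and "bytes" are illustrative -- the real hooks live
// in the heap profiler, not here):
//
//   void* stack[HeapProfileTable::kMaxStackDepth];
//   int depth = table->GetCallerStackTrace(0, stack);
//   table->RecordAlloc(ptr, bytes, depth, stack);
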
void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

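// Note: a free for a pointer this table never recorded (e.g. allocated
// before profiling started) fails the FindAndRemove() above and is
// silently ignored.
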
bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  const MarkArgs args(mark, false);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

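// The second MarkArgs argument is "mark_all": MarkCurrentAllocations()
// stamps every tracked allocation with the given mark, while
// MarkUnmarkedAllocations() (mark_all == false) only stamps allocations
// whose mark is still UNMARKED, leaving earlier marks intact (see
// MarkIterator below).
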
// We'd be happier using snprintfer, but we don't, to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen, "%6d: %8"PRId64" [%6d: %8"PRId64"] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

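// For illustration, one record produced by UnparseBucket() looks roughly
// like this (values are made up):
//      3:     4096 [     7:    16384] @ 0x00402f10 0x00401ab3 0x004015c0
// i.e. live count: live bytes [total allocs: total alloc bytes] @<extra>,
// followed by the bucket's stack addresses.
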
HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
    return;
  }

  AddressMap<TypeCount>* type_size_map;
  type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
  address_map_->Iterate(TallyTypesItererator, type_size_map);

  RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
  const DumpArgs args(fd, NULL);
  type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);
  RawClose(fd);

  type_size_map->~AddressMap<TypeCount>();
  dealloc_(type_size_map);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info. To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps. Whew!
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;  // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

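// The return value is thus the number of bytes of buf actually used: all
// the bucket records that fit, followed by the MAPPED_LIBRARIES section.
// A return of 0 means even the headers did not fit.
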
// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

#if defined(TYPE_PROFILING)
// static
void HeapProfileTable::TallyTypesItererator(
    const void* ptr,
    AllocValue* value,
    AddressMap<TypeCount>* type_size_map) {
  const std::type_info* type = LookupType(ptr);

  const void* key = NULL;
  if (type)
    key = type->name();

  TypeCount* count = type_size_map->FindMutable(key);
  if (count) {
    count->bytes += value->bytes;
    ++count->objects;
  } else {
    type_size_map->Insert(key, TypeCount(value->bytes, 1));
  }
}

// static
void HeapProfileTable::DumpTypesIterator(const void* ptr,
                                         TypeCount* count,
                                         const DumpArgs& args) {
  char buf[1024];
  int len;
  const char* mangled_type_name = static_cast<const char*>(ptr);
  len = snprintf(buf, sizeof(buf), "%6d: %8"PRId64" @ %s\n",
                 count->objects, count->bytes,
                 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
  RawWrite(args.fd, buf, len);
}
#endif  // defined(TYPE_PROFILING)

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

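// Each object that is neither live nor ignored is written out above as its
// own one-allocation bucket; as a side effect, the live bit of every live
// object is cleared so the next pass starts from a clean slate.
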
inline
void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char addr[16];
  snprintf(addr, 16, "0x%08" PRIxPTR, reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

inline
void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

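// The resulting on-disk layout mirrors FillOrderedProfile(): the
// "heap profile: " header and totals line, then one record per non-live
// object, then the MAPPED_LIBRARIES section taken from /proc/self/maps.
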
void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, 1000, "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report. We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %"PRIuS" bytes "
          "in %"PRIuS" objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %"PRIxPTR" %s\n",
                     reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}