1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
6 // Author: Sainbayar Sukhbaatar
10 #include "deep-heap-profile.h"
12 #ifdef DEEP_HEAP_PROFILE
16 #include <sys/types.h>
18 #include <unistd.h> // for getpagesize and getpid
19 #endif // HAVE_UNISTD_H
21 #include "base/cycleclock.h"
22 #include "base/sysinfo.h"
23 #include "internal_logging.h" // for ASSERT, etc
// File-scope tunables and dump-format tag strings.
// NOTE(review): this listing appears truncated (embedded line numbers jump);
// code below is kept byte-identical.
// Size of the scratch buffer used for whole-profile formatting.
25 static const int kProfilerBufferSize
= 1 << 20;
26 static const int kHashTableSize
= 179999; // Same as heap-profile-table.cc.
// Bytes per entry in /proc/<pid>/pagemap (one 64-bit word per page).
28 static const int PAGEMAP_BYTES
= 8;
29 static const uint64 MAX_ADDRESS
= kuint64max
;
31 // Tag strings in heap profile dumps.
32 static const char kProfileHeader
[] = "heap profile: ";
33 static const char kProfileVersion
[] = "DUMP_DEEP_6";
34 static const char kMMapListHeader
[] = "MMAP_LIST:\n";
35 static const char kGlobalStatsHeader
[] = "GLOBAL_STATS:\n";
36 static const char kStacktraceHeader
[] = "STACKTRACES:\n";
37 static const char kProcSelfMapsHeader
[] = "\nMAPPED_LIBRARIES:\n";
// Column labels used in the stats sections of a dump.
39 static const char kVirtualLabel
[] = "virtual";
40 static const char kCommittedLabel
[] = "committed";
// Names for each MapsRegionType, indexed by the enum.
// NOTE(review): the initializer list is not visible in this listing — confirm
// against the upstream source.
42 const char* DeepHeapProfile::kMapsRegionTypeDict
[] = {
53 #if defined(__linux__)
55 // Implements MemoryResidenceInfoGetterInterface for Linux.
// It answers residence queries by reading /proc/<pid>/pagemap.
// NOTE(review): several declaration lines (access specifiers, the State
// struct wrapper, member fd_, closing brace) are missing from this listing.
56 class MemoryInfoGetterLinux
:
57 public DeepHeapProfile::MemoryResidenceInfoGetterInterface
{
59 MemoryInfoGetterLinux(): fd_(kIllegalRawFD
) {}
60 virtual ~MemoryInfoGetterLinux() {}
62 // Opens /proc/<pid>/pagemap and stores its file descriptor.
63 // The file is kept open while the process is running.
65 // Note that file descriptors need to be refreshed after fork.
66 virtual void Initialize();
68 // Returns the number of resident (including swapped) bytes of the given
69 // memory region from |first_address| to |last_address| inclusive.
70 virtual size_t CommittedSize(uint64 first_address
, uint64 last_address
) const;
// Per-page state decoded from one pagemap entry.
74 bool is_committed
; // Currently, we use only this
81 // Seeks to the offset of the open pagemap file.
82 // It returns true if succeeded.
83 bool Seek(uint64 address
) const;
85 // Reads a pagemap state from the current offset.
86 // It returns true if succeeded.
87 bool Read(State
* state
) const;
// Opens /proc/<pid>/pagemap for this process and caches the descriptor in
// fd_. Must be re-run after fork() since the pid in the path changes.
// NOTE(review): the local |filename| declaration and closing brace are not
// visible in this listing — confirm against upstream.
92 void MemoryInfoGetterLinux::Initialize() {
94 snprintf(filename
, sizeof(filename
), "/proc/%d/pagemap",
95 static_cast<int>(getpid()));
96 fd_
= open(filename
, O_RDONLY
);
// Dcheck-only: callers are expected to tolerate a failed open in release.
97 RAW_DCHECK(fd_
!= -1, "Failed to open /proc/self/pagemap");
// Walks every page overlapping [first_address, last_address] and sums the
// bytes of pages whose pagemap entry says "present or swapped" (committed).
// Partial first/last pages are pro-rated so only the allocation's own bytes
// are counted.
// NOTE(review): listing is truncated — the Seek() call, State declaration,
// early return on read failure and several closing braces are not visible.
100 size_t MemoryInfoGetterLinux::CommittedSize(
101 uint64 first_address
, uint64 last_address
) const {
102 int page_size
= getpagesize();
// Round the start address down to its page boundary.
103 uint64 page_address
= (first_address
/ page_size
) * page_size
;
104 size_t committed_size
= 0;
108 // Check every page on which the allocation resides.
109 while (page_address
<= last_address
) {
110 // Read corresponding physical page.
112 // TODO(dmikurube): Read pagemap in bulk for speed.
113 // TODO(dmikurube): Consider using mincore(2).
114 if (Read(&state
) == false) {
115 // We can't read the last region (e.g. vsyscall).
117 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64
" bytes",
118 first_address
, last_address
- first_address
+ 1);
123 if (state
.is_committed
) {
124 // Calculate the size of the allocation part in this page.
125 size_t bytes
= page_size
;
127 // If looking at the last page in a given region.
128 if (last_address
<= page_address
- 1 + page_size
) {
129 bytes
= last_address
- page_address
+ 1;
132 // If looking at the first page in a given region.
133 if (page_address
< first_address
) {
134 bytes
-= first_address
- page_address
;
137 committed_size
+= bytes
;
// Guard against wrap-around before advancing to the next page.
139 if (page_address
> MAX_ADDRESS
- page_size
) {
142 page_address
+= page_size
;
145 return committed_size
;
// Positions fd_ at the pagemap entry for |address|: one PAGEMAP_BYTES-sized
// record per page. Returns whether the seek landed where expected.
// NOTE(review): the return statement and closing brace are not visible in
// this listing — confirm against upstream.
148 bool MemoryInfoGetterLinux::Seek(uint64 address
) const {
149 int64 index
= (address
/ getpagesize()) * PAGEMAP_BYTES
;
150 int64 offset
= lseek64(fd_
, index
, SEEK_SET
);
151 RAW_DCHECK(offset
== index
, "Failed in seeking.");
// Reads one 64-bit pagemap entry at the current file offset and decodes it
// into |state|. Bit 63 = page present, bit 62 = swapped (see the kernel's
// pagemap documentation); "committed" means either of those is set.
// NOTE(review): the early-return branch after a short read and the closing
// brace are not visible in this listing.
155 bool MemoryInfoGetterLinux::Read(State
* state
) const {
156 static const uint64 U64_1
= 1;
// Low 55 bits hold the page frame number.
157 static const uint64 PFN_FILTER
= (U64_1
<< 55) - U64_1
;
158 static const uint64 PAGE_PRESENT
= U64_1
<< 63;
159 static const uint64 PAGE_SWAP
= U64_1
<< 62;
160 static const uint64 PAGE_RESERVED
= U64_1
<< 61;
161 static const uint64 FLAG_NOPAGE
= U64_1
<< 20;
162 static const uint64 FLAG_KSM
= U64_1
<< 21;
163 static const uint64 FLAG_MMAP
= U64_1
<< 11;
165 uint64 pagemap_value
;
166 int result
= read(fd_
, &pagemap_value
, PAGEMAP_BYTES
);
// A short read means no entry for this page (e.g. past end of mapping).
167 if (result
!= PAGEMAP_BYTES
) {
171 // Check if the page is committed.
172 state
->is_committed
= (pagemap_value
& (PAGE_PRESENT
| PAGE_SWAP
));
174 state
->is_present
= (pagemap_value
& PAGE_PRESENT
);
175 state
->is_swapped
= (pagemap_value
& PAGE_SWAP
);
// Sharing information is not derived from pagemap here.
176 state
->is_shared
= false;
181 #endif // defined(__linux__)
183 } // anonymous namespace
// Default constructor of the abstract residence-info interface; no state.
185 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
186 MemoryResidenceInfoGetterInterface() {}
// Virtual destructor so concrete getters are destroyed through the base.
188 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
189 ~MemoryResidenceInfoGetterInterface() {}
// Factory: returns the platform-specific residence getter.
// NOTE(review): the non-Linux branch (#else/#endif and closing brace) is not
// visible in this listing — presumably it returns NULL; confirm upstream.
191 DeepHeapProfile::MemoryResidenceInfoGetterInterface
*
192 DeepHeapProfile::MemoryResidenceInfoGetterInterface::Create() {
193 #if defined(__linux__)
194 return new MemoryInfoGetterLinux();
// Constructs the deep profiler on top of an existing HeapProfileTable,
// copying |prefix| (the dump-file name prefix) and allocating the shared
// formatting buffer with the profile's own allocator.
// NOTE(review): the |prefix| parameter line, some member initializers and
// the assignments to filename_prefix_/profiler_buffer_ are truncated in
// this listing.
200 DeepHeapProfile::DeepHeapProfile(HeapProfileTable
* heap_profile
,
202 : memory_residence_info_getter_(
203 MemoryResidenceInfoGetterInterface::Create()),
// -1 forces the first FillOrderedProfile() to re-open /proc files.
204 most_recent_pid_(-1),
207 filename_prefix_(NULL
),
208 profiler_buffer_(NULL
),
209 deep_table_(kHashTableSize
, heap_profile
->alloc_
, heap_profile
->dealloc_
),
210 heap_profile_(heap_profile
) {
211 // Copy filename prefix.
212 const int prefix_length
= strlen(prefix
);
214 reinterpret_cast<char*>(heap_profile_
->alloc_(prefix_length
+ 1));
215 memcpy(filename_prefix_
, prefix
, prefix_length
);
216 filename_prefix_
[prefix_length
] = '\0';
// Scratch buffer reused by dumps; freed in the destructor.
219 reinterpret_cast<char*>(heap_profile_
->alloc_(kProfilerBufferSize
));
// Releases the buffers allocated in the constructor via the profile's
// deallocator, and deletes the owned residence getter.
222 DeepHeapProfile::~DeepHeapProfile() {
223 heap_profile_
->dealloc_(profiler_buffer_
);
224 heap_profile_
->dealloc_(filename_prefix_
);
225 delete memory_residence_info_getter_
;
228 // Global malloc() should not be used in this function.
229 // Use LowLevelAlloc if required.
// Writes a complete DUMP_DEEP_6 profile into |raw_buffer| and returns the
// number of bytes written. Also snapshots maps/allocations and writes the
// .buckets side file. NOTE(review): several lines (e.g. the dump_count_
// increment, buffer flushes) are truncated in this listing.
230 int DeepHeapProfile::FillOrderedProfile(char raw_buffer
[], int buffer_size
) {
231 TextBuffer
buffer(raw_buffer
, buffer_size
);
232 TextBuffer
global_buffer(profiler_buffer_
, kProfilerBufferSize
);
235 int64 starting_cycles
= CycleClock::Now();
239 // Re-open files in /proc/pid/ if the process is newly forked one.
240 if (most_recent_pid_
!= getpid()) {
241 most_recent_pid_
= getpid();
243 memory_residence_info_getter_
->Initialize();
244 deep_table_
.ResetIsLogged();
246 // Write maps into "|filename_prefix_|.<pid>.maps".
247 WriteProcMaps(filename_prefix_
, kProfilerBufferSize
, profiler_buffer_
);
250 // Reset committed sizes of buckets.
251 deep_table_
.ResetCommittedSize();
253 // Record committed sizes.
254 stats_
.SnapshotAllocations(this);
256 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
257 // glibc's snprintf internally allocates memory by alloca normally, but it
258 // allocates memory by malloc if large memory is required.
260 buffer
.AppendString(kProfileHeader
, 0);
261 buffer
.AppendString(kProfileVersion
, 0);
262 buffer
.AppendString("\n", 0);
264 // Fill buffer with the global stats.
265 buffer
.AppendString(kMMapListHeader
, 0);
267 stats_
.SnapshotMaps(memory_residence_info_getter_
, this, &buffer
);
269 // Fill buffer with the global stats.
270 buffer
.AppendString(kGlobalStatsHeader
, 0);
272 stats_
.Unparse(&buffer
);
274 buffer
.AppendString(kStacktraceHeader
, 0);
275 buffer
.AppendString(kVirtualLabel
, 10);
276 buffer
.AppendChar(' ');
277 buffer
.AppendString(kCommittedLabel
, 10);
278 buffer
.AppendString("\n", 0);
// Per-bucket stack-trace stats follow the headers.
281 deep_table_
.UnparseForStats(&buffer
);
283 RAW_DCHECK(buffer
.FilledBytes() < buffer_size
, "");
285 // Write the bucket listing into a .bucket file.
286 deep_table_
.WriteForBucketFile(filename_prefix_
, dump_count_
, &global_buffer
);
// Log how long the dump took (diagnostic only).
289 int64 elapsed_cycles
= CycleClock::Now() - starting_cycles
;
290 double elapsed_seconds
= elapsed_cycles
/ CyclesPerSecond();
291 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds
);
294 return buffer
.FilledBytes();
// Accessor — presumably returns the buffer capacity (size_); the body is
// not visible in this listing, confirm against upstream.
297 int DeepHeapProfile::TextBuffer::Size() {
// Accessor — presumably returns the number of bytes written so far
// (cursor_); the body is not visible in this listing, confirm upstream.
301 int DeepHeapProfile::TextBuffer::FilledBytes() {
// Resets the buffer for reuse — presumably rewinds cursor_; the body is
// not visible in this listing, confirm upstream.
305 void DeepHeapProfile::TextBuffer::Clear() {
// Flushes the filled portion of the buffer (cursor_ bytes) to |fd|.
309 void DeepHeapProfile::TextBuffer::Write(RawFD fd
) {
310 RawWrite(fd
, buffer_
, cursor_
);
313 // TODO(dmikurube): These Append* functions should not use snprintf.
// Appends a single character; returns false if the buffer is full.
314 bool DeepHeapProfile::TextBuffer::AppendChar(char v
) {
315 return ForwardCursor(snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%c", v
));
// Appends |s|, right-padded to width |d| when d != 0; returns false on
// overflow. NOTE(review): the |appended| declaration and the if/else
// around the two snprintf calls are truncated in this listing.
318 bool DeepHeapProfile::TextBuffer::AppendString(const char* s
, int d
) {
321 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%s", s
);
323 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%*s", d
, s
);
324 return ForwardCursor(appended
);
// Appends an int, width |d| when d != 0; returns false on overflow.
// NOTE(review): the |appended| declaration and the if/else selecting the
// formatted vs. unformatted snprintf are truncated in this listing.
327 bool DeepHeapProfile::TextBuffer::AppendInt(int v
, int d
) {
330 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%d", v
);
332 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%*d", d
, v
);
333 return ForwardCursor(appended
);
// Appends a long, width |d| when d != 0; returns false on overflow.
// NOTE(review): the |appended| declaration and the if/else are truncated
// in this listing.
336 bool DeepHeapProfile::TextBuffer::AppendLong(long v
, int d
) {
339 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%ld", v
);
341 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%*ld", d
, v
);
342 return ForwardCursor(appended
);
// Appends an unsigned long, width |d| when d != 0; returns false on
// overflow. NOTE(review): the |appended| declaration and the if/else are
// truncated in this listing.
345 bool DeepHeapProfile::TextBuffer::AppendUnsignedLong(unsigned long v
, int d
) {
348 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%lu", v
);
350 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%*lu", d
, v
);
351 return ForwardCursor(appended
);
// Appends an int64 via PRId64, width |d| when d != 0; returns false on
// overflow. NOTE(review): the |appended| declaration and the if/else are
// truncated in this listing.
354 bool DeepHeapProfile::TextBuffer::AppendInt64(int64 v
, int d
) {
357 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%"PRId64
, v
);
359 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%*"PRId64
, d
, v
);
360 return ForwardCursor(appended
);
// Appends a pointer-sized value in hex (zero-padded to |d| digits when
// d != 0); returns false on overflow. NOTE(review): the |appended|
// declaration and the if/else are truncated in this listing.
363 bool DeepHeapProfile::TextBuffer::AppendPtr(uint64 v
, int d
) {
366 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%"PRIxPTR
, v
);
368 appended
= snprintf(buffer_
+ cursor_
, size_
- cursor_
, "%0*"PRIxPTR
, d
, v
);
369 return ForwardCursor(appended
);
// Advances cursor_ by |appended| unless snprintf failed or would have
// truncated. NOTE(review): the failure return and the cursor advance are
// not visible in this listing — confirm against upstream.
372 bool DeepHeapProfile::TextBuffer::ForwardCursor(int appended
) {
373 if (appended
< 0 || appended
>= size_
- cursor_
)
// Formats one stats line for this bucket:
// "<live bytes> <committed bytes> <allocs> <frees> @ <id>\n".
379 void DeepHeapProfile::DeepBucket::UnparseForStats(TextBuffer
* buffer
) {
// Live (virtual) bytes = allocated minus freed.
380 buffer
->AppendInt64(bucket
->alloc_size
- bucket
->free_size
, 10);
381 buffer
->AppendChar(' ');
382 buffer
->AppendInt64(committed_size
, 10);
383 buffer
->AppendChar(' ');
384 buffer
->AppendInt(bucket
->allocs
, 6);
385 buffer
->AppendChar(' ');
386 buffer
->AppendInt(bucket
->frees
, 6);
387 buffer
->AppendString(" @ ", 0);
388 buffer
->AppendInt(id
, 0);
389 buffer
->AppendString("\n", 0);
// Writes one .buckets-file line: id, "mmap"/"malloc", optional type info
// (when TYPE_PROFILING), then the call-stack addresses in hex.
// NOTE(review): the #else/#endif structure and the closing brace are
// truncated in this listing.
392 void DeepHeapProfile::DeepBucket::UnparseForBucketFile(TextBuffer
* buffer
) {
393 buffer
->AppendInt(id
, 0);
394 buffer
->AppendChar(' ');
395 buffer
->AppendString(is_mmap
? "mmap" : "malloc", 0);
397 #if defined(TYPE_PROFILING)
398 buffer
->AppendString(" t0x", 0);
399 buffer
->AppendPtr(reinterpret_cast<uintptr_t>(type
), 0);
401 buffer
->AppendString(" nno_typeinfo", 0);
403 buffer
->AppendString(" n", 0);
404 buffer
->AppendString(type
->name(), 0);
// Emit each stack frame as " 0x<addr>" padded to 8 hex digits.
408 for (int depth
= 0; depth
< bucket
->depth
; depth
++) {
409 buffer
->AppendString(" 0x", 0);
410 buffer
->AppendPtr(reinterpret_cast<uintptr_t>(bucket
->stack
[depth
]), 8);
412 buffer
->AppendString("\n", 0);
// Builds a chained hash table of DeepBucket pointers using the profile's
// low-level allocator; all slots start empty (zeroed).
// NOTE(review): the |table_size| parameter line and some member
// initializers are truncated in this listing.
415 DeepHeapProfile::DeepBucketTable::DeepBucketTable(
417 HeapProfileTable::Allocator alloc
,
418 HeapProfileTable::DeAllocator dealloc
)
420 table_size_(table_size
),
424 const int bytes
= table_size
* sizeof(DeepBucket
*);
425 table_
= reinterpret_cast<DeepBucket
**>(alloc(bytes
));
426 memset(table_
, 0, bytes
);
// Frees every chained DeepBucket, then (presumably) the table array itself.
// NOTE(review): the loop body that frees nodes and the closing braces are
// not visible in this listing — confirm against upstream.
429 DeepHeapProfile::DeepBucketTable::~DeepBucketTable() {
430 ASSERT(table_
!= NULL
);
431 for (int db
= 0; db
< table_size_
; db
++) {
432 for (DeepBucket
* x
= table_
[db
]; x
!= 0; /**/) {
// Finds the DeepBucket for (bucket, is_mmap[, type]); creates and chains a
// new zero-initialized entry when none exists. Hash mixes the Bucket
// pointer, the mmap flag, and (under TYPE_PROFILING) the type name pointer.
// NOTE(review): the |bucket|/|is_mmap| parameter lines, the hash variable
// declaration, FinishHashValue call, and the returns are truncated in this
// listing.
441 DeepHeapProfile::DeepBucket
* DeepHeapProfile::DeepBucketTable::Lookup(
443 #if defined(TYPE_PROFILING)
444 const std::type_info
* type
,
450 AddToHashValue(reinterpret_cast<uintptr_t>(bucket
), &h
);
452 AddToHashValue(1, &h
);
454 AddToHashValue(0, &h
);
457 #if defined(TYPE_PROFILING)
459 AddToHashValue(0, &h
);
461 AddToHashValue(reinterpret_cast<uintptr_t>(type
->name()), &h
);
467 // Lookup stack trace in table
468 unsigned int buck
= ((unsigned int) h
) % table_size_
;
469 for (DeepBucket
* db
= table_
[buck
]; db
!= 0; db
= db
->next
) {
470 if (db
->bucket
== bucket
) {
475 // Create a new bucket
476 DeepBucket
* db
= reinterpret_cast<DeepBucket
*>(alloc_(sizeof(DeepBucket
)));
477 memset(db
, 0, sizeof(*db
));
479 #if defined(TYPE_PROFILING)
482 db
->committed_size
= 0;
483 db
->is_mmap
= is_mmap
;
// Sequential ids identify buckets across the dump and .buckets file.
484 db
->id
= (bucket_id_
++);
485 db
->is_logged
= false;
// Chain at the head of the hash slot.
486 db
->next
= table_
[buck
];
491 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
// Emits a stats line for every non-empty bucket in the table.
// NOTE(review): the inner-for continuation condition and closing braces are
// truncated in this listing.
492 void DeepHeapProfile::DeepBucketTable::UnparseForStats(TextBuffer
* buffer
) {
493 for (int i
= 0; i
< table_size_
; i
++) {
494 for (DeepBucket
* deep_bucket
= table_
[i
];
496 deep_bucket
= deep_bucket
->next
) {
497 Bucket
* bucket
= deep_bucket
->bucket
;
498 if (bucket
->alloc_size
- bucket
->free_size
== 0) {
499 continue; // Skip empty buckets.
501 deep_bucket
->UnparseForStats(buffer
);
// Appends not-yet-logged, non-trivial (>64 live bytes) buckets to
// "<prefix>.<pid>.<dump_count>.buckets", flushing |buffer| to the file as
// it fills. NOTE(review): the |filename| declaration, flush/close calls and
// closing braces are truncated in this listing.
506 void DeepHeapProfile::DeepBucketTable::WriteForBucketFile(
507 const char* prefix
, int dump_count
, TextBuffer
* buffer
) {
509 snprintf(filename
, sizeof(filename
),
510 "%s.%05d.%04d.buckets", prefix
, getpid(), dump_count
);
511 RawFD fd
= RawOpenForWriting(filename
);
512 RAW_DCHECK(fd
!= kIllegalRawFD
, "");
514 for (int i
= 0; i
< table_size_
; i
++) {
515 for (DeepBucket
* deep_bucket
= table_
[i
];
517 deep_bucket
= deep_bucket
->next
) {
518 Bucket
* bucket
= deep_bucket
->bucket
;
519 if (deep_bucket
->is_logged
) {
520 continue; // Skip the bucket if it is already logged.
522 if (bucket
->alloc_size
- bucket
->free_size
<= 64) {
523 continue; // Skip small buckets.
526 deep_bucket
->UnparseForBucketFile(buffer
);
// Mark as logged so later dumps don't repeat this bucket.
527 deep_bucket
->is_logged
= true;
529 // Write to file if buffer 80% full.
530 if (buffer
->FilledBytes() > buffer
->Size() * 0.8) {
// Zeroes committed_size on every bucket before a fresh snapshot.
// NOTE(review): inner-for continuation and closing braces are truncated in
// this listing.
541 void DeepHeapProfile::DeepBucketTable::ResetCommittedSize() {
542 for (int i
= 0; i
< table_size_
; i
++) {
543 for (DeepBucket
* deep_bucket
= table_
[i
];
545 deep_bucket
= deep_bucket
->next
) {
546 deep_bucket
->committed_size
= 0;
// Clears is_logged on every bucket so a forked process re-emits all
// buckets to its own .buckets file.
// NOTE(review): inner-for continuation and closing braces are truncated in
// this listing.
551 void DeepHeapProfile::DeepBucketTable::ResetIsLogged() {
552 for (int i
= 0; i
< table_size_
; i
++) {
553 for (DeepBucket
* deep_bucket
= table_
[i
];
555 deep_bucket
= deep_bucket
->next
) {
556 deep_bucket
->is_logged
= false;
561 // This hash function is from HeapProfileTable::GetBucket.
// Mixes |add| into *hash_value (one step of a Jenkins-style hash).
// NOTE(review): the line that actually adds |add| into the hash appears to
// be truncated out of this listing — confirm against upstream.
563 void DeepHeapProfile::DeepBucketTable::AddToHashValue(
564 uintptr_t add
, uintptr_t* hash_value
) {
566 *hash_value
+= *hash_value
<< 10;
567 *hash_value
^= *hash_value
>> 6;
570 // This hash function is from HeapProfileTable::GetBucket.
// Final avalanche step applied after all AddToHashValue calls.
// NOTE(review): the final "<< 15" mixing line appears truncated out of this
// listing — confirm against upstream.
572 void DeepHeapProfile::DeepBucketTable::FinishHashValue(uintptr_t* hash_value
) {
573 *hash_value
+= *hash_value
<< 3;
574 *hash_value
^= *hash_value
>> 11;
// Resets the counters for a new snapshot (virtual_bytes_ reset appears
// truncated out of this listing — confirm against upstream).
577 void DeepHeapProfile::RegionStats::Initialize() {
579 committed_bytes_
= 0;
// Accumulates the region [first_address, last_address] (inclusive) into
// this stat: full span into virtual_bytes_, resident portion (via the
// getter) into committed_bytes_. NOTE(review): the |committed| declaration,
// second CommittedSize argument and the return are truncated in this
// listing.
582 uint64
DeepHeapProfile::RegionStats::Record(
583 const MemoryResidenceInfoGetterInterface
* memory_residence_info_getter
,
584 uint64 first_address
,
585 uint64 last_address
) {
587 virtual_bytes_
+= static_cast<size_t>(last_address
- first_address
+ 1);
588 committed
= memory_residence_info_getter
->CommittedSize(first_address
,
590 committed_bytes_
+= committed
;
// Emits one aligned stats row: "<name:25> <virtual:12> <committed:12>\n".
594 void DeepHeapProfile::RegionStats::Unparse(const char* name
,
595 TextBuffer
* buffer
) {
596 buffer
->AppendString(name
, 25);
597 buffer
->AppendChar(' ');
598 buffer
->AppendLong(virtual_bytes_
, 12);
599 buffer
->AppendChar(' ');
600 buffer
->AppendLong(committed_bytes_
, 12);
601 buffer
->AppendString("\n", 0);
604 // Snapshots all virtual memory mapping stats by merging mmap(2) records from
605 // MemoryRegionMap and /proc/maps, the OS-level memory mapping information.
606 // Memory regions described in /proc/maps, but which are not created by mmap,
607 // are accounted as "unhooked" memory regions.
609 // This function assumes that every memory region created by mmap is covered
610 // by VMA(s) described in /proc/maps except for http://crbug.com/189114.
611 // Note that memory regions created with mmap don't align with borders of VMAs
612 // in /proc/maps. In other words, a memory region by mmap can cut across many
613 // VMAs. Also, of course a VMA can include many memory regions by mmap.
614 // It means that the following situation happens:
616 // => Virtual address
617 // <----- VMA #1 -----><----- VMA #2 ----->...<----- VMA #3 -----><- VMA #4 ->
618 // ..< mmap #1 >.<- mmap #2 -><- mmap #3 ->...<- mmap #4 ->..<-- mmap #5 -->..
620 // It can happen easily as permission can be changed by mprotect(2) for a part
621 // of a memory region. A change in permission splits VMA(s).
623 // To deal with the situation, this function iterates over MemoryRegionMap and
624 // /proc/maps independently. The iterator for MemoryRegionMap is initialized
625 // at the top outside the loop for /proc/maps, and it goes forward inside the
626 // loop while comparing their addresses.
628 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
// Merges /proc/<pid>/maps VMAs with MemoryRegionMap mmap records (see the
// long comment above), classifying each byte range as hooked (mmap'ed via
// our hooks) or unhooked, and optionally dumping the merged listing into
// |mmap_dump_buffer|. NOTE(review): this listing is heavily truncated —
// several local declarations (flags, inode, filename, buffer), branch
// bodies, the inner do/while head, and closing braces are not visible.
629 void DeepHeapProfile::GlobalStats::SnapshotMaps(
630 const MemoryResidenceInfoGetterInterface
* memory_residence_info_getter
,
631 DeepHeapProfile
* deep_profile
,
632 TextBuffer
* mmap_dump_buffer
) {
// Holds MemoryRegionMap's lock for the whole merge.
633 MemoryRegionMap::LockHolder lock_holder
;
634 ProcMapsIterator::Buffer procmaps_iter_buffer
;
635 ProcMapsIterator
procmaps_iter(0, &procmaps_iter_buffer
);
636 uint64 vma_start_addr
, vma_last_addr
, offset
;
640 enum MapsRegionType type
;
// Reset all per-type accumulators before the walk.
641 for (int i
= 0; i
< NUMBER_OF_MAPS_REGION_TYPES
; ++i
) {
642 all_
[i
].Initialize();
643 unhooked_
[i
].Initialize();
645 profiled_mmap_
.Initialize();
// The MemoryRegionMap iterator advances inside the VMA loop below.
647 MemoryRegionMap::RegionIterator mmap_iter
=
648 MemoryRegionMap::BeginRegionLocked();
649 DeepBucket
* deep_bucket
= GetInformationOfMemoryRegion(
650 mmap_iter
, memory_residence_info_getter
, deep_profile
);
// Iterate over every VMA in /proc/<pid>/maps.
652 while (procmaps_iter
.Next(&vma_start_addr
, &vma_last_addr
,
653 &flags
, &offset
, &inode
, &filename
)) {
654 if (mmap_dump_buffer
) {
656 int written
= procmaps_iter
.FormatLine(buffer
, sizeof(buffer
),
657 vma_start_addr
, vma_last_addr
,
658 flags
, offset
, inode
, filename
, 0);
659 mmap_dump_buffer
->AppendString(buffer
, 0);
662 // 'vma_last_addr' should be the last inclusive address of the region.
664 if (strcmp("[vsyscall]", filename
) == 0) {
665 continue; // Reading pagemap will fail in [vsyscall].
// Classify the VMA by its filename field.
669 if (filename
[0] == '/') {
674 } else if (filename
[0] == '\0' || filename
[0] == '\n') {
676 } else if (strcmp(filename
, "[stack]") == 0) {
682 memory_residence_info_getter
, vma_start_addr
, vma_last_addr
);
684 // TODO(dmikurube): Stop double-counting pagemap.
685 if (MemoryRegionMap::IsRecordingLocked()) {
686 uint64 cursor
= vma_start_addr
;
689 // Iterates over MemoryRegionMap until the iterator moves out of the VMA.
692 cursor
= mmap_iter
->end_addr
;
694 // Don't break here even if mmap_iter == EndRegionLocked().
696 if (mmap_iter
!= MemoryRegionMap::EndRegionLocked()) {
697 deep_bucket
= GetInformationOfMemoryRegion(
698 mmap_iter
, memory_residence_info_getter
, deep_profile
);
703 uint64 last_address_of_unhooked
;
704 // If the next mmap entry is away from the current VMA.
705 if (mmap_iter
== MemoryRegionMap::EndRegionLocked() ||
706 mmap_iter
->start_addr
> vma_last_addr
) {
707 last_address_of_unhooked
= vma_last_addr
;
709 last_address_of_unhooked
= mmap_iter
->start_addr
- 1;
// Account any gap between cursor and the next hooked region as unhooked.
712 if (last_address_of_unhooked
+ 1 > cursor
) {
713 RAW_CHECK(cursor
>= vma_start_addr
,
714 "Wrong calculation for unhooked");
715 RAW_CHECK(last_address_of_unhooked
<= vma_last_addr
,
716 "Wrong calculation for unhooked");
717 uint64 committed_size
= unhooked_
[type
].Record(
718 memory_residence_info_getter
,
720 last_address_of_unhooked
);
721 if (mmap_dump_buffer
) {
722 mmap_dump_buffer
->AppendString("  ", 0);
723 mmap_dump_buffer
->AppendPtr(cursor
, 0);
724 mmap_dump_buffer
->AppendString(" - ", 0);
725 mmap_dump_buffer
->AppendPtr(last_address_of_unhooked
+ 1, 0);
726 mmap_dump_buffer
->AppendString(" unhooked ", 0);
727 mmap_dump_buffer
->AppendString(kMapsRegionTypeDict
[type
], 0);
728 mmap_dump_buffer
->AppendString(" ", 0);
729 mmap_dump_buffer
->AppendInt64(committed_size
, 0);
730 mmap_dump_buffer
->AppendString("\n", 0);
732 cursor
= last_address_of_unhooked
+ 1;
// Dump the hooked region that overlaps this VMA, marking whether it
// starts before ("trailing") or ends after ("continued") the VMA.
735 if (mmap_iter
!= MemoryRegionMap::EndRegionLocked() &&
736 mmap_iter
->start_addr
<= vma_last_addr
&&
738 bool trailing
= mmap_iter
->start_addr
< vma_start_addr
;
739 bool continued
= mmap_iter
->end_addr
- 1 > vma_last_addr
;
740 mmap_dump_buffer
->AppendString(trailing
? " (" : "  ", 0);
741 mmap_dump_buffer
->AppendPtr(mmap_iter
->start_addr
, 0);
742 mmap_dump_buffer
->AppendString(trailing
? ")" : " ", 0);
743 mmap_dump_buffer
->AppendString("-", 0);
744 mmap_dump_buffer
->AppendString(continued
? "(" : " ", 0);
745 mmap_dump_buffer
->AppendPtr(mmap_iter
->end_addr
, 0);
746 mmap_dump_buffer
->AppendString(continued
? ")" : " ", 0);
747 mmap_dump_buffer
->AppendString(" hooked ", 0);
748 mmap_dump_buffer
->AppendString(kMapsRegionTypeDict
[type
], 0);
749 mmap_dump_buffer
->AppendString(" @ ", 0);
750 if (deep_bucket
!= NULL
) {
751 mmap_dump_buffer
->AppendInt(deep_bucket
->id
, 0);
753 mmap_dump_buffer
->AppendInt(0, 0);
755 mmap_dump_buffer
->AppendString("\n", 0);
757 } while (mmap_iter
!= MemoryRegionMap::EndRegionLocked() &&
758 mmap_iter
->end_addr
- 1 <= vma_last_addr
);
762 // TODO(dmikurube): Investigate and fix http://crbug.com/189114.
764 // The total committed memory usage in all_ (from /proc/<pid>/maps) is
765 // sometimes smaller than the sum of the committed mmap'ed addresses and
766 // unhooked regions. Within our observation, the difference was only 4KB
767 // in committed usage, zero in reserved virtual addresses
769 // A guess is that an uncommitted (but reserved) page may become committed
770 // during counting memory usage in the loop above.
772 // The difference is accounted as "ABSENT" to investigate such cases.
774 RegionStats all_total
;
775 RegionStats unhooked_total
;
776 for (int i
= 0; i
< NUMBER_OF_MAPS_REGION_TYPES
; ++i
) {
777 all_total
.AddAnotherRegionStat(all_
[i
]);
778 unhooked_total
.AddAnotherRegionStat(unhooked_
[i
]);
// Any excess of (profiled + unhooked) over the /proc totals goes to ABSENT.
781 size_t absent_virtual
= profiled_mmap_
.virtual_bytes() +
782 unhooked_total
.virtual_bytes() -
783 all_total
.virtual_bytes();
784 if (absent_virtual
> 0)
785 all_
[ABSENT
].AddToVirtualBytes(absent_virtual
);
787 size_t absent_committed
= profiled_mmap_
.committed_bytes() +
788 unhooked_total
.committed_bytes() -
789 all_total
.committed_bytes();
790 if (absent_committed
> 0)
791 all_
[ABSENT
].AddToCommittedBytes(absent_committed
);
// Recomputes per-allocation committed sizes by visiting every live
// allocation in the heap profile with RecordAlloc.
794 void DeepHeapProfile::GlobalStats::SnapshotAllocations(
795 DeepHeapProfile
* deep_profile
) {
796 profiled_malloc_
.Initialize();
798 deep_profile
->heap_profile_
->address_map_
->Iterate(RecordAlloc
, deep_profile
);
// Writes the GLOBAL_STATS section: a consistency line comparing total vs.
// profiled-mmap + nonprofiled committed bytes, a header row, then one row
// per region category.
801 void DeepHeapProfile::GlobalStats::Unparse(TextBuffer
* buffer
) {
802 RegionStats all_total
;
803 RegionStats unhooked_total
;
804 for (int i
= 0; i
< NUMBER_OF_MAPS_REGION_TYPES
; ++i
) {
805 all_total
.AddAnotherRegionStat(all_
[i
]);
806 unhooked_total
.AddAnotherRegionStat(unhooked_
[i
]);
809 // "# total (%lu) %c= profiled-mmap (%lu) + nonprofiled-* (%lu)\n"
810 buffer
->AppendString("# total (", 0);
811 buffer
->AppendUnsignedLong(all_total
.committed_bytes(), 0);
812 buffer
->AppendString(") ", 0);
// '=' when the books balance, '!' when they do not.
813 buffer
->AppendChar(all_total
.committed_bytes() ==
814 profiled_mmap_
.committed_bytes() +
815 unhooked_total
.committed_bytes() ? '=' : '!');
816 buffer
->AppendString("= profiled-mmap (", 0);
817 buffer
->AppendUnsignedLong(profiled_mmap_
.committed_bytes(), 0);
818 buffer
->AppendString(") + nonprofiled-* (", 0);
819 buffer
->AppendUnsignedLong(unhooked_total
.committed_bytes(), 0);
820 buffer
->AppendString(")\n", 0);
822 // "                       virtual    committed"
823 buffer
->AppendString("", 26);
824 buffer
->AppendString(kVirtualLabel
, 12);
825 buffer
->AppendChar(' ');
826 buffer
->AppendString(kCommittedLabel
, 12);
827 buffer
->AppendString("\n", 0);
829 all_total
.Unparse("total", buffer
);
830 all_
[ABSENT
].Unparse("absent", buffer
);
831 all_
[FILE_EXEC
].Unparse("file-exec", buffer
);
832 all_
[FILE_NONEXEC
].Unparse("file-nonexec", buffer
);
833 all_
[ANONYMOUS
].Unparse("anonymous", buffer
);
834 all_
[STACK
].Unparse("stack", buffer
);
835 all_
[OTHER
].Unparse("other", buffer
);
836 unhooked_total
.Unparse("nonprofiled-total", buffer
);
837 unhooked_
[ABSENT
].Unparse("nonprofiled-absent", buffer
);
838 unhooked_
[ANONYMOUS
].Unparse("nonprofiled-anonymous", buffer
);
839 unhooked_
[FILE_EXEC
].Unparse("nonprofiled-file-exec", buffer
);
840 unhooked_
[FILE_NONEXEC
].Unparse("nonprofiled-file-nonexec", buffer
);
841 unhooked_
[STACK
].Unparse("nonprofiled-stack", buffer
);
842 unhooked_
[OTHER
].Unparse("nonprofiled-other", buffer
);
843 profiled_mmap_
.Unparse("profiled-mmap", buffer
);
844 profiled_malloc_
.Unparse("profiled-malloc", buffer
);
// AddressMap::Iterate callback: for one live malloc allocation, measures
// its committed bytes, charges them to its DeepBucket, and accumulates the
// global profiled-malloc totals. NOTE(review): the TYPE_PROFILING argument
// to Lookup is truncated in this listing.
848 void DeepHeapProfile::GlobalStats::RecordAlloc(const void* pointer
,
849 AllocValue
* alloc_value
,
850 DeepHeapProfile
* deep_profile
) {
851 uint64 address
= reinterpret_cast<uintptr_t>(pointer
);
852 size_t committed
= deep_profile
->memory_residence_info_getter_
->CommittedSize(
853 address
, address
+ alloc_value
->bytes
- 1);
855 DeepBucket
* deep_bucket
= deep_profile
->deep_table_
.Lookup(
856 alloc_value
->bucket(),
857 #if defined(TYPE_PROFILING)
860 /* is_mmap */ false);
861 deep_bucket
->committed_size
+= committed
;
862 deep_profile
->stats_
.profiled_malloc_
.AddToVirtualBytes(alloc_value
->bytes
);
863 deep_profile
->stats_
.profiled_malloc_
.AddToCommittedBytes(committed
);
// Measures the committed size of the mmap region at |mmap_iter|, charges it
// to the DeepBucket for the region's allocation stack (if any), adds it to
// the profiled_mmap_ totals, and returns the bucket (NULL when the region
// has no recorded stack). NOTE(review): the remaining Lookup arguments and
// the return statement are truncated in this listing.
866 DeepHeapProfile::DeepBucket
*
867 DeepHeapProfile::GlobalStats::GetInformationOfMemoryRegion(
868 const MemoryRegionMap::RegionIterator
& mmap_iter
,
869 const MemoryResidenceInfoGetterInterface
* memory_residence_info_getter
,
870 DeepHeapProfile
* deep_profile
) {
// end_addr is exclusive, so the last inclusive byte is end_addr - 1.
871 size_t committed
= deep_profile
->memory_residence_info_getter_
->
872 CommittedSize(mmap_iter
->start_addr
, mmap_iter
->end_addr
- 1);
874 // TODO(dmikurube): Store a reference to the bucket in region.
875 Bucket
* bucket
= MemoryRegionMap::GetBucket(
876 mmap_iter
->call_stack_depth
, mmap_iter
->call_stack
);
877 DeepBucket
* deep_bucket
= NULL
;
878 if (bucket
!= NULL
) {
879 deep_bucket
= deep_profile
->deep_table_
.Lookup(
881 #if defined(TYPE_PROFILING)
882 NULL
, // No type information for memory regions by mmap.
885 if (deep_bucket
!= NULL
)
886 deep_bucket
->committed_size
+= committed
;
889 profiled_mmap_
.AddToVirtualBytes(
890 mmap_iter
->end_addr
- mmap_iter
->start_addr
);
891 profiled_mmap_
.AddToCommittedBytes(committed
);
// Dumps this process's /proc/self/maps into "<prefix>.<pid>.maps" using
// |raw_buffer| as scratch space. NOTE(review): the buffer parameters,
// |filename|/|length|/|wrote_all| declarations and the RawClose call are
// truncated in this listing.
897 void DeepHeapProfile::WriteProcMaps(const char* prefix
,
901 snprintf(filename
, sizeof(filename
),
902 "%s.%05d.maps", prefix
, static_cast<int>(getpid()));
904 RawFD fd
= RawOpenForWriting(filename
);
905 RAW_DCHECK(fd
!= kIllegalRawFD
, "");
909 length
= tcmalloc::FillProcSelfMaps(raw_buffer
, buffer_size
, &wrote_all
);
910 RAW_DCHECK(wrote_all
, "");
911 RAW_DCHECK(length
<= buffer_size
, "");
912 RawWrite(fd
, raw_buffer
, length
);
915 #else // DEEP_HEAP_PROFILE
// Stub constructor used when DEEP_HEAP_PROFILE is disabled: only keeps the
// wrapped HeapProfileTable. NOTE(review): the |prefix| parameter line is
// truncated in this listing.
917 DeepHeapProfile::DeepHeapProfile(HeapProfileTable
* heap_profile
,
919 : heap_profile_(heap_profile
) {
// Stub destructor for the disabled-profile build; nothing to release here.
922 DeepHeapProfile::~DeepHeapProfile() {
// Disabled-profile build: delegate straight to the plain heap profiler.
925 int DeepHeapProfile::FillOrderedProfile(char raw_buffer
[], int buffer_size
) {
926 return heap_profile_
->FillOrderedProfile(raw_buffer
, buffer_size
);
929 #endif // DEEP_HEAP_PROFILE