1 // Copyright (c) 2005, Google Inc.
2 // All rights reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 // Author: Sanjay Ghemawat
33 // TODO: Log large allocations
42 #ifdef HAVE_INTTYPES_H
46 #include <fcntl.h> // for open()
53 #include <sys/types.h>
58 #include <gperftools/heap-profiler.h>
60 #include "base/logging.h"
61 #include "base/basictypes.h" // for PRId64, among other things
62 #include "base/googleinit.h"
63 #include "base/commandlineflags.h"
64 #include "malloc_hook-inl.h"
65 #include "tcmalloc_guard.h"
66 #include <gperftools/malloc_hook.h>
67 #include <gperftools/malloc_extension.h>
68 #include "base/spinlock.h"
69 #include "base/low_level_alloc.h"
70 #include "base/sysinfo.h" // for GetUniquePathFromEnv()
71 #include "deep-heap-profile.h"
72 #include "heap-profile-table.h"
73 #include "memory_region_map.h"
78 #define PATH_MAX MAXPATHLEN
80 #define PATH_MAX 4096 // seems conservative for max filename len!
// Names of the configuration knobs read at startup.  On Android there are
// no environment variables, so system properties (set via
// `adb shell setprop prop_name prop_value`) are used instead; property
// names come from <sys/system_properties.h> and are length-limited.
#if defined(__ANDROID__) || defined(ANDROID)
// On android, there are no environment variables.
// Instead, we use system properties, set via:
//   adb shell setprop prop_name prop_value
// From <sys/system_properties.h>,
#define HEAPPROFILE "heapprof"
#define HEAP_PROFILE_ALLOCATION_INTERVAL "heapprof.allocation_interval"
#define HEAP_PROFILE_DEALLOCATION_INTERVAL "heapprof.deallocation_interval"
#define HEAP_PROFILE_INUSE_INTERVAL "heapprof.inuse_interval"
#define HEAP_PROFILE_TIME_INTERVAL "heapprof.time_interval"
#define HEAP_PROFILE_MMAP_LOG "heapprof.mmap_log"
#define HEAP_PROFILE_MMAP "heapprof.mmap"
#define HEAP_PROFILE_ONLY_MMAP "heapprof.only_mmap"
#define DEEP_HEAP_PROFILE "heapprof.deep_heap_profile"
#define DEEP_HEAP_PROFILE_PAGEFRAME "heapprof.deep.pageframe"
#define HEAP_PROFILE_TYPE_STATISTICS "heapprof.type_statistics"
#else  // defined(__ANDROID__) || defined(ANDROID)
#define HEAPPROFILE "HEAPPROFILE"
#define HEAP_PROFILE_ALLOCATION_INTERVAL "HEAP_PROFILE_ALLOCATION_INTERVAL"
#define HEAP_PROFILE_DEALLOCATION_INTERVAL "HEAP_PROFILE_DEALLOCATION_INTERVAL"
#define HEAP_PROFILE_INUSE_INTERVAL "HEAP_PROFILE_INUSE_INTERVAL"
#define HEAP_PROFILE_TIME_INTERVAL "HEAP_PROFILE_TIME_INTERVAL"
#define HEAP_PROFILE_MMAP_LOG "HEAP_PROFILE_MMAP_LOG"
#define HEAP_PROFILE_MMAP "HEAP_PROFILE_MMAP"
#define HEAP_PROFILE_ONLY_MMAP "HEAP_PROFILE_ONLY_MMAP"
#define DEEP_HEAP_PROFILE "DEEP_HEAP_PROFILE"
#define DEEP_HEAP_PROFILE_PAGEFRAME "DEEP_HEAP_PROFILE_PAGEFRAME"
#define HEAP_PROFILE_TYPE_STATISTICS "HEAP_PROFILE_TYPE_STATISTICS"
#endif  // defined(__ANDROID__) || defined(ANDROID)
116 using STL_NAMESPACE::string
;
117 using STL_NAMESPACE::sort
;
119 //----------------------------------------------------------------------
120 // Flags that control heap-profiling
122 // The thread-safety of the profiler depends on these being immutable
123 // after main starts, so don't change them.
124 //----------------------------------------------------------------------
126 DEFINE_int64(heap_profile_allocation_interval
,
127 EnvToInt64(HEAP_PROFILE_ALLOCATION_INTERVAL
, 1 << 30 /*1GB*/),
128 "If non-zero, dump heap profiling information once every "
129 "specified number of bytes allocated by the program since "
131 DEFINE_int64(heap_profile_deallocation_interval
,
132 EnvToInt64(HEAP_PROFILE_DEALLOCATION_INTERVAL
, 0),
133 "If non-zero, dump heap profiling information once every "
134 "specified number of bytes deallocated by the program "
135 "since the last dump.");
136 // We could also add flags that report whenever inuse_bytes changes by
137 // X or -X, but there hasn't been a need for that yet, so we haven't.
138 DEFINE_int64(heap_profile_inuse_interval
,
139 EnvToInt64(HEAP_PROFILE_INUSE_INTERVAL
, 100 << 20 /*100MB*/),
140 "If non-zero, dump heap profiling information whenever "
141 "the high-water memory usage mark increases by the specified "
143 DEFINE_int64(heap_profile_time_interval
,
144 EnvToInt64(HEAP_PROFILE_TIME_INTERVAL
, 0),
145 "If non-zero, dump heap profiling information once every "
146 "specified number of seconds since the last dump.");
147 DEFINE_bool(mmap_log
,
148 EnvToBool(HEAP_PROFILE_MMAP_LOG
, false),
149 "Should mmap/munmap calls be logged?");
150 DEFINE_bool(mmap_profile
,
151 EnvToBool(HEAP_PROFILE_MMAP
, false),
152 "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
153 DEFINE_bool(only_mmap_profile
,
154 EnvToBool(HEAP_PROFILE_ONLY_MMAP
, false),
155 "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
156 "do not profile malloc/new/etc");
157 DEFINE_bool(deep_heap_profile
,
158 EnvToBool(DEEP_HEAP_PROFILE
, false),
159 "If heap-profiling is on, profile deeper (Linux and Android)");
160 DEFINE_int32(deep_heap_profile_pageframe
,
161 EnvToInt(DEEP_HEAP_PROFILE_PAGEFRAME
, 0),
162 "Needs deeper profile. If 1, dump page frame numbers (PFNs). "
163 "If 2, dump page counts (/proc/kpagecount) with PFNs.");
164 #if defined(TYPE_PROFILING)
165 DEFINE_bool(heap_profile_type_statistics
,
166 EnvToBool(HEAP_PROFILE_TYPE_STATISTICS
, false),
167 "If heap-profiling is on, dump type statistics.");
168 #endif // defined(TYPE_PROFILING)
171 //----------------------------------------------------------------------
173 //----------------------------------------------------------------------
175 // A pthread_mutex has way too much lock contention to be used here.
177 // I would like to use Mutex, but it can call malloc(),
178 // which can cause us to fall into an infinite recursion.
180 // So we use a simple spinlock.
181 static SpinLock
heap_lock(SpinLock::LINKER_INITIALIZED
);
183 //----------------------------------------------------------------------
184 // Simple allocator for heap profiler's internal memory
185 //----------------------------------------------------------------------
187 static LowLevelAlloc::Arena
*heap_profiler_memory
;
189 static void* ProfilerMalloc(size_t bytes
) {
190 return LowLevelAlloc::AllocWithArena(bytes
, heap_profiler_memory
);
192 static void ProfilerFree(void* p
) {
193 LowLevelAlloc::Free(p
);
196 // We use buffers of this size in DoGetHeapProfile.
197 static const int kProfileBufferSize
= 1 << 20;
199 // This is a last-ditch buffer we use in DumpProfileLocked in case we
200 // can't allocate more memory from ProfilerMalloc. We expect this
201 // will be used by HeapProfileEndWriter when the application has to
202 // exit due to out-of-memory. This buffer is allocated in
203 // HeapProfilerStart. Access to this must be protected by heap_lock.
204 static char* global_profiler_buffer
= NULL
;
207 //----------------------------------------------------------------------
208 // Profiling control/state data
209 //----------------------------------------------------------------------
211 // Access to all of these is protected by heap_lock.
212 static bool is_on
= false; // If are on as a subsytem.
213 static bool dumping
= false; // Dumping status to prevent recursion
214 static char* filename_prefix
= NULL
; // Prefix used for profile file names
215 // (NULL if no need for dumping yet)
216 static int dump_count
= 0; // How many dumps so far
217 static int64 last_dump_alloc
= 0; // alloc_size when did we last dump
218 static int64 last_dump_free
= 0; // free_size when did we last dump
219 static int64 high_water_mark
= 0; // In-use-bytes at last high-water dump
220 static int64 last_dump_time
= 0; // The time of the last dump
222 static HeapProfileTable
* heap_profile
= NULL
; // the heap profile table
223 static DeepHeapProfile
* deep_profile
= NULL
; // deep memory profiler
225 // Callback to generate a stack trace for an allocation. May be overriden
226 // by an application to provide its own pseudo-stacks.
227 static StackGeneratorFunction stack_generator_function
=
228 HeapProfileTable::GetCallerStackTrace
;
230 //----------------------------------------------------------------------
231 // Profile generation
232 //----------------------------------------------------------------------
234 // Input must be a buffer of size at least 1MB.
235 static char* DoGetHeapProfileLocked(char* buf
, int buflen
) {
236 // We used to be smarter about estimating the required memory and
237 // then capping it to 1MB and generating the profile into that.
238 if (buf
== NULL
|| buflen
< 1)
241 RAW_DCHECK(heap_lock
.IsHeld(), "");
242 int bytes_written
= 0;
244 HeapProfileTable::Stats
const stats
= heap_profile
->total();
245 (void)stats
; // avoid an unused-variable warning in non-debug mode.
246 bytes_written
= heap_profile
->FillOrderedProfile(buf
, buflen
- 1);
247 // FillOrderedProfile should not reduce the set of active mmap-ed regions,
248 // hence MemoryRegionMap will let us remove everything we've added above:
249 RAW_DCHECK(stats
.Equivalent(heap_profile
->total()), "");
250 // if this fails, we somehow removed by FillOrderedProfile
251 // more than we have added.
253 buf
[bytes_written
] = '\0';
254 RAW_DCHECK(bytes_written
== strlen(buf
), "");
259 extern "C" char* GetHeapProfile() {
260 // Use normal malloc: we return the profile to the user to free it:
261 char* buffer
= reinterpret_cast<char*>(malloc(kProfileBufferSize
));
262 SpinLockHolder
l(&heap_lock
);
263 return DoGetHeapProfileLocked(buffer
, kProfileBufferSize
);
// Forward declarations of the allocation hooks installed/removed by
// HeapProfilerStart/HeapProfilerStop; definitions appear below.
static void NewHook(const void* ptr, size_t size);
static void DeleteHook(const void* ptr);
270 // Helper for HeapProfilerDump.
271 static void DumpProfileLocked(const char* reason
) {
272 RAW_DCHECK(heap_lock
.IsHeld(), "");
273 RAW_DCHECK(is_on
, "");
274 RAW_DCHECK(!dumping
, "");
276 if (filename_prefix
== NULL
) return; // we do not yet need dumping
281 char file_name
[1000];
283 snprintf(file_name
, sizeof(file_name
), "%s.%05d.%04d%s",
284 filename_prefix
, getpid(), dump_count
, HeapProfileTable::kFileExt
);
287 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name
, reason
);
288 // We must use file routines that don't access memory, since we hold
289 // a memory lock now.
290 RawFD fd
= RawOpenForWriting(file_name
);
291 if (fd
== kIllegalRawFD
) {
292 RAW_LOG(ERROR
, "Failed dumping heap profile to %s", file_name
);
297 // This case may be impossible, but it's best to be safe.
298 // It's safe to use the global buffer: we're protected by heap_lock.
299 if (global_profiler_buffer
== NULL
) {
300 global_profiler_buffer
=
301 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize
));
305 deep_profile
->DumpOrderedProfile(reason
, global_profiler_buffer
,
306 kProfileBufferSize
, fd
);
308 char* profile
= DoGetHeapProfileLocked(global_profiler_buffer
,
310 RawWrite(fd
, profile
, strlen(profile
));
314 #if defined(TYPE_PROFILING)
315 if (FLAGS_heap_profile_type_statistics
) {
316 snprintf(file_name
, sizeof(file_name
), "%s.%05d.%04d.type",
317 filename_prefix
, getpid(), dump_count
);
318 RAW_VLOG(0, "Dumping type statistics to %s", file_name
);
319 heap_profile
->DumpTypeStatistics(file_name
);
321 #endif // defined(TYPE_PROFILING)
326 //----------------------------------------------------------------------
327 // Profile collection
328 //----------------------------------------------------------------------
330 // Dump a profile after either an allocation or deallocation, if
331 // the memory use has changed enough since the last dump.
332 static void MaybeDumpProfileLocked() {
334 const HeapProfileTable::Stats
& total
= heap_profile
->total();
335 const int64 inuse_bytes
= total
.alloc_size
- total
.free_size
;
336 bool need_to_dump
= false;
338 int64 current_time
= time(NULL
);
339 if (FLAGS_heap_profile_allocation_interval
> 0 &&
341 last_dump_alloc
+ FLAGS_heap_profile_allocation_interval
) {
342 snprintf(buf
, sizeof(buf
), ("%" PRId64
" MB allocated cumulatively, "
343 "%" PRId64
" MB currently in use"),
344 total
.alloc_size
>> 20, inuse_bytes
>> 20);
346 } else if (FLAGS_heap_profile_deallocation_interval
> 0 &&
348 last_dump_free
+ FLAGS_heap_profile_deallocation_interval
) {
349 snprintf(buf
, sizeof(buf
), ("%" PRId64
" MB freed cumulatively, "
350 "%" PRId64
" MB currently in use"),
351 total
.free_size
>> 20, inuse_bytes
>> 20);
353 } else if (FLAGS_heap_profile_inuse_interval
> 0 &&
355 high_water_mark
+ FLAGS_heap_profile_inuse_interval
) {
356 snprintf(buf
, sizeof(buf
), "%" PRId64
" MB currently in use",
359 } else if (FLAGS_heap_profile_time_interval
> 0 &&
360 current_time
- last_dump_time
>=
361 FLAGS_heap_profile_time_interval
) {
362 snprintf(buf
, sizeof(buf
), "%" PRId64
" sec since the last dump",
363 current_time
- last_dump_time
);
365 last_dump_time
= current_time
;
368 DumpProfileLocked(buf
);
370 last_dump_alloc
= total
.alloc_size
;
371 last_dump_free
= total
.free_size
;
372 if (inuse_bytes
> high_water_mark
)
373 high_water_mark
= inuse_bytes
;
378 // Record an allocation in the profile.
379 static void RecordAlloc(const void* ptr
, size_t bytes
, int skip_count
) {
380 // Take the stack trace outside the critical section.
381 void* stack
[HeapProfileTable::kMaxStackDepth
];
382 int depth
= stack_generator_function(skip_count
+ 1, stack
);
383 SpinLockHolder
l(&heap_lock
);
385 heap_profile
->RecordAlloc(ptr
, bytes
, depth
, stack
);
386 MaybeDumpProfileLocked();
390 // Record a deallocation in the profile.
391 static void RecordFree(const void* ptr
) {
392 SpinLockHolder
l(&heap_lock
);
394 heap_profile
->RecordFree(ptr
);
395 MaybeDumpProfileLocked();
399 //----------------------------------------------------------------------
400 // Allocation/deallocation hooks for MallocHook
401 //----------------------------------------------------------------------
404 void NewHook(const void* ptr
, size_t size
) {
405 if (ptr
!= NULL
) RecordAlloc(ptr
, size
, 0);
409 void DeleteHook(const void* ptr
) {
410 if (ptr
!= NULL
) RecordFree(ptr
);
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
// Sink passed to DumpStackTrace: forwards each frame line to RAW_LOG.
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
421 static void MmapHook(const void* result
, const void* start
, size_t size
,
422 int prot
, int flags
, int fd
, off_t offset
) {
423 if (FLAGS_mmap_log
) { // log it
424 // We use PRIxS not just '%p' to avoid deadlocks
425 // in pretty-printing of NULL as "nil".
426 // TODO(maxim): instead should use a safe snprintf reimplementation
428 "mmap(start=0x%" PRIxPTR
", len=%" PRIuS
", prot=0x%x, flags=0x%x, "
429 "fd=%d, offset=0x%x) = 0x%" PRIxPTR
,
430 (uintptr_t) start
, size
, prot
, flags
, fd
, (unsigned int) offset
,
432 #ifdef TODO_REENABLE_STACK_TRACING
433 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
438 static void MremapHook(const void* result
, const void* old_addr
,
439 size_t old_size
, size_t new_size
,
440 int flags
, const void* new_addr
) {
441 if (FLAGS_mmap_log
) { // log it
442 // We use PRIxS not just '%p' to avoid deadlocks
443 // in pretty-printing of NULL as "nil".
444 // TODO(maxim): instead should use a safe snprintf reimplementation
446 "mremap(old_addr=0x%" PRIxPTR
", old_size=%" PRIuS
", "
447 "new_size=%" PRIuS
", flags=0x%x, new_addr=0x%" PRIxPTR
") = "
449 (uintptr_t) old_addr
, old_size
, new_size
, flags
,
450 (uintptr_t) new_addr
, (uintptr_t) result
);
451 #ifdef TODO_REENABLE_STACK_TRACING
452 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
457 static void MunmapHook(const void* ptr
, size_t size
) {
458 if (FLAGS_mmap_log
) { // log it
459 // We use PRIxS not just '%p' to avoid deadlocks
460 // in pretty-printing of NULL as "nil".
461 // TODO(maxim): instead should use a safe snprintf reimplementation
462 RAW_LOG(INFO
, "munmap(start=0x%" PRIxPTR
", len=%" PRIuS
")",
463 (uintptr_t) ptr
, size
);
464 #ifdef TODO_REENABLE_STACK_TRACING
465 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
470 static void SbrkHook(const void* result
, ptrdiff_t increment
) {
471 if (FLAGS_mmap_log
) { // log it
472 RAW_LOG(INFO
, "sbrk(inc=%" PRIdS
") = 0x%" PRIxPTR
,
473 increment
, (uintptr_t) result
);
474 #ifdef TODO_REENABLE_STACK_TRACING
475 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
480 //----------------------------------------------------------------------
481 // Starting/stopping/dumping
482 //----------------------------------------------------------------------
484 extern "C" void HeapProfilerStart(const char* prefix
) {
485 SpinLockHolder
l(&heap_lock
);
491 RAW_VLOG(0, "Starting tracking the heap");
493 // This should be done before the hooks are set up, since it should
494 // call new, and we want that to be accounted for correctly.
495 MallocExtension::Initialize();
497 if (FLAGS_only_mmap_profile
) {
498 FLAGS_mmap_profile
= true;
501 if (FLAGS_mmap_profile
) {
502 // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
503 // call stack traces of at least size kMaxStackDepth:
504 MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth
,
505 /* use_buckets */ true);
508 if (FLAGS_mmap_log
) {
509 // Install our hooks to do the logging:
510 RAW_CHECK(MallocHook::AddMmapHook(&MmapHook
), "");
511 RAW_CHECK(MallocHook::AddMremapHook(&MremapHook
), "");
512 RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook
), "");
513 RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook
), "");
516 heap_profiler_memory
=
517 LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
519 // Reserve space now for the heap profiler, so we can still write a
520 // heap profile even if the application runs out of memory.
521 global_profiler_buffer
=
522 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize
));
524 heap_profile
= new(ProfilerMalloc(sizeof(HeapProfileTable
)))
525 HeapProfileTable(ProfilerMalloc
, ProfilerFree
, FLAGS_mmap_profile
);
532 if (FLAGS_deep_heap_profile
) {
533 // Initialize deep memory profiler
534 RAW_VLOG(0, "[%d] Starting a deep memory profiler", getpid());
535 deep_profile
= new(ProfilerMalloc(sizeof(DeepHeapProfile
)))
536 DeepHeapProfile(heap_profile
, prefix
, DeepHeapProfile::PageFrameType(
537 FLAGS_deep_heap_profile_pageframe
));
540 // We do not reset dump_count so if the user does a sequence of
541 // HeapProfilerStart/HeapProfileStop, we will get a continuous
542 // sequence of profiles.
544 if (FLAGS_only_mmap_profile
== false) {
545 // Now set the hooks that capture new/delete and malloc/free.
546 RAW_CHECK(MallocHook::AddNewHook(&NewHook
), "");
547 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook
), "");
550 // Copy filename prefix only if provided.
553 RAW_DCHECK(filename_prefix
== NULL
, "");
554 const int prefix_length
= strlen(prefix
);
555 filename_prefix
= reinterpret_cast<char*>(ProfilerMalloc(prefix_length
+ 1));
556 memcpy(filename_prefix
, prefix
, prefix_length
);
557 filename_prefix
[prefix_length
] = '\0';
560 extern "C" void HeapProfilerWithPseudoStackStart(
561 StackGeneratorFunction callback
) {
563 // Ensure the callback is set before allocations can be recorded.
564 SpinLockHolder
l(&heap_lock
);
565 stack_generator_function
= callback
;
567 HeapProfilerStart(NULL
);
570 extern "C" void IterateAllocatedObjects(AddressVisitor visitor
, void* data
) {
571 SpinLockHolder
l(&heap_lock
);
575 heap_profile
->IterateAllocationAddresses(visitor
, data
);
578 extern "C" int IsHeapProfilerRunning() {
579 SpinLockHolder
l(&heap_lock
);
580 return is_on
? 1 : 0; // return an int, because C code doesn't have bool
583 extern "C" void HeapProfilerStop() {
584 SpinLockHolder
l(&heap_lock
);
588 if (FLAGS_only_mmap_profile
== false) {
589 // Unset our new/delete hooks, checking they were set:
590 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook
), "");
591 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook
), "");
593 if (FLAGS_mmap_log
) {
594 // Restore mmap/sbrk hooks, checking that our hooks were set:
595 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook
), "");
596 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook
), "");
597 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook
), "");
598 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook
), "");
602 // free deep memory profiler
603 deep_profile
->~DeepHeapProfile();
604 ProfilerFree(deep_profile
);
609 heap_profile
->~HeapProfileTable();
610 ProfilerFree(heap_profile
);
613 // free output-buffer memory
614 ProfilerFree(global_profiler_buffer
);
617 ProfilerFree(filename_prefix
);
618 filename_prefix
= NULL
;
620 if (!LowLevelAlloc::DeleteArena(heap_profiler_memory
)) {
621 RAW_LOG(FATAL
, "Memory leak in HeapProfiler:");
624 if (FLAGS_mmap_profile
) {
625 MemoryRegionMap::Shutdown();
631 extern "C" void HeapProfilerDump(const char* reason
) {
632 SpinLockHolder
l(&heap_lock
);
633 if (is_on
&& !dumping
) {
634 DumpProfileLocked(reason
);
638 extern "C" void HeapProfilerMarkBaseline() {
639 SpinLockHolder
l(&heap_lock
);
643 heap_profile
->MarkCurrentAllocations(HeapProfileTable::MARK_ONE
);
646 extern "C" void HeapProfilerMarkInteresting() {
647 SpinLockHolder
l(&heap_lock
);
651 heap_profile
->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO
);
654 extern "C" void HeapProfilerDumpAliveObjects(const char* filename
) {
655 SpinLockHolder
l(&heap_lock
);
659 heap_profile
->DumpMarkedObjects(HeapProfileTable::MARK_TWO
, filename
);
662 //----------------------------------------------------------------------
663 // Initialization/finalization code
664 //----------------------------------------------------------------------
665 #if defined(ENABLE_PROFILING)
666 // Initialization code
667 static void HeapProfilerInit() {
668 // Everything after this point is for setting up the profiler based on envvar
669 char fname
[PATH_MAX
];
670 if (!GetUniquePathFromEnv(HEAPPROFILE
, fname
)) {
673 // We do a uid check so we don't write out files in a setuid executable.
675 if (getuid() != geteuid()) {
676 RAW_LOG(WARNING
, ("HeapProfiler: ignoring " HEAPPROFILE
" because "
677 "program seems to be setuid\n"));
682 HeapProfileTable::CleanupOldProfiles(fname
);
684 HeapProfilerStart(fname
);
687 // class used for finalization -- dumps the heap-profile at program exit
688 struct HeapProfileEndWriter
{
689 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
692 // We want to make sure tcmalloc is up and running before starting the profiler
693 static const TCMallocGuard tcmalloc_initializer
;
694 REGISTER_MODULE_INITIALIZER(heapprofiler
, HeapProfilerInit());
695 static HeapProfileEndWriter heap_profile_end_writer
;
696 #endif // defined(ENABLE_PROFILING)