1 // Copyright (c) 2005, Google Inc.
2 // All rights reserved.
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 // Author: Sanjay Ghemawat
33 // TODO: Log large allocations
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>   // for PRId64 / PRIxPTR format macros
#endif
#include <fcntl.h>      // for open()
#include <sys/types.h>
58 #include <gperftools/heap-profiler.h>
60 #include "base/logging.h"
61 #include "base/basictypes.h" // for PRId64, among other things
62 #include "base/googleinit.h"
63 #include "base/commandlineflags.h"
64 #include "malloc_hook-inl.h"
65 #include "tcmalloc_guard.h"
66 #include <gperftools/malloc_hook.h>
67 #include <gperftools/malloc_extension.h>
68 #include "base/spinlock.h"
69 #include "base/low_level_alloc.h"
70 #include "base/sysinfo.h" // for GetUniquePathFromEnv()
71 #include "heap-profile-table.h"
72 #include "memory_region_map.h"
77 #define PATH_MAX MAXPATHLEN
79 #define PATH_MAX 4096 // seems conservative for max filename len!
83 using STL_NAMESPACE::string
;
84 using STL_NAMESPACE::sort
;
86 //----------------------------------------------------------------------
87 // Flags that control heap-profiling
89 // The thread-safety of the profiler depends on these being immutable
90 // after main starts, so don't change them.
91 //----------------------------------------------------------------------
93 DEFINE_int64(heap_profile_allocation_interval
,
94 EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
95 "If non-zero, dump heap profiling information once every "
96 "specified number of bytes allocated by the program since "
98 DEFINE_int64(heap_profile_deallocation_interval
,
99 EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
100 "If non-zero, dump heap profiling information once every "
101 "specified number of bytes deallocated by the program "
102 "since the last dump.");
103 // We could also add flags that report whenever inuse_bytes changes by
104 // X or -X, but there hasn't been a need for that yet, so we haven't.
105 DEFINE_int64(heap_profile_inuse_interval
,
106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
107 "If non-zero, dump heap profiling information whenever "
108 "the high-water memory usage mark increases by the specified "
110 DEFINE_bool(mmap_log
,
111 EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
112 "Should mmap/munmap calls be logged?");
113 DEFINE_bool(mmap_profile
,
114 EnvToBool("HEAP_PROFILE_MMAP", false),
115 "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
116 DEFINE_bool(only_mmap_profile
,
117 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
118 "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
119 "do not profile malloc/new/etc");
122 //----------------------------------------------------------------------
124 //----------------------------------------------------------------------
126 // A pthread_mutex has way too much lock contention to be used here.
128 // I would like to use Mutex, but it can call malloc(),
129 // which can cause us to fall into an infinite recursion.
131 // So we use a simple spinlock.
132 static SpinLock
heap_lock(SpinLock::LINKER_INITIALIZED
);
134 //----------------------------------------------------------------------
135 // Simple allocator for heap profiler's internal memory
136 //----------------------------------------------------------------------
138 static LowLevelAlloc::Arena
*heap_profiler_memory
;
140 static void* ProfilerMalloc(size_t bytes
) {
141 return LowLevelAlloc::AllocWithArena(bytes
, heap_profiler_memory
);
143 static void ProfilerFree(void* p
) {
144 LowLevelAlloc::Free(p
);
147 // We use buffers of this size in DoGetHeapProfile.
148 static const int kProfileBufferSize
= 1 << 20;
150 // This is a last-ditch buffer we use in DumpProfileLocked in case we
151 // can't allocate more memory from ProfilerMalloc. We expect this
152 // will be used by HeapProfileEndWriter when the application has to
153 // exit due to out-of-memory. This buffer is allocated in
154 // HeapProfilerStart. Access to this must be protected by heap_lock.
155 static char* global_profiler_buffer
= NULL
;
158 //----------------------------------------------------------------------
159 // Profiling control/state data
160 //----------------------------------------------------------------------
162 // Access to all of these is protected by heap_lock.
163 static bool is_on
= false; // If are on as a subsytem.
164 static bool dumping
= false; // Dumping status to prevent recursion
165 static char* filename_prefix
= NULL
; // Prefix used for profile file names
166 // (NULL if no need for dumping yet)
167 static int dump_count
= 0; // How many dumps so far
168 static int64 last_dump_alloc
= 0; // alloc_size when did we last dump
169 static int64 last_dump_free
= 0; // free_size when did we last dump
170 static int64 high_water_mark
= 0; // In-use-bytes at last high-water dump
172 static HeapProfileTable
* heap_profile
= NULL
; // the heap profile table
174 //----------------------------------------------------------------------
175 // Profile generation
176 //----------------------------------------------------------------------
178 // Input must be a buffer of size at least 1MB.
179 static char* DoGetHeapProfileLocked(char* buf
, int buflen
) {
180 // We used to be smarter about estimating the required memory and
181 // then capping it to 1MB and generating the profile into that.
182 if (buf
== NULL
|| buflen
< 1)
185 RAW_DCHECK(heap_lock
.IsHeld(), "");
186 int bytes_written
= 0;
188 if (FLAGS_mmap_profile
) {
189 heap_profile
->RefreshMMapData();
191 bytes_written
= heap_profile
->FillOrderedProfile(buf
, buflen
- 1);
192 if (FLAGS_mmap_profile
) {
193 heap_profile
->ClearMMapData();
196 buf
[bytes_written
] = '\0';
197 RAW_DCHECK(bytes_written
== strlen(buf
), "");
202 extern "C" char* GetHeapProfile() {
203 // Use normal malloc: we return the profile to the user to free it:
204 char* buffer
= reinterpret_cast<char*>(malloc(kProfileBufferSize
));
205 SpinLockHolder
l(&heap_lock
);
206 return DoGetHeapProfileLocked(buffer
, kProfileBufferSize
);
// Forward declarations of the malloc hooks installed while profiling;
// definitions are below.
static void NewHook(const void* ptr, size_t size);
static void DeleteHook(const void* ptr);
213 // Helper for HeapProfilerDump.
214 static void DumpProfileLocked(const char* reason
) {
215 RAW_DCHECK(heap_lock
.IsHeld(), "");
216 RAW_DCHECK(is_on
, "");
217 RAW_DCHECK(!dumping
, "");
219 if (filename_prefix
== NULL
) return; // we do not yet need dumping
224 char file_name
[1000];
226 snprintf(file_name
, sizeof(file_name
), "%s.%04d%s",
227 filename_prefix
, dump_count
, HeapProfileTable::kFileExt
);
230 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name
, reason
);
231 // We must use file routines that don't access memory, since we hold
232 // a memory lock now.
233 RawFD fd
= RawOpenForWriting(file_name
);
234 if (fd
== kIllegalRawFD
) {
235 RAW_LOG(ERROR
, "Failed dumping heap profile to %s", file_name
);
240 // This case may be impossible, but it's best to be safe.
241 // It's safe to use the global buffer: we're protected by heap_lock.
242 if (global_profiler_buffer
== NULL
) {
243 global_profiler_buffer
=
244 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize
));
247 char* profile
= DoGetHeapProfileLocked(global_profiler_buffer
,
249 RawWrite(fd
, profile
, strlen(profile
));
255 //----------------------------------------------------------------------
256 // Profile collection
257 //----------------------------------------------------------------------
259 // Dump a profile after either an allocation or deallocation, if
260 // the memory use has changed enough since the last dump.
261 static void MaybeDumpProfileLocked() {
263 const HeapProfileTable::Stats
& total
= heap_profile
->total();
264 const int64 inuse_bytes
= total
.alloc_size
- total
.free_size
;
265 bool need_to_dump
= false;
267 if (FLAGS_heap_profile_allocation_interval
> 0 &&
269 last_dump_alloc
+ FLAGS_heap_profile_allocation_interval
) {
270 snprintf(buf
, sizeof(buf
), ("%"PRId64
" MB allocated cumulatively, "
271 "%"PRId64
" MB currently in use"),
272 total
.alloc_size
>> 20, inuse_bytes
>> 20);
274 } else if (FLAGS_heap_profile_deallocation_interval
> 0 &&
276 last_dump_free
+ FLAGS_heap_profile_deallocation_interval
) {
277 snprintf(buf
, sizeof(buf
), ("%"PRId64
" MB freed cumulatively, "
278 "%"PRId64
" MB currently in use"),
279 total
.free_size
>> 20, inuse_bytes
>> 20);
281 } else if (FLAGS_heap_profile_inuse_interval
> 0 &&
283 high_water_mark
+ FLAGS_heap_profile_inuse_interval
) {
284 snprintf(buf
, sizeof(buf
), "%"PRId64
" MB currently in use",
289 DumpProfileLocked(buf
);
291 last_dump_alloc
= total
.alloc_size
;
292 last_dump_free
= total
.free_size
;
293 if (inuse_bytes
> high_water_mark
)
294 high_water_mark
= inuse_bytes
;
299 // Record an allocation in the profile.
300 static void RecordAlloc(const void* ptr
, size_t bytes
, int skip_count
) {
301 // Take the stack trace outside the critical section.
302 void* stack
[HeapProfileTable::kMaxStackDepth
];
303 int depth
= HeapProfileTable::GetCallerStackTrace(skip_count
+ 1, stack
);
304 SpinLockHolder
l(&heap_lock
);
306 heap_profile
->RecordAlloc(ptr
, bytes
, depth
, stack
);
307 MaybeDumpProfileLocked();
311 // Record a deallocation in the profile.
312 static void RecordFree(const void* ptr
) {
313 SpinLockHolder
l(&heap_lock
);
315 heap_profile
->RecordFree(ptr
);
316 MaybeDumpProfileLocked();
320 //----------------------------------------------------------------------
321 // Allocation/deallocation hooks for MallocHook
322 //----------------------------------------------------------------------
325 void NewHook(const void* ptr
, size_t size
) {
326 if (ptr
!= NULL
) RecordAlloc(ptr
, size
, 0);
330 void DeleteHook(const void* ptr
) {
331 if (ptr
!= NULL
) RecordFree(ptr
);
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
342 static void MmapHook(const void* result
, const void* start
, size_t size
,
343 int prot
, int flags
, int fd
, off_t offset
) {
344 if (FLAGS_mmap_log
) { // log it
345 // We use PRIxS not just '%p' to avoid deadlocks
346 // in pretty-printing of NULL as "nil".
347 // TODO(maxim): instead should use a safe snprintf reimplementation
349 "mmap(start=0x%"PRIxPTR
", len=%"PRIuS
", prot=0x%x, flags=0x%x, "
350 "fd=%d, offset=0x%x) = 0x%"PRIxPTR
"",
351 (uintptr_t) start
, size
, prot
, flags
, fd
, (unsigned int) offset
,
353 #ifdef TODO_REENABLE_STACK_TRACING
354 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
359 static void MremapHook(const void* result
, const void* old_addr
,
360 size_t old_size
, size_t new_size
,
361 int flags
, const void* new_addr
) {
362 if (FLAGS_mmap_log
) { // log it
363 // We use PRIxS not just '%p' to avoid deadlocks
364 // in pretty-printing of NULL as "nil".
365 // TODO(maxim): instead should use a safe snprintf reimplementation
367 "mremap(old_addr=0x%"PRIxPTR
", old_size=%"PRIuS
", "
368 "new_size=%"PRIuS
", flags=0x%x, new_addr=0x%"PRIxPTR
") = "
370 (uintptr_t) old_addr
, old_size
, new_size
, flags
,
371 (uintptr_t) new_addr
, (uintptr_t) result
);
372 #ifdef TODO_REENABLE_STACK_TRACING
373 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
378 static void MunmapHook(const void* ptr
, size_t size
) {
379 if (FLAGS_mmap_log
) { // log it
380 // We use PRIxS not just '%p' to avoid deadlocks
381 // in pretty-printing of NULL as "nil".
382 // TODO(maxim): instead should use a safe snprintf reimplementation
383 RAW_LOG(INFO
, "munmap(start=0x%"PRIxPTR
", len=%"PRIuS
")",
384 (uintptr_t) ptr
, size
);
385 #ifdef TODO_REENABLE_STACK_TRACING
386 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
391 static void SbrkHook(const void* result
, ptrdiff_t increment
) {
392 if (FLAGS_mmap_log
) { // log it
393 RAW_LOG(INFO
, "sbrk(inc=%"PRIdS
") = 0x%"PRIxPTR
"",
394 increment
, (uintptr_t) result
);
395 #ifdef TODO_REENABLE_STACK_TRACING
396 DumpStackTrace(1, RawInfoStackDumper
, NULL
);
401 //----------------------------------------------------------------------
402 // Starting/stopping/dumping
403 //----------------------------------------------------------------------
405 extern "C" void HeapProfilerStart(const char* prefix
) {
406 SpinLockHolder
l(&heap_lock
);
412 RAW_VLOG(0, "Starting tracking the heap");
414 // This should be done before the hooks are set up, since it should
415 // call new, and we want that to be accounted for correctly.
416 MallocExtension::Initialize();
418 if (FLAGS_only_mmap_profile
) {
419 FLAGS_mmap_profile
= true;
422 if (FLAGS_mmap_profile
) {
423 // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
424 // call stack traces of at least size kMaxStackDepth:
425 MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth
);
428 if (FLAGS_mmap_log
) {
429 // Install our hooks to do the logging:
430 RAW_CHECK(MallocHook::AddMmapHook(&MmapHook
), "");
431 RAW_CHECK(MallocHook::AddMremapHook(&MremapHook
), "");
432 RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook
), "");
433 RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook
), "");
436 heap_profiler_memory
=
437 LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
439 // Reserve space now for the heap profiler, so we can still write a
440 // heap profile even if the application runs out of memory.
441 global_profiler_buffer
=
442 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize
));
444 heap_profile
= new(ProfilerMalloc(sizeof(HeapProfileTable
)))
445 HeapProfileTable(ProfilerMalloc
, ProfilerFree
);
451 // We do not reset dump_count so if the user does a sequence of
452 // HeapProfilerStart/HeapProfileStop, we will get a continuous
453 // sequence of profiles.
455 if (FLAGS_only_mmap_profile
== false) {
456 // Now set the hooks that capture new/delete and malloc/free.
457 RAW_CHECK(MallocHook::AddNewHook(&NewHook
), "");
458 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook
), "");
461 // Copy filename prefix
462 RAW_DCHECK(filename_prefix
== NULL
, "");
463 const int prefix_length
= strlen(prefix
);
464 filename_prefix
= reinterpret_cast<char*>(ProfilerMalloc(prefix_length
+ 1));
465 memcpy(filename_prefix
, prefix
, prefix_length
);
466 filename_prefix
[prefix_length
] = '\0';
469 extern "C" int IsHeapProfilerRunning() {
470 SpinLockHolder
l(&heap_lock
);
471 return is_on
? 1 : 0; // return an int, because C code doesn't have bool
474 extern "C" void HeapProfilerStop() {
475 SpinLockHolder
l(&heap_lock
);
479 if (FLAGS_only_mmap_profile
== false) {
480 // Unset our new/delete hooks, checking they were set:
481 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook
), "");
482 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook
), "");
484 if (FLAGS_mmap_log
) {
485 // Restore mmap/sbrk hooks, checking that our hooks were set:
486 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook
), "");
487 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook
), "");
488 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook
), "");
489 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook
), "");
493 heap_profile
->~HeapProfileTable();
494 ProfilerFree(heap_profile
);
497 // free output-buffer memory
498 ProfilerFree(global_profiler_buffer
);
501 ProfilerFree(filename_prefix
);
502 filename_prefix
= NULL
;
504 if (!LowLevelAlloc::DeleteArena(heap_profiler_memory
)) {
505 RAW_LOG(FATAL
, "Memory leak in HeapProfiler:");
508 if (FLAGS_mmap_profile
) {
509 MemoryRegionMap::Shutdown();
515 extern "C" void HeapProfilerDump(const char *reason
) {
516 SpinLockHolder
l(&heap_lock
);
517 if (is_on
&& !dumping
) {
518 DumpProfileLocked(reason
);
522 //----------------------------------------------------------------------
523 // Initialization/finalization code
524 //----------------------------------------------------------------------
526 // Initialization code
527 static void HeapProfilerInit() {
528 // Everything after this point is for setting up the profiler based on envvar
529 char fname
[PATH_MAX
];
530 if (!GetUniquePathFromEnv("HEAPPROFILE", fname
)) {
533 // We do a uid check so we don't write out files in a setuid executable.
535 if (getuid() != geteuid()) {
536 RAW_LOG(WARNING
, ("HeapProfiler: ignoring HEAPPROFILE because "
537 "program seems to be setuid\n"));
542 HeapProfileTable::CleanupOldProfiles(fname
);
544 HeapProfilerStart(fname
);
547 // class used for finalization -- dumps the heap-profile at program exit
548 struct HeapProfileEndWriter
{
549 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
552 // We want to make sure tcmalloc is up and running before starting the profiler
553 static const TCMallocGuard tcmalloc_initializer
;
554 REGISTER_MODULE_INITIALIZER(heapprofiler
, HeapProfilerInit());
555 static HeapProfileEndWriter heap_profile_end_writer
;