// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"   // for RawFD
#include "heap-profile-stats.h"

#if defined(TYPE_PROFILING)
#include <gperftools/type_profiler_map.h>
#endif  // defined(TYPE_PROFILING)

// Table that maintains the heap profile data, i.e. the set of currently
// active heap memory allocations. Thread-unsafe and non-reentrant code:
// each instance must be used by one thread at a time, without self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
 public:

  // Extension to be used for heap profile files.
  static const char kFileExt[];

  // Longest stack trace we record.
  static const int kMaxStackDepth = 32;

  // data types ----------------------------

  typedef HeapProfileStats Stats;

  // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New
  // allocations are marked with UNMARKED by default.
  enum AllocationMark {
    UNMARKED = 0,
    MARK_ONE,
    MARK_TWO,
    MARK_THREE
  };

  // Info we can return about an allocation.
  struct AllocInfo {
    size_t object_size;             // size of the allocation
    const void* const* call_stack;  // call stack that made the allocation call
    int stack_depth;                // depth of call_stack
    bool live;
    bool ignored;
  };

  // Info we return about an allocation context.
  // An allocation context is a unique caller stack trace
  // of an allocation operation.
  struct AllocContextInfo : public Stats {
    int stack_depth;                // Depth of stack trace
    const void* const* call_stack;  // Stack trace
  };

  // Memory (de)allocator interface we'll use.
  typedef void* (*Allocator)(size_t size);
  typedef void  (*DeAllocator)(void* ptr);

  // interface ---------------------------

  HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
  ~HeapProfileTable();

  // Collect the stack trace for the function that asked to do the
  // allocation for passing to RecordAlloc() below.
  //
  // The stack trace is stored in 'stack'. The stack depth is returned.
  //
  // 'skip_count' gives the number of stack frames between this call
  // and the memory allocation function.
  static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);

  // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth'
  // and 'call_stack' identify the function that requested the
  // allocation; they can be generated using GetCallerStackTrace() above.
  void RecordAlloc(const void* ptr, size_t bytes,
                   int stack_depth, const void* const call_stack[]);

  // Record the deallocation of memory at 'ptr'.
  void RecordFree(const void* ptr);

  // Return true iff we have recorded an allocation at 'ptr'.
  // If yes, fill *object_size with the allocation byte size.
  bool FindAlloc(const void* ptr, size_t* object_size) const;
  // Same as FindAlloc, but fills all of *info.
  bool FindAllocDetails(const void* ptr, AllocInfo* info) const;

  // Return true iff "ptr" points into a recorded allocation.
  // If yes, fill *object_ptr with the actual allocation address
  // and *object_size with the allocation byte size.
  // max_size specifies the largest currently possible allocation size.
  bool FindInsideAlloc(const void* ptr, size_t max_size,
                       const void** object_ptr, size_t* object_size) const;

  // If "ptr" points to a recorded allocation and it's not marked as live,
  // mark it as live and return true. Else return false.
  // All allocations start as non-live.
  bool MarkAsLive(const void* ptr);

  // If "ptr" points to a recorded allocation, mark it as "ignored".
  // Ignored objects are treated like other objects, except that they
  // are skipped in heap checking reports.
  void MarkAsIgnored(const void* ptr);

  // Mark all currently known allocations with the given AllocationMark.
  void MarkCurrentAllocations(AllocationMark mark);

  // Mark all unmarked allocations (i.e. those marked with
  // AllocationMark::UNMARKED) with the given mark.
  void MarkUnmarkedAllocations(AllocationMark mark);

  // Return current total (de)allocation statistics. It doesn't contain
  // mmap'ed regions.
  const Stats& total() const { return total_; }

  // Allocation data iteration callback: gets passed object pointer and
  // fully-filled AllocInfo.
  typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);

  // Iterate over the allocation profile data calling "callback"
  // for every allocation.
  void IterateAllocs(AllocIterator callback) const {
    address_map_->Iterate(MapArgsAllocIterator, callback);
  }
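
  // Example (illustrative sketch; 'table', the callback and the counter are
  // hypothetical names): tallying the bytes of live, non-ignored objects:
  //
  //   static size_t live_bytes = 0;
  //   static void CountLiveBytes(const void* ptr,
  //                              const HeapProfileTable::AllocInfo& info) {
  //     if (info.live && !info.ignored) live_bytes += info.object_size;
  //   }
  //   ...
  //   table->IterateAllocs(CountLiveBytes);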

  // Callback for iterating through addresses of all allocated objects. Accepts
  // pointer to user data and object pointer.
  typedef void (*AddressIterator)(void* data, const void* ptr);

  // Iterate over the addresses of all allocated objects.
  void IterateAllocationAddresses(AddressIterator, void* data);

  // Allocation context profile data iteration callback
  typedef void (*AllocContextIterator)(const AllocContextInfo& info);

  // Iterate over the allocation context profile data calling "callback"
  // for every allocation context. Allocation contexts are ordered by the
  // size of allocated space.
  void IterateOrderedAllocContexts(AllocContextIterator callback) const;
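
  // Example (illustrative sketch; 'table' and 'PrintContext' are hypothetical
  // names):
  //
  //   static void PrintContext(const HeapProfileTable::AllocContextInfo& info) {
  //     // Stats fields such as alloc_size come from HeapProfileStats.
  //     printf("depth %d: %lld bytes allocated\n", info.stack_depth,
  //            static_cast<long long>(info.alloc_size));
  //   }
  //   table->IterateOrderedAllocContexts(PrintContext);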

  // Fill profile data into buffer 'buf' of size 'size'
  // and return the actual size occupied by the dump in 'buf'.
  // The profile buckets are dumped in the decreasing order
  // of currently allocated bytes.
  // We do not provision for 0-terminating 'buf'.
  int FillOrderedProfile(char buf[], int size) const;

  // Cleanup any old profile files matching prefix + ".*" + kFileExt.
  static void CleanupOldProfiles(const char* prefix);

  // Return a snapshot of the current contents of *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  // The result is only valid while this exists and until
  // the snapshot is discarded by calling ReleaseSnapshot().
  class Snapshot;
  Snapshot* TakeSnapshot();

  // Release a previously taken snapshot. snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-NULL, skip any objects present in "base".
  // As a side-effect, clears the "live" bit on every live object in *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);
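
  // Example (illustrative sketch of how a leak checker might combine the
  // snapshot calls; the names and the overall flow here are hypothetical):
  //
  //   Snapshot* baseline = table->TakeSnapshot();
  //   RunCodeUnderTest();
  //   Snapshot* leaks = table->NonLiveSnapshot(baseline);
  //   if (!leaks->Empty())
  //     leaks->ReportLeaks("my_checker", "/tmp/leaks.heap", false);
  //   table->ReleaseSnapshot(leaks);
  //   table->ReleaseSnapshot(baseline);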

  // Dump a list of allocations marked as "live" along with their creation
  // stack traces and sizes to a file named |file_name|. Together with
  // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used
  // to find objects that are created in a certain time span:
  //   1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the
  //      time span.
  //   2. Perform whatever action you suspect allocates memory that is not
  //      correctly freed.
  //   3. Invoke MarkUnmarkedAllocations(MARK_TWO).
  //   4. Perform whatever action is supposed to free the memory again. New
  //      allocations are not marked. So all allocations that are marked as
  //      "live" were created during step 2.
  //   5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations
  //      that were created during step 2, but survived step 4.
  //
  // Note that this functionality cannot be used if the HeapProfileTable is
  // used for leak checking (using HeapLeakChecker).
  void DumpMarkedObjects(AllocationMark mark, const char* file_name);
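
  // Example (illustrative sketch of the five steps above; 'table', the two
  // helper functions and the output file name are hypothetical):
  //
  //   table->MarkCurrentAllocations(HeapProfileTable::MARK_ONE);   // step 1
  //   SuspectOperation();                                          // step 2
  //   table->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO);  // step 3
  //   CleanupOperation();                                          // step 4
  //   table->DumpMarkedObjects(HeapProfileTable::MARK_TWO,         // step 5
  //                            "/tmp/survivors.dump");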

#if defined(TYPE_PROFILING)
  void DumpTypeStatistics(const char* file_name) const;
#endif  // defined(TYPE_PROFILING)

 private:
  friend class DeepHeapProfile;

  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
  // for a given allocation call stack trace.
  typedef HeapProfileBucket Bucket;

  // Info stored in the address map
  struct AllocValue {
    // Access to the stack-trace bucket
    Bucket* bucket() const {
      return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
    }
    // This also does set_live(false).
    void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
    size_t bytes;  // Number of bytes in this allocation

    // Access to the allocation liveness flag (for leak checking)
    bool live() const { return bucket_rep & kLive; }
    void set_live(bool l) {
      bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
    }

    // Should this allocation be ignored if it looks like a leak?
    bool ignore() const { return bucket_rep & kIgnore; }
    void set_ignore(bool r) {
      bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
    }

    AllocationMark mark() const {
      return static_cast<AllocationMark>(bucket_rep & uintptr_t(kMask));
    }
    void set_mark(AllocationMark mark) {
      bucket_rep = (bucket_rep & ~uintptr_t(kMask)) | uintptr_t(mark);
    }

   private:
    // We store a few bits in the bottom bits of bucket_rep.
    // (Alignment is at least four, so we have at least two bits.)
    static const int kLive = 1;
    static const int kIgnore = 2;
    static const int kMask = kLive | kIgnore;
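    // For example, with at-least-4-byte alignment a Bucket at address 0x...c0
    // stored with kLive set is kept as bucket_rep 0x...c1; bucket() masks
    // kMask back off to recover the pointer, while live(), ignore() and
    // mark() read only these low bits.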

    uintptr_t bucket_rep;
  };

  // helper for FindInsideAlloc
  static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }

  typedef AddressMap<AllocValue> AllocationMap;

  // Arguments that need to be passed DumpBucketIterator callback below.
  struct BufferArgs {
    BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg)
        : buf(buf_arg),
          buflen(buflen_arg),
          bufsize(bufsize_arg) {
    }

    char* buf;
    int buflen;
    int bufsize;

    DISALLOW_COPY_AND_ASSIGN(BufferArgs);
  };

  // Arguments that need to be passed DumpNonLiveIterator callback below.
  struct DumpArgs {
    DumpArgs(RawFD fd_arg, Stats* profile_stats_arg)
        : fd(fd_arg),
          profile_stats(profile_stats_arg) {
    }

    RawFD fd;  // file to write to
    Stats* profile_stats;  // stats to update (may be NULL)
  };

  // Arguments that need to be passed DumpMarkedIterator callback below.
  struct DumpMarkedArgs {
    DumpMarkedArgs(RawFD fd_arg, AllocationMark mark_arg)
        : fd(fd_arg),
          mark(mark_arg) {
    }

    RawFD fd;  // file to write to.
    AllocationMark mark;  // The mark of the allocations to process.
  };

  // Arguments that need to be passed MarkIterator callback below.
  struct MarkArgs {
    MarkArgs(AllocationMark mark_arg, bool mark_all_arg)
        : mark(mark_arg),
          mark_all(mark_all_arg) {
    }

    AllocationMark mark;  // The mark to put on allocations.
    bool mark_all;  // True if all allocations should be marked. Otherwise
                    // just mark unmarked allocations.
  };

#if defined(TYPE_PROFILING)
  struct TypeCount {
    TypeCount(size_t bytes_arg, unsigned int objects_arg)
        : bytes(bytes_arg),
          objects(objects_arg) {
    }

    size_t bytes;
    unsigned int objects;
  };
#endif  // defined(TYPE_PROFILING)

  struct AllocationAddressIteratorArgs {
    AllocationAddressIteratorArgs(AddressIterator callback_arg, void* data_arg)
        : callback(callback_arg),
          data(data_arg) {
    }

    AddressIterator callback;
    void* data;
  };

  // helpers ----------------------------

  // Unparse bucket b and print its portion of profile dump into buf.
  // We return the amount of space in buf that we use. We start printing
  // at buf + buflen, and promise not to go beyond buf + bufsize.
  // We do not provision for 0-terminating 'buf'.
  //
  // If profile_stats is non-NULL, we update *profile_stats by
  // counting bucket b.
  //
  // "extra" is appended to the unparsed bucket. Typically it is empty,
  // but may be set to something like " heapprofile" for the total
  // bucket to indicate the type of the profile.
  static int UnparseBucket(const Bucket& b,
                           char* buf, int buflen, int bufsize,
                           const char* extra,
                           Stats* profile_stats);

  // Get the bucket for the caller stack trace 'key' of depth 'depth'
  // creating the bucket if needed.
  Bucket* GetBucket(int depth, const void* const key[]);

  // Helper for IterateAllocs to do callback signature conversion
  // from AllocationMap::Iterate to AllocIterator.
  static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
                                   AllocIterator callback) {
    AllocInfo info;
    info.object_size = v->bytes;
    info.call_stack = v->bucket()->stack;
    info.stack_depth = v->bucket()->depth;
    info.live = v->live();
    info.ignored = v->ignore();
    callback(ptr, info);
  }

  // Helper to dump a bucket.
  inline static void DumpBucketIterator(const Bucket* bucket,
                                        BufferArgs* args);

  // Helper for IterateAllocationAddresses.
  inline static void AllocationAddressesIterator(
      const void* ptr,
      AllocValue* v,
      const AllocationAddressIteratorArgs& args);

  // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations.
  inline static void MarkIterator(const void* ptr, AllocValue* v,
                                  const MarkArgs& args);

  // Helper for DumpNonLiveProfile to do object-granularity
  // heap profile dumping. It gets passed to AllocationMap::Iterate.
  inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                         const DumpArgs& args);

  // Helper for DumpMarkedObjects to dump all allocations with a given mark. It
  // gets passed to AllocationMap::Iterate.
  inline static void DumpMarkedIterator(const void* ptr, AllocValue* v,
                                        const DumpMarkedArgs& args);

#if defined(TYPE_PROFILING)
  inline static void TallyTypesItererator(const void* ptr,
                                          AllocValue* value,
                                          AddressMap<TypeCount>* type_size_map);

  inline static void DumpTypesIterator(const void* ptr,
                                       TypeCount* size,
                                       const DumpArgs& args);
#endif  // defined(TYPE_PROFILING)

  // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
  // Creates a sorted list of Buckets whose length is num_buckets_.
  // The caller is responsible for deallocating the returned list.
  Bucket** MakeSortedBucketList() const;

  // Helper for TakeSnapshot. Saves object to snapshot.
  static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);

  // Arguments passed to AddIfNonLive
  struct AddNonLiveArgs {
    Snapshot* dest;
    Snapshot* base;
  };

  // Helper for NonLiveSnapshot. Adds the object to the destination
  // snapshot if it is non-live.
  static void AddIfNonLive(const void* ptr, AllocValue* v,
                           AddNonLiveArgs* arg);

  // Write contents of "*allocations" as a heap profile to
  // "file_name". "total" must contain the total of all entries in
  // "*allocations".
  static bool WriteProfile(const char* file_name,
                           const Bucket& total,
                           AllocationMap* allocations);

  // data ----------------------------

  // Memory (de)allocator that we use.
  Allocator alloc_;
  DeAllocator dealloc_;

  // Overall profile stats; we use only the Stats part,
  // but make it a Bucket to pass to UnparseBucket.
  Bucket total_;

  bool profile_mmap_;

  // Bucket hash table for malloc.
  // We hand-craft one instead of using one of the pre-written
  // ones because we do not want to use malloc when operating on the table.
  // It is only a few lines of code, so no big deal.
  Bucket** bucket_table_;
  int num_buckets_;

  // Map of all currently allocated objects and mapped regions we know about.
  AllocationMap* address_map_;

  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
  // If should_symbolize is true, will fork (which is not threadsafe)
  // to turn addresses into symbol names. Set to false for maximum safety.
  // Also writes a heap profile to "filename" that contains
  // all of the objects in this snapshot.
  void ReportLeaks(const char* checker_name, const char* filename,
                   bool should_symbolize);

  // Report the addresses of all leaked objects.
  // May use new/delete for temporary storage.
  void ReportIndividualObjects();

  bool Empty() const {
    return (total_.allocs == 0) && (total_.alloc_size == 0);
  }

 private:
  friend class HeapProfileTable;

  // Total count/size are stored in a Bucket so we can reuse UnparseBucket
  Bucket total_;

  // We share the Buckets managed by the parent table, but have our
  // own object->bucket map.
  AllocationMap map_;

  Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
    memset(&total_, 0, sizeof(total_));
  }

  // Callback used to populate a Snapshot object with entries found
  // in another allocation map.
  inline void Add(const void* ptr, const AllocValue& v) {
    map_.Insert(ptr, v);
    total_.allocs++;
    total_.alloc_size += v.bytes;
  }

  // Helpers for sorting and generating leak reports
  struct ReportState;
  static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  static void ReportObject(const void* ptr, AllocValue* v, char*);

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif  // BASE_HEAP_PROFILE_TABLE_H_