// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//
#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"   // for RawFD
#include "heap-profile-stats.h"

// Table to maintain heap profile data, i.e. the set of currently
// active heap memory allocations. Thread-unsafe and non-reentrant:
// each instance must be used by one thread at a time, without
// self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
 public:

  // Extension to be used for heap profile files.
  static const char kFileExt[];

  // Longest stack trace we record.
  static const int kMaxStackDepth = 32;

  // data types ----------------------------

  // Profile stats.
  typedef HeapProfileStats Stats;

  // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations.
  // New allocations are marked with UNMARKED by default.
  enum AllocationMark {
    UNMARKED = 0,
    MARK_ONE,
    MARK_TWO,
    MARK_THREE
  };

  // Info we can return about an allocation.
  struct AllocInfo {
    size_t object_size;             // size of the allocation
    const void* const* call_stack;  // call stack that made the allocation call
    int stack_depth;                // depth of call_stack
    bool live;                      // whether the allocation is "live"
    bool ignored;                   // whether the allocation is ignored
  };

  // Info we return about an allocation context.
  // An allocation context is a unique caller stack trace
  // of an allocation operation.
  struct AllocContextInfo : public Stats {
    int stack_depth;                // Depth of stack trace
    const void* const* call_stack;  // Stack trace
  };

  // Memory (de)allocator interface we'll use.
  typedef void* (*Allocator)(size_t size);
  typedef void  (*DeAllocator)(void* ptr);

  // interface ---------------------------

  HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
  ~HeapProfileTable();

  // Collect the stack trace for the function that asked to do the
  // allocation for passing to RecordAlloc() below.
  //
  // The stack trace is stored in 'stack'. The stack depth is returned.
  //
  // 'skip_count' gives the number of stack frames between this call
  // and the memory allocation function.
  static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);

  // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth'
  // and 'call_stack' identify the function that requested the
  // allocation. They can be generated using GetCallerStackTrace() above.
  void RecordAlloc(const void* ptr, size_t bytes,
                   int stack_depth, const void* const call_stack[]);

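  // A minimal usage sketch (assumes 'table' is a HeapProfileTable owned by
  // the caller, e.g. invoked from inside a malloc hook):
  //   void* stack[kMaxStackDepth];
  //   int depth = GetCallerStackTrace(/*skip_count=*/0, stack);
  //   table->RecordAlloc(ptr, bytes, depth, stack);
  //   ...
  //   table->RecordFree(ptr);
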
  // Record the deallocation of memory at 'ptr'.
  void RecordFree(const void* ptr);

  // Return true iff we have recorded an allocation at 'ptr'.
  // If yes, fill *object_size with the allocation byte size.
  bool FindAlloc(const void* ptr, size_t* object_size) const;
  // Same as FindAlloc, but fills all of *info.
  bool FindAllocDetails(const void* ptr, AllocInfo* info) const;

  // Return true iff "ptr" points into a recorded allocation.
  // If yes, fill *object_ptr with the actual allocation address
  // and *object_size with the allocation byte size.
  // max_size specifies the largest currently possible allocation size.
  bool FindInsideAlloc(const void* ptr, size_t max_size,
                       const void** object_ptr, size_t* object_size) const;

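  // Sketch of an interior-pointer lookup ('table', 'interior' and
  // 'kMaxAllocSize' are hypothetical names):
  //   const void* start = NULL;
  //   size_t size = 0;
  //   if (table->FindInsideAlloc(interior, kMaxAllocSize, &start, &size)) {
  //     // 'interior' points into the 'size'-byte object starting at 'start'.
  //   }
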
  // If "ptr" points to a recorded allocation that is not marked as live,
  // mark it as live and return true. Else return false.
  // All allocations start as non-live.
  bool MarkAsLive(const void* ptr);

  // If "ptr" points to a recorded allocation, mark it as "ignored".
  // Ignored objects are treated like other objects, except that they
  // are skipped in heap checking reports.
  void MarkAsIgnored(const void* ptr);

  // Mark all currently known allocations with the given AllocationMark.
  void MarkCurrentAllocations(AllocationMark mark);

  // Mark all unmarked allocations (i.e. those marked with
  // AllocationMark::UNMARKED) with the given mark.
  void MarkUnmarkedAllocations(AllocationMark mark);

  // Return current total (de)allocation statistics. It doesn't contain
  // mmap'ed regions.
  const Stats& total() const { return total_; }

  // Allocation data iteration callback: gets passed object pointer and
  // fully-filled AllocInfo.
  typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);

  // Iterate over the allocation profile data calling "callback"
  // for every allocation.
  void IterateAllocs(AllocIterator callback) const {
    address_map_->Iterate(MapArgsAllocIterator, callback);
  }

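  // Example callback (a sketch; 'PrintAlloc' is a hypothetical function):
  //   static void PrintAlloc(const void* ptr, const AllocInfo& info) {
  //     fprintf(stderr, "%p: %zu bytes, stack depth %d\n",
  //             ptr, info.object_size, info.stack_depth);
  //   }
  //   table->IterateAllocs(PrintAlloc);
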
  // Callback for iterating through addresses of all allocated objects.
  // Accepts a pointer to user data and an object pointer.
  typedef void (*AddressIterator)(void* data, const void* ptr);

  // Iterate over the addresses of all allocated objects.
  void IterateAllocationAddresses(AddressIterator callback, void* data);

  // Allocation context profile data iteration callback.
  typedef void (*AllocContextIterator)(const AllocContextInfo& info);

  // Iterate over the allocation context profile data calling "callback"
  // for every allocation context. Allocation contexts are ordered by the
  // size of allocated space.
  void IterateOrderedAllocContexts(AllocContextIterator callback) const;

  // Fill profile data into buffer 'buf' of size 'size'
  // and return the actual size occupied by the dump in 'buf'.
  // The profile buckets are dumped in decreasing order
  // of currently allocated bytes.
  // We do not provision for 0-terminating 'buf'.
  int FillOrderedProfile(char buf[], int size) const;

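  // Dump sketch (the buffer size here is an arbitrary assumption):
  //   char buf[1 << 20];
  //   int used = table->FillOrderedProfile(buf, sizeof(buf));
  //   // 'buf' is not 0-terminated; rely on the returned 'used' length
  //   // when writing it out.
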
  // Clean up any old profile files matching prefix + ".*" + kFileExt.
  static void CleanupOldProfiles(const char* prefix);

  // Return a snapshot of the current contents of *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  // The result is only valid while this exists and until
  // the snapshot is discarded by calling ReleaseSnapshot().
  class Snapshot;
  Snapshot* TakeSnapshot();

  // Release a previously taken snapshot. snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

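  // Snapshot lifecycle sketch (checker name and file name are arbitrary):
  //   Snapshot* snap = table->TakeSnapshot();
  //   if (!snap->Empty())
  //     snap->ReportLeaks("my-checker", "/tmp/leaks.heap", false);
  //   table->ReleaseSnapshot(snap);
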
  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-NULL, skip any objects present in "base".
  // As a side-effect, clears the "live" bit on every live object in *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);

  // Dump a list of allocations marked as "live" along with their creation
  // stack traces and sizes to a file named |file_name|. Together with
  // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used
  // to find objects that are created in a certain time span:
  //   1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the
  //      time span.
  //   2. Perform whatever action you suspect allocates memory that is not
  //      correctly freed.
  //   3. Invoke MarkUnmarkedAllocations(MARK_TWO).
  //   4. Perform whatever action is supposed to free the memory again. New
  //      allocations are not marked. So all allocations that are marked as
  //      "live" were created during step 2.
  //   5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations
  //      that were created during step 2, but survived step 4.
  // The sketch after this declaration shows these steps as code.
  //
  // Note that this functionality cannot be used if the HeapProfileTable is
  // used for leak checking (using HeapLeakChecker).
  void DumpMarkedObjects(AllocationMark mark, const char* file_name);

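  // The steps above as code ('SuspectAction' and 'CleanupAction' are
  // hypothetical; the output path is an arbitrary assumption):
  //   table->MarkCurrentAllocations(MARK_ONE);   // step 1
  //   SuspectAction();                           // step 2
  //   table->MarkUnmarkedAllocations(MARK_TWO);  // step 3
  //   CleanupAction();                           // step 4
  //   table->DumpMarkedObjects(MARK_TWO, "/tmp/marked.heap");  // step 5
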
 private:

  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
  // for a given allocation call stack trace.
  typedef HeapProfileBucket Bucket;

  // Info stored in the address map.
  struct AllocValue {
    // Access to the stack-trace bucket.
    Bucket* bucket() const {
      return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
    }
    // This also does set_live(false).
    void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
    size_t bytes;  // Number of bytes in this allocation

    // Access to the allocation liveness flag (for leak checking).
    bool live() const { return bucket_rep & kLive; }
    void set_live(bool l) {
      bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
    }

    // Should this allocation be ignored if it looks like a leak?
    bool ignore() const { return bucket_rep & kIgnore; }
    void set_ignore(bool r) {
      bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
    }

    AllocationMark mark() const {
      return static_cast<AllocationMark>(bucket_rep & uintptr_t(kMask));
    }
    void set_mark(AllocationMark mark) {
      bucket_rep = (bucket_rep & ~uintptr_t(kMask)) | uintptr_t(mark);
    }

   private:
    // We store a few bits in the bottom bits of bucket_rep.
    // (Alignment is at least four, so we have at least two bits.)
    static const int kLive = 1;
    static const int kIgnore = 2;
    static const int kMask = kLive | kIgnore;

    uintptr_t bucket_rep;
  };

  // helper for FindInsideAlloc
  static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }

  typedef AddressMap<AllocValue> AllocationMap;

  // Arguments that need to be passed to the DumpBucketIterator callback below.
  struct BufferArgs {
    BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg)
        : buf(buf_arg),
          buflen(buflen_arg),
          bufsize(bufsize_arg) {
    }

    char* buf;
    int buflen;
    int bufsize;

    DISALLOW_COPY_AND_ASSIGN(BufferArgs);
  };

  // Arguments that need to be passed to the DumpNonLiveIterator callback below.
  struct DumpArgs {
    DumpArgs(RawFD fd_arg, Stats* profile_stats_arg)
        : fd(fd_arg),
          profile_stats(profile_stats_arg) {
    }

    RawFD fd;              // file to write to
    Stats* profile_stats;  // stats to update (may be NULL)
  };

  // Arguments that need to be passed to the DumpMarkedIterator callback below.
  struct DumpMarkedArgs {
    DumpMarkedArgs(RawFD fd_arg, AllocationMark mark_arg)
        : fd(fd_arg),
          mark(mark_arg) {
    }

    RawFD fd;             // file to write to.
    AllocationMark mark;  // The mark of the allocations to process.
  };

  // Arguments that need to be passed to the MarkIterator callback below.
  struct MarkArgs {
    MarkArgs(AllocationMark mark_arg, bool mark_all_arg)
        : mark(mark_arg),
          mark_all(mark_all_arg) {
    }

    AllocationMark mark;  // The mark to put on allocations.
    bool mark_all;        // True if all allocations should be marked.
                          // Otherwise only mark unmarked allocations.
  };

  struct AllocationAddressIteratorArgs {
    AllocationAddressIteratorArgs(AddressIterator callback_arg, void* data_arg)
        : callback(callback_arg),
          data(data_arg) {
    }

    AddressIterator callback;
    void* data;
  };

  // helpers ----------------------------

  // Unparse bucket b and print its portion of profile dump into buf.
  // We return the amount of space in buf that we use. We start printing
  // at buf + buflen, and promise not to go beyond buf + bufsize.
  // We do not provision for 0-terminating 'buf'.
  //
  // If profile_stats is non-NULL, we update *profile_stats by
  // counting bucket b.
  //
  // "extra" is appended to the unparsed bucket. Typically it is empty,
  // but may be set to something like " heapprofile" for the total
  // bucket to indicate the type of the profile.
  static int UnparseBucket(const Bucket& b,
                           char* buf, int buflen, int bufsize,
                           const char* extra,
                           Stats* profile_stats);

  // Get the bucket for the caller stack trace 'key' of depth 'depth',
  // creating the bucket if needed.
  Bucket* GetBucket(int depth, const void* const key[]);

  // Helper for IterateAllocs to do callback signature conversion
  // from AllocationMap::Iterate to AllocIterator.
  static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
                                   AllocIterator callback) {
    AllocInfo info;
    info.object_size = v->bytes;
    info.call_stack = v->bucket()->stack;
    info.stack_depth = v->bucket()->depth;
    info.live = v->live();
    info.ignored = v->ignore();
    callback(ptr, info);
  }

  // Helper to dump a bucket.
  inline static void DumpBucketIterator(const Bucket* bucket,
                                        BufferArgs* args);

  // Helper for IterateAllocationAddresses.
  inline static void AllocationAddressesIterator(
      const void* ptr,
      AllocValue* v,
      const AllocationAddressIteratorArgs& args);

  // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations.
  inline static void MarkIterator(const void* ptr, AllocValue* v,
                                  const MarkArgs& args);

  // Helper for DumpNonLiveProfile to do object-granularity
  // heap profile dumping. It gets passed to AllocationMap::Iterate.
  inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                         const DumpArgs& args);

  // Helper for DumpMarkedObjects to dump all allocations with a given mark.
  // It gets passed to AllocationMap::Iterate.
  inline static void DumpMarkedIterator(const void* ptr, AllocValue* v,
                                        const DumpMarkedArgs& args);

  // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
  // Creates a sorted list of Buckets whose length is num_buckets_.
  // The caller is responsible for deallocating the returned list.
  Bucket** MakeSortedBucketList() const;

  // Helper for TakeSnapshot. Saves object to snapshot.
  static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);

  // Arguments passed to AddIfNonLive.
  struct AddNonLiveArgs {
    Snapshot* dest;  // snapshot to add non-live objects to
    Snapshot* base;  // if non-NULL, objects present in "base" are skipped
  };

  // Helper for NonLiveSnapshot. Adds the object to the destination
  // snapshot if it is non-live.
  static void AddIfNonLive(const void* ptr, AllocValue* v,
                           AddNonLiveArgs* arg);

  // Write contents of "*allocations" as a heap profile to
  // "file_name". "total" must contain the total of all entries in
  // "allocations".
  static bool WriteProfile(const char* file_name,
                           const Bucket& total,
                           AllocationMap* allocations);

  // data ----------------------------

  // Memory (de)allocator that we use.
  Allocator alloc_;
  DeAllocator dealloc_;

  // Overall profile stats; we use only the Stats part,
  // but make it a Bucket to pass to UnparseBucket.
  Bucket total_;

  bool profile_mmap_;

  // Bucket hash table for malloc.
  // We hand-craft one instead of using one of the pre-written
  // ones because we do not want to use malloc when operating on the table.
  // It is only a few lines of code, so no big deal.
  Bucket** bucket_table_;
  int num_buckets_;

  // Map of all currently allocated objects and mapped regions we know about.
  AllocationMap* address_map_;

  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
  // If should_symbolize is true, will fork (which is not threadsafe)
  // to turn addresses into symbol names. Set to false for maximum safety.
  // Also writes a heap profile to "filename" that contains
  // all of the objects in this snapshot.
  void ReportLeaks(const char* checker_name, const char* filename,
                   bool should_symbolize);

  // Report the addresses of all leaked objects.
  // May use new/delete for temporary storage.
  void ReportIndividualObjects();

  bool Empty() const {
    return (total_.allocs == 0) && (total_.alloc_size == 0);
  }

 private:
  friend class HeapProfileTable;

  // Total count/size are stored in a Bucket so we can reuse UnparseBucket.
  Bucket total_;

  // We share the Buckets managed by the parent table, but have our
  // own object->bucket map.
  AllocationMap map_;

  Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
    memset(&total_, 0, sizeof(total_));
  }

  // Callback used to populate a Snapshot object with entries found
  // in another allocation map.
  inline void Add(const void* ptr, const AllocValue& v) {
    map_.Insert(ptr, v);
    total_.allocs++;
    total_.alloc_size += v.bytes;
  }

  // Helpers for sorting and generating leak reports.
  struct Entry;
  struct ReportState;
  static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  static void ReportObject(const void* ptr, AllocValue* v, char*);

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif  // BASE_HEAP_PROFILE_TABLE_H_