/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Maxim Lifantsev
 */
// Background and key design points of MemoryRegionMap.
//
// MemoryRegionMap is a low-level module with quite atypical requirements that
// result in some degree of non-triviality of the implementation and design.
//
// MemoryRegionMap collects info about *all* memory regions created with
// mmap, munmap, mremap, sbrk.
// The key word above is 'all': all that are happening in a process
// during its lifetime, frequently starting even before global object
// constructor execution.
//
// This is needed by the primary client of MemoryRegionMap:
// HeapLeakChecker uses the regions and the associated stack traces
// to figure out what part of the memory is the heap:
// if MemoryRegionMap were to miss some (early) regions, leak checking would
// stop working correctly.
//
// To accomplish the goal of functioning before/during global object
// constructor execution MemoryRegionMap is done as a singleton service
// that relies on its own on-demand initialized static constructor-less data,
// and only relies on other low-level modules that can also function properly
// even before global object constructors run.
//
// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
// sbrk occurrences is more involved: conceptually to do this one needs to
// record some bits of data in particular about any mmap or sbrk call,
// but to do that one needs to allocate memory for that data at some point,
// but all memory allocations in the end themselves come from an mmap
// or sbrk call (that's how the address space of the process grows).
//
// Also note that we need to do all the above recording from
// within an mmap/sbrk hook which is sometimes/frequently made by a memory
// allocator, including the allocator MemoryRegionMap itself must rely on.
// In the case of heap-checker usage this includes even the very first
// mmap/sbrk call happening in the program: heap-checker gets activated due to
// a link-time installed mmap/sbrk hook and it initializes MemoryRegionMap
// and asks it to record info about this very first call right from that
// very first hook invocation.
//
// MemoryRegionMap is doing its memory allocations via LowLevelAlloc:
// unlike a more complex standard memory allocator, LowLevelAlloc cooperates
// with MemoryRegionMap by not holding any of its own locks while it calls
// mmap to get memory, thus we are able to call LowLevelAlloc from
// our mmap/sbrk hooks without causing a deadlock in it.
// For the same reason of deadlock prevention the locking in MemoryRegionMap
// itself is write-recursive, which is an exception to Google's mutex usage.
//
// We still need to break the infinite cycle of mmap calling our hook,
// which asks LowLevelAlloc for memory to record this mmap,
// which (sometimes) causes mmap, which calls our hook, and so on.
// We do this as follows: on a recursive call of MemoryRegionMap's
// mmap/sbrk/mremap hook we record the data about the allocation in a
// static fixed-sized stack (saved_regions and saved_buckets); when the
// recursion unwinds but before returning from the outer hook call we unwind
// this stack and move the data from saved_regions and saved_buckets to its
// permanent place in the RegionSet and "bucket_table" respectively,
// which can cause more allocations and mmap-s and recursion and unwinding,
// but the whole process ends eventually due to the fact that for the small
// allocations we are doing LowLevelAlloc reuses one mmap call and parcels out
// the memory it created to satisfy several of our allocation requests.
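//
// A minimal sketch of that buffering scheme (illustrative pseudocode only,
// with simplified names; the real logic is in InsertRegionLocked() and
// HandleSavedRegionsLocked() below):
//
//   void OnMmap(start, size) {                 // called from the mmap hook
//     if (recursive_insert) {                  // re-entered from an allocation
//       saved_regions[saved_regions_count++] = Region(start, size);
//       return;                                // defer; no allocation here
//     }
//     recursive_insert = true;
//     InsertIntoRegionSet(start, size);        // may mmap and re-enter the hook
//     while (saved_regions_count > 0)          // drain what recursion buffered
//       InsertIntoRegionSet(saved_regions[--saved_regions_count]);
//     recursive_insert = false;
//   }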
// ========================================================================= //
#include <config.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#elif !defined(MAP_FAILED)
#define MAP_FAILED -1  // the only thing we need from mman.h
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>   // for pthread_t, pthread_self()
#endif

#include <algorithm>   // for std::copy, std::equal, std::max

#include "memory_region_map.h"

#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"

#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
// MREMAP_FIXED is a linux extension.  The way it's used in this file,
// setting it to 0 is equivalent to saying, "This feature isn't
// supported", which is right.
#ifndef MREMAP_FIXED
# define MREMAP_FIXED  0
#endif

using std::max;
// ========================================================================= //
int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
SpinLock MemoryRegionMap::owner_lock_(  // ACQUIRED_AFTER(lock_)
    SpinLock::LINKER_INITIALIZED);
int MemoryRegionMap::recursion_count_ = 0;  // GUARDED_BY(owner_lock_)
pthread_t MemoryRegionMap::lock_owner_tid_;  // GUARDED_BY(owner_lock_)
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;
HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL;  // GUARDED_BY(lock_)
int MemoryRegionMap::num_buckets_ = 0;  // GUARDED_BY(lock_)
int MemoryRegionMap::saved_buckets_count_ = 0;  // GUARDED_BY(lock_)
HeapProfileBucket MemoryRegionMap::saved_buckets_[20];  // GUARDED_BY(lock_)

const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];
// ========================================================================= //
// Simple hook into execution of global object constructors,
// so that we do not call pthread_self() when it does not yet work.
static bool libpthread_initialized = false;
static bool initializer = (libpthread_initialized = true, true);

static inline bool current_thread_is(pthread_t should_be) {
  // Before main() runs, there's only one thread, so we're always that thread
  if (!libpthread_initialized) return true;
  // this starts working only sometime well into global constructor execution:
  return pthread_equal(pthread_self(), should_be);
}
// ========================================================================= //
// Constructor-less place-holder to store a RegionSet in.
union MemoryRegionMap::RegionSetRep {
  char rep[sizeof(RegionSet)];
  void* align_it;  // do not need a better alignment for 'rep' than this
  RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};

// The bytes where MemoryRegionMap::regions_ will point to.
// We use RegionSetRep with noop c-tor so that global construction
// does not interfere.
static MemoryRegionMap::RegionSetRep regions_rep;
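// (regions_ is later pointed at this storage and the RegionSet is
// placement-new'd into it on first use; see InsertRegionLocked() below.)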
// ========================================================================= //
// Has InsertRegionLocked been called recursively
// (or rather should we *not* use regions_ to record a hooked mmap).
static bool recursive_insert = false;

void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) {
  RAW_VLOG(10, "MemoryRegionMap Init");
  RAW_CHECK(max_stack_depth >= 0, "");
  // Make sure we don't overflow the memory in region stacks:
  RAW_CHECK(max_stack_depth <= kMaxStackDepth,
            "need to increase kMaxStackDepth?");
  Lock();
  client_count_ += 1;
  max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
  if (client_count_ > 1) {
    // not first client: already did initialization-proper
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Init increment done");
    return;
  }
  // Set our hooks and make sure they were installed:
  RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
  // We need to set recursive_insert since the NewArena call itself
  // will already do some allocations with mmap which our hooks will catch.
  // recursive_insert allows us to buffer info about these mmap calls.
  // Note that Init() can be (and is) sometimes called
  // already from within an mmap/sbrk hook.
  recursive_insert = true;
  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  recursive_insert = false;
  HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
    // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
    // recursive_insert = false; as InsertRegionLocked will also construct
    // regions_ on demand for us.
  if (use_buckets) {
    const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
    recursive_insert = true;
    bucket_table_ = static_cast<HeapProfileBucket**>(
        MyAllocator::Allocate(table_bytes));
    recursive_insert = false;
    memset(bucket_table_, 0, table_bytes);
    num_buckets_ = 0;
  }
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Init done");
}
bool MemoryRegionMap::Shutdown() {
  RAW_VLOG(10, "MemoryRegionMap Shutdown");
  Lock();
  RAW_CHECK(client_count_ > 0, "");
  client_count_ -= 1;
  if (client_count_ != 0) {  // not last client; need not really shutdown
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
    return true;
  }
  if (bucket_table_ != NULL) {
    for (int i = 0; i < kHashTableSize; i++) {
      for (HeapProfileBucket* curr = bucket_table_[i]; curr != 0; /**/) {
        HeapProfileBucket* bucket = curr;
        curr = curr->next;
        MyAllocator::Free(bucket->stack, 0);
        MyAllocator::Free(bucket, 0);
      }
    }
    MyAllocator::Free(bucket_table_, 0);
    num_buckets_ = 0;
    bucket_table_ = NULL;
  }
  RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  if (regions_) regions_->~RegionSet();
  regions_ = NULL;
  bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
  if (deleted_arena) {
    arena_ = 0;
  } else {
    RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
  }
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Shutdown done");
  return deleted_arena;
}
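
// Typical client usage of the Init()/Shutdown() pair above (illustrative
// only; HeapLeakChecker is the real client and picks its own arguments):
//
//   MemoryRegionMap::Init(MemoryRegionMap::kMaxStackDepth,
//                         /* use_buckets = */ false);
//   ... mmap/munmap/mremap/sbrk activity gets recorded ...
//   MemoryRegionMap::Shutdown();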
bool MemoryRegionMap::IsRecordingLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  return client_count_ > 0;
}
// Invariants (once libpthread_initialized is true):
//   * While lock_ is not held, recursion_count_ is 0 (and
//     lock_owner_tid_ is the previous owner, but we don't rely on
//     that).
//   * recursion_count_ and lock_owner_tid_ are only written while
//     both lock_ and owner_lock_ are held.  They may be read under
//     just owner_lock_.
//   * At entry and exit of Lock() and Unlock(), the current thread
//     owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
//     && recursion_count_ > 0.
void MemoryRegionMap::Lock() {
  {
    SpinLockHolder l(&owner_lock_);
    if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
      RAW_CHECK(lock_.IsHeld(), "Invariants violated");
      recursion_count_++;
      RAW_CHECK(recursion_count_ <= 5,
                "recursive lock nesting unexpectedly deep");
      return;
    }
  }
  lock_.Lock();
  {
    SpinLockHolder l(&owner_lock_);
    RAW_CHECK(recursion_count_ == 0,
              "Last Unlock didn't reset recursion_count_");
    if (libpthread_initialized)
      lock_owner_tid_ = pthread_self();
    recursion_count_ = 1;
  }
}
void MemoryRegionMap::Unlock() {
  SpinLockHolder l(&owner_lock_);
  RAW_CHECK(recursion_count_ > 0, "unlock when not held");
  RAW_CHECK(lock_.IsHeld(),
            "unlock when not held, and recursion_count_ is wrong");
  RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
  recursion_count_--;
  if (recursion_count_ == 0) {
    lock_.Unlock();
  }
}
bool MemoryRegionMap::LockIsHeld() {
  SpinLockHolder l(&owner_lock_);
  return lock_.IsHeld() && current_thread_is(lock_owner_tid_);
}
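
// Illustration of the write-recursive locking above (comment only; the hooks
// below rely on this when they re-enter Lock() on the same thread):
//
//   MemoryRegionMap::Lock();    // outer acquisition: recursion_count_ == 1
//   MemoryRegionMap::Lock();    // nested re-entry:   recursion_count_ == 2
//   MemoryRegionMap::Unlock();  // recursion_count_ == 1, lock_ still held
//   MemoryRegionMap::Unlock();  // recursion_count_ == 0, lock_ released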
const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (regions_ != NULL) {
    Region sample;
    sample.SetRegionSetKey(addr);
    RegionSet::iterator region = regions_->lower_bound(sample);
    if (region != regions_->end()) {
      RAW_CHECK(addr <= region->end_addr, "");
      if (region->start_addr <= addr  &&  addr < region->end_addr) {
        return &(*region);
      }
    }
  }
  return NULL;
}
bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(addr);
  if (region != NULL) *result = *region;  // create it as an independent copy
  Unlock();
  return region != NULL;
}
bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
                                             Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(stack_top);
  if (region != NULL) {
    RAW_VLOG(10, "Stack at %p is inside region %p..%p",
                 reinterpret_cast<void*>(stack_top),
                 reinterpret_cast<void*>(region->start_addr),
                 reinterpret_cast<void*>(region->end_addr));
    const_cast<Region*>(region)->set_is_stack();  // now we know
      // cast is safe (set_is_stack does not change the set ordering key)
    *result = *region;  // create *result as an independent copy
  }
  Unlock();
  return region != NULL;
}
HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
                                              const void* const key[]) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // Make hash-value
  uintptr_t hash = 0;
  for (int i = 0; i < depth; i++) {
    hash += reinterpret_cast<uintptr_t>(key[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;

  // Lookup stack trace in table
  unsigned int hash_index = (static_cast<unsigned int>(hash)) % kHashTableSize;
  for (HeapProfileBucket* bucket = bucket_table_[hash_index];
       bucket != 0;
       bucket = bucket->next) {
    if ((bucket->hash == hash) && (bucket->depth == depth) &&
        std::equal(key, key + depth, bucket->stack)) {
      return bucket;
    }
  }

  // Create a new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  HeapProfileBucket* bucket;
  if (recursive_insert) {  // recursion: save in saved_buckets_
    const void** key_copy = saved_buckets_keys_[saved_buckets_count_];
    std::copy(key, key + depth, key_copy);
    bucket = &saved_buckets_[saved_buckets_count_];
    memset(bucket, 0, sizeof(*bucket));
    ++saved_buckets_count_;
    bucket->stack = key_copy;
  } else {
    recursive_insert = true;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    recursive_insert = false;
    std::copy(key, key + depth, key_copy);
    recursive_insert = true;
    bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    recursive_insert = false;
    memset(bucket, 0, sizeof(*bucket));
    bucket->stack = key_copy;
    bucket->next = bucket_table_[hash_index];
  }
  bucket->hash = hash;
  bucket->depth = depth;
  bucket_table_[hash_index] = bucket;
  ++num_buckets_;
  return bucket;
}
MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->begin();
}
MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->end();
}
inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
  RAW_VLOG(12, "Inserting region %p..%p from %p",
               reinterpret_cast<void*>(region.start_addr),
               reinterpret_cast<void*>(region.end_addr),
               reinterpret_cast<void*>(region.caller()));
  RegionSet::const_iterator i = regions_->lower_bound(region);
  if (i != regions_->end() && i->start_addr <= region.start_addr) {
    RAW_DCHECK(region.end_addr <= i->end_addr, "");  // lower_bound ensures this
    return;  // 'region' is a subset of an already recorded region; do nothing
    // We can be stricter and allow this only when *i has been created via
    // an mmap with MAP_NORESERVE flag set.
  }
  if (DEBUG_MODE) {
    RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
              "Wow, overlapping memory regions");
    Region sample;
    sample.SetRegionSetKey(region.start_addr);
    i = regions_->lower_bound(sample);
    RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
              "Wow, overlapping memory regions");
  }
  region.AssertIsConsistent();  // just making sure
  // This inserts and allocates permanent storage for region
  // and its call stack data: it's safe to do it now:
  regions_->insert(region);
  RAW_VLOG(12, "Inserted region %p..%p :",
               reinterpret_cast<void*>(region.start_addr),
               reinterpret_cast<void*>(region.end_addr));
  if (VLOG_IS_ON(12))  LogAllLocked();
}
// These variables are local to MemoryRegionMap::InsertRegionLocked()
// and MemoryRegionMap::HandleSavedRegionsLocked()
// and are file-level to ensure that they are initialized at load time.

// Number of unprocessed region inserts.
static int saved_regions_count = 0;

// Unprocessed inserts (must be big enough to hold all allocations that can
// be caused by an InsertRegionLocked call).
// Region has no constructor, so that c-tor execution does not interfere
// with the any-time use of the static memory behind saved_regions.
static MemoryRegionMap::Region saved_regions[20];
inline void MemoryRegionMap::HandleSavedRegionsLocked(
    void (*insert_func)(const Region& region)) {
  while (saved_regions_count > 0) {
    // Making a local-var copy of the region argument to insert_func
    // including its stack (w/o doing any memory allocations) is important:
    // in many cases the memory in saved_regions
    // will get written-to during the (*insert_func)(r) call below.
    Region r = saved_regions[--saved_regions_count];
    (*insert_func)(r);
  }
}
void MemoryRegionMap::RestoreSavedBucketsLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  while (saved_buckets_count_ > 0) {
    HeapProfileBucket bucket = saved_buckets_[--saved_buckets_count_];
    unsigned int hash_index =
        static_cast<unsigned int>(bucket.hash) % kHashTableSize;
    bool is_found = false;
    for (HeapProfileBucket* curr = bucket_table_[hash_index];
         curr != 0;
         curr = curr->next) {
      if ((curr->hash == bucket.hash) && (curr->depth == bucket.depth) &&
          std::equal(bucket.stack, bucket.stack + bucket.depth, curr->stack)) {
        curr->allocs += bucket.allocs;
        curr->alloc_size += bucket.alloc_size;
        curr->frees += bucket.frees;
        curr->free_size += bucket.free_size;
        is_found = true;
        break;
      }
    }
    if (is_found) continue;

    const size_t key_size = sizeof(bucket.stack[0]) * bucket.depth;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    std::copy(bucket.stack, bucket.stack + bucket.depth, key_copy);
    HeapProfileBucket* new_bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    memset(new_bucket, 0, sizeof(*new_bucket));
    new_bucket->hash = bucket.hash;
    new_bucket->depth = bucket.depth;
    new_bucket->stack = key_copy;
    new_bucket->next = bucket_table_[hash_index];
    bucket_table_[hash_index] = new_bucket;
    ++num_buckets_;
  }
}
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // We can be called recursively, because RegionSet constructor
  // and DoInsertRegionLocked() (called below) can call the allocator.
  // recursive_insert tells us if that's the case. When this happens,
  // region insertion information is recorded in saved_regions[],
  // and taken into account when the recursion unwinds.
  if (recursive_insert) {  // recursion: save in saved_regions
    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
                 reinterpret_cast<void*>(region.start_addr),
                 reinterpret_cast<void*>(region.end_addr),
                 reinterpret_cast<void*>(region.caller()));
    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
    // Copy 'region' to saved_regions[saved_regions_count]
    // together with the contents of its call_stack,
    // then increment saved_regions_count.
    saved_regions[saved_regions_count++] = region;
  } else {  // not a recursive call
    if (regions_ == NULL) {  // init regions_
      RAW_VLOG(12, "Initializing region set");
      regions_ = regions_rep.region_set();
      recursive_insert = true;
      new (regions_) RegionSet();
      HandleSavedRegionsLocked(&DoInsertRegionLocked);
      recursive_insert = false;
    }
    recursive_insert = true;
    // Do the actual insertion work to put new regions into regions_:
    DoInsertRegionLocked(region);
    HandleSavedRegionsLocked(&DoInsertRegionLocked);
    recursive_insert = false;
  }
}
// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 1;
#else
static const int kStripFrames = 3;
#endif
void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
  // Record start/end info about this memory acquisition call in a new region:
  Region region;
  region.Create(start, size);
  // First get the call stack info into the local variable 'region':
  int depth = max_stack_depth_ > 0
              ? MallocHook::GetCallerStackTrace(
                    const_cast<void**>(region.call_stack),
                    max_stack_depth_, kStripFrames + 1)
              : 0;
  region.set_call_stack_depth(depth);  // record stack info fully
  RAW_VLOG(10, "New global region %p..%p from %p",
               reinterpret_cast<void*>(region.start_addr),
               reinterpret_cast<void*>(region.end_addr),
               reinterpret_cast<void*>(region.caller()));
  // Note: none of the above allocates memory.
  Lock();  // recursively lock
  map_size_ += size;
  InsertRegionLocked(region);
    // This will (eventually) allocate storage for and copy over the stack data
    // from region.call_stack_data_ that is pointed by region.call_stack().
  if (bucket_table_ != NULL) {
    HeapProfileBucket* b = GetBucket(depth, region.call_stack);
    ++b->allocs;
    b->alloc_size += size;
    if (!recursive_insert) {
      recursive_insert = true;
      RestoreSavedBucketsLocked();
      recursive_insert = false;
    }
  }
  Unlock();
}
void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
  Lock();
  if (recursive_insert) {
    // First remove the removed region from saved_regions, if it's
    // there, to prevent overrunning saved_regions in recursive
    // map/unmap call sequences, and also from later inserting regions
    // which have already been unmapped.
    uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
    uintptr_t end_addr = start_addr + size;
    int put_pos = 0;
    int old_count = saved_regions_count;
    for (int i = 0; i < old_count; ++i, ++put_pos) {
      Region& r = saved_regions[i];
      if (r.start_addr == start_addr && r.end_addr == end_addr) {
        // An exact match, so it's safe to remove.
        RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size);
        --saved_regions_count;
        --put_pos;
        RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
                      "now have %d saved regions"),
                 reinterpret_cast<void*>(start_addr),
                 reinterpret_cast<void*>(end_addr),
                 saved_regions_count);
      } else {
        if (put_pos < i) {
          saved_regions[put_pos] = saved_regions[i];
        }
      }
    }
  }
  if (regions_ == NULL) {  // We must have just unset the hooks,
                           // but this thread was already inside the hook.
    Unlock();
    return;
  }
  if (!recursive_insert) {
    HandleSavedRegionsLocked(&InsertRegionLocked);
  }
    // first handle adding saved regions if any
  uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
  uintptr_t end_addr = start_addr + size;
  // subtract start_addr, end_addr from all the regions
  RAW_VLOG(10, "Removing global region %p..%p; have %"PRIuS" regions",
               reinterpret_cast<void*>(start_addr),
               reinterpret_cast<void*>(end_addr),
               regions_->size());
  Region sample;
  sample.SetRegionSetKey(start_addr);
  // Only iterate over the regions that might overlap start_addr..end_addr:
  for (RegionSet::iterator region = regions_->lower_bound(sample);
       region != regions_->end()  &&  region->start_addr < end_addr;
       /*noop*/) {
    RAW_VLOG(13, "Looking at region %p..%p",
                 reinterpret_cast<void*>(region->start_addr),
                 reinterpret_cast<void*>(region->end_addr));
    if (start_addr <= region->start_addr  &&
        region->end_addr <= end_addr) {  // full deletion
      RAW_VLOG(12, "Deleting region %p..%p",
                   reinterpret_cast<void*>(region->start_addr),
                   reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - region->start_addr);
      RegionSet::iterator d = region;
      ++region;
      regions_->erase(d);
      continue;
    } else if (region->start_addr < start_addr  &&
               end_addr < region->end_addr) {  // cutting-out split
      RAW_VLOG(12, "Splitting region %p..%p in two",
                   reinterpret_cast<void*>(region->start_addr),
                   reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - start_addr);
      // Make another region for the start portion:
      // The new region has to be the start portion because we can't
      // just modify region->end_addr as it's the sorting key.
      Region r = *region;
      r.set_end_addr(start_addr);
      InsertRegionLocked(r);
      // cut *region from start:
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (end_addr > region->start_addr  &&
               start_addr <= region->start_addr) {  // cut from start
      RAW_VLOG(12, "Start-chopping region %p..%p",
                   reinterpret_cast<void*>(region->start_addr),
                   reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - region->start_addr);
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (start_addr > region->start_addr  &&
               start_addr < region->end_addr) {  // cut from end
      RAW_VLOG(12, "End-chopping region %p..%p",
                   reinterpret_cast<void*>(region->start_addr),
                   reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - start_addr);
      // Can't just modify region->end_addr (it's the sorting key):
      Region r = *region;
      r.set_end_addr(start_addr);
      RegionSet::iterator d = region;
      ++region;
      // It's safe to erase before inserting since r is independent of *d:
      // r contains an own copy of the call stack:
      regions_->erase(d);
      InsertRegionLocked(r);
      continue;
    }
    ++region;
  }
  RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions",
               reinterpret_cast<void*>(start_addr),
               reinterpret_cast<void*>(end_addr),
               regions_->size());
  if (VLOG_IS_ON(12))  LogAllLocked();
  unmap_size_ += size;
  Unlock();
}
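
// Informal recap of the four overlap cases handled in the loop above when
// removing [start_addr, end_addr) against an existing region (the code
// above is authoritative):
//   * region fully inside the removed range  -> region is erased;
//   * removed range strictly inside region   -> region is split in two;
//   * removed range covers region's start    -> region's start_addr moves up;
//   * removed range covers region's end      -> region's end_addr moves down.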
void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
                                                  const void* const stack[],
                                                  size_t size) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (bucket_table_ == NULL) return;
  HeapProfileBucket* b = GetBucket(depth, stack);
  ++b->frees;
  b->free_size += size;
}
void MemoryRegionMap::MmapHook(const void* result,
                               const void* start, size_t size,
                               int prot, int flags,
                               int fd, off_t offset) {
  // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe
  // snprintf reimplementation that does not malloc to pretty-print NULL
  RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" "
               "prot %d flags %d fd %d offs %"PRId64,
           reinterpret_cast<uintptr_t>(result), size,
           reinterpret_cast<uint64>(start), prot, flags, fd,
           static_cast<int64>(offset));
  if (result != reinterpret_cast<void*>(MAP_FAILED)  &&  size != 0) {
    RecordRegionAddition(result, size);
  }
}
void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
  RAW_VLOG(10, "MUnmap of %p %"PRIuS"", ptr, size);
  if (size != 0) {
    RecordRegionRemoval(ptr, size);
  }
}
void MemoryRegionMap::MremapHook(const void* result,
                                 const void* old_addr, size_t old_size,
                                 size_t new_size, int flags,
                                 const void* new_addr) {
  RAW_VLOG(10, "MRemap = 0x%"PRIxPTR" of 0x%"PRIxPTR" %"PRIuS" "
               "to %"PRIuS" flags %d new_addr=0x%"PRIxPTR,
           (uintptr_t)result, (uintptr_t)old_addr,
           old_size, new_size, flags,
           flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
  if (result != reinterpret_cast<void*>(-1)) {
    RecordRegionRemoval(old_addr, old_size);
    RecordRegionAddition(result, new_size);
  }
}
779 extern "C" void* __sbrk(ptrdiff_t increment
); // defined in libc
781 void MemoryRegionMap::SbrkHook(const void* result
, ptrdiff_t increment
) {
782 RAW_VLOG(10, "Sbrk = 0x%"PRIxPTR
" of %"PRIdS
"", (uintptr_t)result
, increment
);
783 if (result
!= reinterpret_cast<void*>(-1)) {
785 void* new_end
= sbrk(0);
786 RecordRegionAddition(result
, reinterpret_cast<uintptr_t>(new_end
) -
787 reinterpret_cast<uintptr_t>(result
));
788 } else if (increment
< 0) {
789 void* new_end
= sbrk(0);
790 RecordRegionRemoval(new_end
, reinterpret_cast<uintptr_t>(result
) -
791 reinterpret_cast<uintptr_t>(new_end
));
void MemoryRegionMap::LogAllLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_LOG(INFO, "List of regions:");
  uintptr_t previous = 0;
  for (RegionSet::const_iterator r = regions_->begin();
       r != regions_->end(); ++r) {
    RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" "
                  "from 0x%"PRIxPTR" stack=%d",
            r->start_addr, r->end_addr, r->caller(), r->is_stack);
    RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
      // this must be caused by uncontrolled recursive operations on regions_
    previous = r->end_addr;
  }
  RAW_LOG(INFO, "End of regions list");
}