/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Maxim Lifantsev
 */
// Background and key design points of MemoryRegionMap.
//
// MemoryRegionMap is a low-level module with quite atypical requirements that
// result in some degree of non-triviality of the implementation and design.
//
// MemoryRegionMap collects info about *all* memory regions created with
// mmap, munmap, mremap, sbrk.
// The key word above is 'all': all regions that come into existence in the
// process during its lifetime, frequently starting even before global object
// constructor execution.
//
// This is needed by the primary client of MemoryRegionMap:
// HeapLeakChecker uses the regions and the associated stack traces
// to figure out what part of the memory is the heap:
// if MemoryRegionMap were to miss some (early) regions, leak checking would
// stop working correctly.
//
// To accomplish the goal of functioning before/during global object
// constructor execution, MemoryRegionMap is implemented as a singleton service
// that relies on its own on-demand initialized, constructor-less static data,
// and only depends on other low-level modules that can also function properly
// even before global object constructors run.
//
// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
// sbrk occurrences is more involved: conceptually, one needs to record a few
// bits of data about every mmap and sbrk call, but to do that one needs to
// allocate memory for that data at some point, and in the end all memory
// allocations themselves come from an mmap or sbrk call (that's how the
// address space of the process grows).
//
// Also note that we need to do all the above recording from
// within an mmap/sbrk hook, which is sometimes/frequently made by a memory
// allocator, including the allocator MemoryRegionMap itself must rely on.
// In the case of heap-checker usage this includes even the very first
// mmap/sbrk call happening in the program: heap-checker gets activated due to
// a link-time installed mmap/sbrk hook and it initializes MemoryRegionMap
// and asks it to record info about this very first call right from that
// very first hook invocation.
//
// MemoryRegionMap does its memory allocations via LowLevelAlloc:
// unlike a more complex standard memory allocator, LowLevelAlloc cooperates
// with MemoryRegionMap by not holding any of its own locks while it calls
// mmap to get memory; thus we are able to call LowLevelAlloc from
// our mmap/sbrk hooks without causing a deadlock in it.
// For the same reason of deadlock prevention, the locking in MemoryRegionMap
// itself is write-recursive, which is an exception to Google's usual mutex
// usage rules.
//
// We still need to break the infinite cycle of mmap calling our hook,
// which asks LowLevelAlloc for memory to record this mmap,
// which (sometimes) causes mmap, which calls our hook, and so on.
// We do this as follows: on a recursive call of MemoryRegionMap's
// mmap/sbrk/mremap hook we record the data about the allocation in a
// static fixed-sized stack (saved_regions and saved_buckets); when the
// recursion unwinds, but before returning from the outer hook call, we unwind
// this stack and move the data from saved_regions and saved_buckets to its
// permanent place in the RegionSet and "bucket_table" respectively.
// This can cause more allocations, mmap-s, recursion, and unwinding,
// but the whole process terminates eventually because, for the small
// allocations we are doing, LowLevelAlloc reuses one mmap call and parcels out
// the memory it created to satisfy several of our allocation requests.
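//
// The following commented-out fragment is only an illustration of that
// buffering pattern (a minimal sketch with hypothetical names; it is not
// compiled or used anywhere in this file):
//
//   static Region saved[20];          // analogous to saved_regions below
//   static int saved_count = 0;
//   static bool in_recursive_insert = false;
//
//   void OnMmapRegion(const Region& r) {
//     if (in_recursive_insert) {      // hook re-entered while we were allocating
//       saved[saved_count++] = r;     // park the data in static storage
//       return;
//     }
//     in_recursive_insert = true;
//     InsertIntoPermanentSet(r);      // may mmap and re-enter OnMmapRegion above
//     while (saved_count > 0)         // drain whatever got parked meanwhile
//       InsertIntoPermanentSet(saved[--saved_count]);
//     in_recursive_insert = false;
//   }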
// ========================================================================= //

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#elif !defined(MAP_FAILED)
#define MAP_FAILED -1  // the only thing we need from mman.h
#endif
#include <pthread.h>   // for pthread_t, pthread_self()

#include "memory_region_map.h"

#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"

#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>

// MREMAP_FIXED is a linux extension.  The way it is used in this file,
// setting it to 0 is equivalent to saying, "This feature isn't
// supported", which is right.
#ifndef MREMAP_FIXED
# define MREMAP_FIXED 0
#endif

// ========================================================================= //
int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
SpinLock MemoryRegionMap::owner_lock_(  // ACQUIRED_AFTER(lock_)
    SpinLock::LINKER_INITIALIZED);
int MemoryRegionMap::recursion_count_ = 0;  // GUARDED_BY(owner_lock_)
pthread_t MemoryRegionMap::lock_owner_tid_;  // GUARDED_BY(owner_lock_)
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;
HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL;  // GUARDED_BY(lock_)
int MemoryRegionMap::num_buckets_ = 0;  // GUARDED_BY(lock_)
int MemoryRegionMap::saved_buckets_count_ = 0;  // GUARDED_BY(lock_)
HeapProfileBucket MemoryRegionMap::saved_buckets_[20];  // GUARDED_BY(lock_)
const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];
// ========================================================================= //

// Simple hook into execution of global object constructors,
// so that we do not call pthread_self() when it does not yet work.
static bool libpthread_initialized = false;
static bool initializer = (libpthread_initialized = true, true);

static inline bool current_thread_is(pthread_t should_be) {
  // Before main() runs, there's only one thread, so we're always that thread.
  if (!libpthread_initialized) return true;
  // This starts working only sometime well into global constructor execution:
  return pthread_equal(pthread_self(), should_be);
}
// ========================================================================= //

// Constructor-less place-holder to store a RegionSet in.
union MemoryRegionMap::RegionSetRep {
  char rep[sizeof(RegionSet)];
  void* align_it;  // do not need a better alignment for 'rep' than this
  RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};

// The bytes where MemoryRegionMap::regions_ will point to.
// We use RegionSetRep with a no-op c-tor so that global construction
// does not interfere.
static MemoryRegionMap::RegionSetRep regions_rep;
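// Purely as an illustration of the pattern above (a sketch with hypothetical
// names; nothing in this file defines these):
//
//   union Placeholder {
//     char rep[sizeof(SomeSetType)];  // raw bytes, no constructor ever runs
//     void* align_it;                 // forces pointer alignment for 'rep'
//   };
//   static Placeholder storage;       // zero-initialized at load time
//   // Later, on first use, the object is created in place with placement new:
//   SomeSetType* set = new(storage.rep) SomeSetType();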
// ========================================================================= //

// Has InsertRegionLocked been called recursively
// (or rather, should we *not* use regions_ to record a hooked mmap)?
static bool recursive_insert = false;

void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) {
  RAW_VLOG(10, "MemoryRegionMap Init");
  RAW_CHECK(max_stack_depth >= 0, "");
  // Make sure we don't overflow the memory in region stacks:
  RAW_CHECK(max_stack_depth <= kMaxStackDepth,
            "need to increase kMaxStackDepth?");
  max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
  if (client_count_ > 1) {
    // not the first client: initialization-proper was already done
    RAW_VLOG(10, "MemoryRegionMap Init increment done");
  // Set our hooks and make sure they were installed:
  RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
  // We need to set recursive_insert since the NewArena call itself
  // will already do some allocations with mmap which our hooks will catch;
  // recursive_insert allows us to buffer info about these mmap calls.
  // Note that Init() can be (and is) sometimes called
  // already from within an mmap/sbrk hook.
  recursive_insert = true;
  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  recursive_insert = false;
  HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
  // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
  // recursive_insert = false; as InsertRegionLocked will also construct
  // regions_ on demand for us.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  recursive_insert = true;
  bucket_table_ = static_cast<HeapProfileBucket**>(
      MyAllocator::Allocate(table_bytes));
  recursive_insert = false;
  memset(bucket_table_, 0, table_bytes);
  if (regions_ == NULL)  // init regions_
    InitRegionSetLocked();
  RAW_VLOG(10, "MemoryRegionMap Init done");
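// A usage sketch (hedged; the real clients such as the heap checker and heap
// profiler drive this through their own initialization paths, and the flag
// value passed for use_buckets depends on the client):
//
//   MemoryRegionMap::Init(MemoryRegionMap::kMaxStackDepth,
//                         /* use_buckets */ true);
//   // ... from now on every mmap/munmap/mremap/sbrk is recorded ...
//   bool arena_freed = MemoryRegionMap::Shutdown();  // false if still in use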
bool MemoryRegionMap::Shutdown() {
  RAW_VLOG(10, "MemoryRegionMap Shutdown");
  RAW_CHECK(client_count_ > 0, "");
  if (client_count_ != 0) {  // not the last client; no need to really shut down
    RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
  if (bucket_table_ != NULL) {
    for (int i = 0; i < kHashTableSize; i++) {
      for (HeapProfileBucket* curr = bucket_table_[i]; curr != 0; /**/) {
        HeapProfileBucket* bucket = curr;
        MyAllocator::Free(bucket->stack, 0);
        MyAllocator::Free(bucket, 0);
    MyAllocator::Free(bucket_table_, 0);
    bucket_table_ = NULL;
  RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  if (regions_) regions_->~RegionSet();
  bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
    RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
  RAW_VLOG(10, "MemoryRegionMap Shutdown done");
  return deleted_arena;
bool MemoryRegionMap::IsRecordingLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  return client_count_ > 0;
}

// Invariants (once libpthread_initialized is true):
//   * While lock_ is not held, recursion_count_ is 0 (and
//     lock_owner_tid_ is the previous owner, but we don't rely on
//     that).
//   * recursion_count_ and lock_owner_tid_ are only written while
//     both lock_ and owner_lock_ are held.  They may be read under
//     just owner_lock_.
//   * At entry and exit of Lock() and Unlock(), the current thread
//     owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
//     && recursion_count_ > 0.
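//
// For example, within a single thread the following nesting is legal and
// releases lock_ only at the outermost Unlock() (a usage sketch, not code
// that appears elsewhere in this file):
//
//   MemoryRegionMap::Lock();    // recursion_count_ becomes 1, lock_ acquired
//   MemoryRegionMap::Lock();    // recursion_count_ becomes 2, no re-acquire
//   MemoryRegionMap::Unlock();  // recursion_count_ back to 1, lock_ still held
//   MemoryRegionMap::Unlock();  // recursion_count_ reaches 0, lock_ released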
void MemoryRegionMap::Lock() {
    SpinLockHolder l(&owner_lock_);
    if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
      RAW_CHECK(lock_.IsHeld(), "Invariants violated");
      RAW_CHECK(recursion_count_ <= 5,
                "recursive lock nesting unexpectedly deep");
    SpinLockHolder l(&owner_lock_);
    RAW_CHECK(recursion_count_ == 0,
              "Last Unlock didn't reset recursion_count_");
    if (libpthread_initialized)
      lock_owner_tid_ = pthread_self();
    recursion_count_ = 1;
void MemoryRegionMap::Unlock() {
  SpinLockHolder l(&owner_lock_);
  RAW_CHECK(recursion_count_ > 0, "unlock when not held");
  RAW_CHECK(lock_.IsHeld(),
            "unlock when not held, and recursion_count_ is wrong");
  RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
  if (recursion_count_ == 0) {

bool MemoryRegionMap::LockIsHeld() {
  SpinLockHolder l(&owner_lock_);
  return lock_.IsHeld() && current_thread_is(lock_owner_tid_);
}
const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (regions_ != NULL) {
    Region sample;
    sample.SetRegionSetKey(addr);
    RegionSet::iterator region = regions_->lower_bound(sample);
    if (region != regions_->end()) {
      RAW_CHECK(addr <= region->end_addr, "");
      if (region->start_addr <= addr && addr < region->end_addr) {

bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
  const Region* region = DoFindRegionLocked(addr);
  if (region != NULL) *result = *region;  // create it as an independent copy
  return region != NULL;
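// A caller-side sketch of FindRegion (hypothetical client code, not something
// defined in this file): look up the region covering an address and keep the
// returned independent copy, which stays valid after the call returns:
//
//   MemoryRegionMap::Region r;
//   if (MemoryRegionMap::FindRegion(reinterpret_cast<uintptr_t>(ptr), &r)) {
//     // r.start_addr <= (uintptr_t)ptr < r.end_addr
//   }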
bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
                                             Region* result) {
  const Region* region = DoFindRegionLocked(stack_top);
  if (region != NULL) {
    RAW_VLOG(10, "Stack at %p is inside region %p..%p",
             reinterpret_cast<void*>(stack_top),
             reinterpret_cast<void*>(region->start_addr),
             reinterpret_cast<void*>(region->end_addr));
    const_cast<Region*>(region)->set_is_stack();  // now we know
    // The cast is safe (set_is_stack does not change the set ordering key).
    *result = *region;  // create *result as an independent copy
  return region != NULL;
HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
                                              const void* const key[]) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // Make a hash of the stack trace:
  uintptr_t hash = 0;
  for (int i = 0; i < depth; i++) {
    hash += reinterpret_cast<uintptr_t>(key[i]);
  // Lookup stack trace in table:
  unsigned int hash_index = (static_cast<unsigned int>(hash)) % kHashTableSize;
  for (HeapProfileBucket* bucket = bucket_table_[hash_index];
       bucket != 0;
       bucket = bucket->next) {
    if ((bucket->hash == hash) && (bucket->depth == depth) &&
        std::equal(key, key + depth, bucket->stack)) {

  const size_t key_size = sizeof(key[0]) * depth;
  HeapProfileBucket* bucket;
  if (recursive_insert) {  // recursion: save in saved_buckets_
    const void** key_copy = saved_buckets_keys_[saved_buckets_count_];
    std::copy(key, key + depth, key_copy);
    bucket = &saved_buckets_[saved_buckets_count_];
    memset(bucket, 0, sizeof(*bucket));
    ++saved_buckets_count_;
    bucket->stack = key_copy;
  } else {
    recursive_insert = true;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    recursive_insert = false;
    std::copy(key, key + depth, key_copy);
    recursive_insert = true;
    bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    recursive_insert = false;
    memset(bucket, 0, sizeof(*bucket));
    bucket->stack = key_copy;
    bucket->next = bucket_table_[hash_index];
  }
  bucket->depth = depth;
  bucket_table_[hash_index] = bucket;
MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->begin();
}

MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->end();
}
inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
  RAW_VLOG(12, "Inserting region %p..%p from %p",
           reinterpret_cast<void*>(region.start_addr),
           reinterpret_cast<void*>(region.end_addr),
           reinterpret_cast<void*>(region.caller()));
  RegionSet::const_iterator i = regions_->lower_bound(region);
  if (i != regions_->end() && i->start_addr <= region.start_addr) {
    RAW_DCHECK(region.end_addr <= i->end_addr, "");  // lower_bound ensures this
    return;  // 'region' is a subset of an already recorded region; do nothing
    // We could be stricter and allow this only when *i has been created via
    // an mmap with the MAP_NORESERVE flag set.
  }
  RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
            "Wow, overlapping memory regions");
  Region sample;
  sample.SetRegionSetKey(region.start_addr);
  i = regions_->lower_bound(sample);
  RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
            "Wow, overlapping memory regions");
  region.AssertIsConsistent();  // just making sure
  // This inserts and allocates permanent storage for region
  // and its call stack data: it's safe to do it now.
  regions_->insert(region);
  RAW_VLOG(12, "Inserted region %p..%p :",
           reinterpret_cast<void*>(region.start_addr),
           reinterpret_cast<void*>(region.end_addr));
  if (VLOG_IS_ON(12)) LogAllLocked();
// These variables are local to MemoryRegionMap::InsertRegionLocked()
// and MemoryRegionMap::HandleSavedRegionsLocked(),
// and are file-level to ensure that they are initialized at load time.

// Number of unprocessed region inserts.
static int saved_regions_count = 0;

// Unprocessed inserts (must be big enough to hold all allocations that can
// be caused by an InsertRegionLocked call).
// Region has no constructor, so that c-tor execution does not interfere
// with the any-time use of the static memory behind saved_regions.
static MemoryRegionMap::Region saved_regions[20];

inline void MemoryRegionMap::HandleSavedRegionsLocked(
    void (*insert_func)(const Region& region)) {
  while (saved_regions_count > 0) {
    // Making a local-var copy of the region argument to insert_func,
    // including its stack (without doing any memory allocations), is
    // important: in many cases the memory in saved_regions
    // will get written to during the (*insert_func)(r) call below.
    Region r = saved_regions[--saved_regions_count];
void MemoryRegionMap::RestoreSavedBucketsLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  while (saved_buckets_count_ > 0) {
    HeapProfileBucket bucket = saved_buckets_[--saved_buckets_count_];
    unsigned int hash_index =
        static_cast<unsigned int>(bucket.hash) % kHashTableSize;
    bool is_found = false;
    for (HeapProfileBucket* curr = bucket_table_[hash_index];
         curr != 0;
         curr = curr->next) {
      if ((curr->hash == bucket.hash) && (curr->depth == bucket.depth) &&
          std::equal(bucket.stack, bucket.stack + bucket.depth, curr->stack)) {
        curr->allocs += bucket.allocs;
        curr->alloc_size += bucket.alloc_size;
        curr->frees += bucket.frees;
        curr->free_size += bucket.free_size;
    if (is_found) continue;

    const size_t key_size = sizeof(bucket.stack[0]) * bucket.depth;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    std::copy(bucket.stack, bucket.stack + bucket.depth, key_copy);
    HeapProfileBucket* new_bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    memset(new_bucket, 0, sizeof(*new_bucket));
    new_bucket->hash = bucket.hash;
    new_bucket->depth = bucket.depth;
    new_bucket->stack = key_copy;
    new_bucket->next = bucket_table_[hash_index];
    bucket_table_[hash_index] = new_bucket;
inline void MemoryRegionMap::InitRegionSetLocked() {
  RAW_VLOG(12, "Initializing region set");
  regions_ = regions_rep.region_set();
  recursive_insert = true;
  new (regions_) RegionSet();
  HandleSavedRegionsLocked(&DoInsertRegionLocked);
  recursive_insert = false;
}
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // We can be called recursively, because the RegionSet constructor
  // and DoInsertRegionLocked() (called below) can call the allocator.
  // recursive_insert tells us if that's the case. When this happens,
  // region insertion information is recorded in saved_regions[]
  // and taken into account when the recursion unwinds.
  if (recursive_insert) {  // recursion: save in saved_regions
    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
             reinterpret_cast<void*>(region.start_addr),
             reinterpret_cast<void*>(region.end_addr),
             reinterpret_cast<void*>(region.caller()));
    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
    // Copy 'region' to saved_regions[saved_regions_count]
    // together with the contents of its call_stack,
    // then increment saved_regions_count.
    saved_regions[saved_regions_count++] = region;
  } else {  // not a recursive call
    if (regions_ == NULL)  // init regions_
      InitRegionSetLocked();
    recursive_insert = true;
    // Do the actual insertion work to put new regions into regions_:
    DoInsertRegionLocked(region);
    HandleSavedRegionsLocked(&DoInsertRegionLocked);
    recursive_insert = false;
  }
}
// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG  // not debug
static const int kStripFrames = 1;
#else
static const int kStripFrames = 3;
#endif
void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
  // Record start/end info about this memory acquisition call in a new region:
  Region region;
  region.Create(start, size);
  // First get the call stack info into the local variable 'region':
  int depth = max_stack_depth_ > 0
              ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
                                                max_stack_depth_, kStripFrames + 1)
              : 0;
  region.set_call_stack_depth(depth);  // record stack info fully
  RAW_VLOG(10, "New global region %p..%p from %p",
           reinterpret_cast<void*>(region.start_addr),
           reinterpret_cast<void*>(region.end_addr),
           reinterpret_cast<void*>(region.caller()));
  // Note: none of the above allocates memory.
  Lock();  // recursively lock
  InsertRegionLocked(region);
  // This will (eventually) allocate storage for and copy over the stack data
  // from region.call_stack_data_ that is pointed to by region.call_stack().
  if (bucket_table_ != NULL) {
    HeapProfileBucket* b = GetBucket(depth, region.call_stack);
    b->alloc_size += size;
    if (!recursive_insert) {
      recursive_insert = true;
      RestoreSavedBucketsLocked();
      recursive_insert = false;
void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
  if (recursive_insert) {
    // First remove the removed region from saved_regions, if it's
    // there, to prevent overrunning saved_regions in recursive
    // map/unmap call sequences, and also to avoid later inserting regions
    // which have already been unmapped.
    uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
    uintptr_t end_addr = start_addr + size;
    int put_pos = 0;
    int old_count = saved_regions_count;
    for (int i = 0; i < old_count; ++i, ++put_pos) {
      Region& r = saved_regions[i];
      if (r.start_addr == start_addr && r.end_addr == end_addr) {
        // An exact match, so it's safe to remove.
        RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size);
        --saved_regions_count;
        RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
                      "now have %d saved regions"),
                 reinterpret_cast<void*>(start_addr),
                 reinterpret_cast<void*>(end_addr),
                 saved_regions_count);
        saved_regions[put_pos] = saved_regions[i];
  if (regions_ == NULL) {  // We must have just unset the hooks,
                           // but this thread was already inside the hook.
  if (!recursive_insert) {
    HandleSavedRegionsLocked(&InsertRegionLocked);
  // first handle adding saved regions if any
  uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
  uintptr_t end_addr = start_addr + size;
  // subtract start_addr, end_addr from all the regions
  RAW_VLOG(10, "Removing global region %p..%p; have %" PRIuS " regions",
           reinterpret_cast<void*>(start_addr),
           reinterpret_cast<void*>(end_addr),
           regions_->size());
  Region sample;
  sample.SetRegionSetKey(start_addr);
  // Only iterate over the regions that might overlap start_addr..end_addr:
  for (RegionSet::iterator region = regions_->lower_bound(sample);
       region != regions_->end() && region->start_addr < end_addr;
       /*noop*/) {
    RAW_VLOG(13, "Looking at region %p..%p",
             reinterpret_cast<void*>(region->start_addr),
             reinterpret_cast<void*>(region->end_addr));
    if (start_addr <= region->start_addr &&
        region->end_addr <= end_addr) {  // full deletion
      RAW_VLOG(12, "Deleting region %p..%p",
               reinterpret_cast<void*>(region->start_addr),
               reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - region->start_addr);
      RegionSet::iterator d = region;
    } else if (region->start_addr < start_addr &&
               end_addr < region->end_addr) {  // cutting-out split
      RAW_VLOG(12, "Splitting region %p..%p in two",
               reinterpret_cast<void*>(region->start_addr),
               reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - start_addr);
      // Make another region for the start portion:
      // The new region has to be the start portion because we can't
      // just modify region->end_addr as it's the sorting key.
      Region r = *region;
      r.set_end_addr(start_addr);
      InsertRegionLocked(r);
      // cut *region from the start:
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (end_addr > region->start_addr &&
               start_addr <= region->start_addr) {  // cut from start
      RAW_VLOG(12, "Start-chopping region %p..%p",
               reinterpret_cast<void*>(region->start_addr),
               reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - region->start_addr);
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (start_addr > region->start_addr &&
               start_addr < region->end_addr) {  // cut from end
      RAW_VLOG(12, "End-chopping region %p..%p",
               reinterpret_cast<void*>(region->start_addr),
               reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - start_addr);
      // Can't just modify region->end_addr (it's the sorting key):
      Region r = *region;
      r.set_end_addr(start_addr);
      RegionSet::iterator d = region;
      // It's safe to erase before inserting since r is independent of *d:
      // r contains its own copy of the call stack.
      InsertRegionLocked(r);
  RAW_VLOG(12, "Removed region %p..%p; have %" PRIuS " regions",
           reinterpret_cast<void*>(start_addr),
           reinterpret_cast<void*>(end_addr),
           regions_->size());
  if (VLOG_IS_ON(12)) LogAllLocked();
void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
                                                  const void* const stack[],
                                                  size_t size) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (bucket_table_ == NULL) return;
  HeapProfileBucket* b = GetBucket(depth, stack);
  b->free_size += size;
void MemoryRegionMap::MmapHook(const void* result,
                               const void* start, size_t size,
                               int prot, int flags,
                               int fd, off_t offset) {
  // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe
  // snprintf reimplementation that does not malloc to pretty-print NULL
  RAW_VLOG(10, "MMap = 0x%" PRIxPTR " of %" PRIuS " at %" PRIu64 " "
               "prot %d flags %d fd %d offs %" PRId64,
           reinterpret_cast<uintptr_t>(result), size,
           reinterpret_cast<uint64>(start), prot, flags, fd,
           static_cast<int64>(offset));
  if (result != reinterpret_cast<void*>(MAP_FAILED) && size != 0) {
    RecordRegionAddition(result, size);
void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
  RAW_VLOG(10, "MUnmap of %p %" PRIuS, ptr, size);
    RecordRegionRemoval(ptr, size);
void MemoryRegionMap::MremapHook(const void* result,
                                 const void* old_addr, size_t old_size,
                                 size_t new_size, int flags,
                                 const void* new_addr) {
  RAW_VLOG(10, "MRemap = 0x%" PRIxPTR " of 0x%" PRIxPTR " %" PRIuS " "
               "to %" PRIuS " flags %d new_addr=0x%" PRIxPTR,
           (uintptr_t)result, (uintptr_t)old_addr,
           old_size, new_size, flags,
           flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
  if (result != reinterpret_cast<void*>(-1)) {
    RecordRegionRemoval(old_addr, old_size);
    RecordRegionAddition(result, new_size);
extern "C" void* __sbrk(ptrdiff_t increment);  // defined in libc

void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
  RAW_VLOG(10, "Sbrk = 0x%" PRIxPTR " of %" PRIdS,
           (uintptr_t)result, increment);
  if (result != reinterpret_cast<void*>(-1)) {
    if (increment > 0) {
      void* new_end = sbrk(0);
      RecordRegionAddition(result, reinterpret_cast<uintptr_t>(new_end) -
                                   reinterpret_cast<uintptr_t>(result));
    } else if (increment < 0) {
      void* new_end = sbrk(0);
      RecordRegionRemoval(new_end, reinterpret_cast<uintptr_t>(result) -
                                   reinterpret_cast<uintptr_t>(new_end));
void MemoryRegionMap::LogAllLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_LOG(INFO, "List of regions:");
  uintptr_t previous = 0;
  for (RegionSet::const_iterator r = regions_->begin();
       r != regions_->end(); ++r) {
    RAW_LOG(INFO, "Memory region 0x%" PRIxPTR "..0x%" PRIxPTR " "
                  "from 0x%" PRIxPTR " stack=%d",
            r->start_addr, r->end_addr, r->caller(), r->is_stack);
    RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
    // (If this fires, it must be caused by uncontrolled recursive operations
    // on regions_.)
    previous = r->end_addr;
  }
  RAW_LOG(INFO, "End of regions list");
}