// Copyright (c) 2000, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Urs Holzle <opensource@google.com>
#include "config.h"
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
// We only need malloc.h for struct mallinfo.
#ifdef HAVE_STRUCT_MALLINFO
// Malloc can be in several places on older versions of OS X.
# if defined(HAVE_MALLOC_H)
# include <malloc.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
# include <malloc/malloc.h>
# elif defined(HAVE_SYS_MALLOC_H)
# include <sys/malloc.h>
# endif
#endif
#include <errno.h>      // for errno, strerror()
#include <fcntl.h>      // for open()
#include <pthread.h>    // for pthread_self(), pthread_once()
#include <stdarg.h>     // for va_list in TracePrintf()
#include <stdio.h>
#include <stdlib.h>     // for abort()
#include <string.h>     // for memset(), memcmp()
#ifdef HAVE_MMAP
#include <sys/mman.h>   // for mmap(), mprotect(), munmap()
#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>       // for time() in the trace-log header
#include <unistd.h>     // for write(), getpagesize()

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>
#include <gperftools/stacktrace.h>
#include "addressmap-inl.h"
#include "base/abort.h"
#include "base/commandlineflags.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/spinlock.h"
#include "malloc_hook-inl.h"
#include "symbolize.h"

#define TCMALLOC_USING_DEBUGALLOCATION
#include "tcmalloc.cc"
// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// ========================================================================= //

DEFINE_bool(malloctrace,
            EnvToBool("TCMALLOC_TRACE", false),
            "Enables memory (de)allocation tracing to /tmp/google.alloc.");
#ifdef HAVE_MMAP
DEFINE_bool(malloc_page_fence,
            EnvToBool("TCMALLOC_PAGE_FENCE", false),
            "Enables putting of memory allocations at page boundaries "
            "with a guard page following the allocation (to catch buffer "
            "overruns right when they happen).");
DEFINE_bool(malloc_page_fence_never_reclaim,
            EnvToBool("TCMALLOC_PAGE_FENCE_NEVER_RECLAIM", false),
            "Enables making the virtual address space inaccessible "
            "upon a deallocation instead of returning it and reusing later.");
#else
DEFINE_bool(malloc_page_fence, false, "Not usable (requires mmap)");
DEFINE_bool(malloc_page_fence_never_reclaim, false, "Not usable (requires mmap)");
#endif
DEFINE_bool(malloc_reclaim_memory,
            EnvToBool("TCMALLOC_RECLAIM_MEMORY", true),
            "If set to false, we never return memory to malloc "
            "when an object is deallocated. This ensures that all "
            "heap object addresses are unique.");
DEFINE_int32(max_free_queue_size,
             EnvToInt("TCMALLOC_MAX_FREE_QUEUE_SIZE", 10*1024*1024),
             "If greater than 0, keep freed blocks in a queue instead of "
             "releasing them to the allocator immediately.  Release them when "
             "the total size of all blocks in the queue would otherwise exceed "
             "this limit.");

DEFINE_bool(symbolize_stacktrace,
            EnvToBool("TCMALLOC_SYMBOLIZE_STACKTRACE", true),
            "Symbolize the stack trace when provided (on some error exits)");
// If we are LD_PRELOAD-ed against a non-pthreads app, then
// pthread_once won't be defined.  We declare it here, for that
// case (with weak linkage) which will cause the non-definition to
// resolve to NULL.  We can then check for NULL or not in Instance.
extern "C" int pthread_once(pthread_once_t *, void (*)(void))
    ATTRIBUTE_WEAK;
// ========================================================================= //

// A safe version of printf() that does not do any allocation and
// uses very little stack space.
static void TracePrintf(int fd, const char *fmt, ...)
  __attribute__ ((__format__ (__printf__, 2, 3)));
// The do_* functions are defined in tcmalloc/tcmalloc.cc,
// which is included before this file
// when TCMALLOC_USING_DEBUGALLOCATION is defined.
// TODO(csilvers): get rid of these now that we are tied to tcmalloc.
#define BASE_MALLOC_NEW    do_malloc
#define BASE_MALLOC        do_malloc
#define BASE_FREE          do_free
#define BASE_MALLOC_STATS  do_malloc_stats
#define BASE_MALLOPT       do_mallopt
#define BASE_MALLINFO      do_mallinfo
// ========================================================================= //

// A circular buffer to hold freed blocks of memory.  MallocBlock::Deallocate
// (below) pushes blocks into this queue instead of returning them to the
// underlying allocator immediately.  See MallocBlock::Deallocate for more
// details.
//
// We can't use an STL class for this because we need to be careful not to
// perform any heap de-allocations in any of the code in this class, since the
// code in MallocBlock::Deallocate is not re-entrant.
template <typename QueueEntry>
class FreeQueue {
 public:
  FreeQueue() : q_front_(0), q_back_(0) {}

  bool Full() {
    return (q_front_ + 1) % kFreeQueueSize == q_back_;
  }

  void Push(const QueueEntry& block) {
    q_[q_front_] = block;
    q_front_ = (q_front_ + 1) % kFreeQueueSize;
  }

  QueueEntry Pop() {
    RAW_CHECK(q_back_ != q_front_, "Queue is empty");
    const QueueEntry& ret = q_[q_back_];
    q_back_ = (q_back_ + 1) % kFreeQueueSize;
    return ret;
  }

  size_t size() const {
    return (q_front_ - q_back_ + kFreeQueueSize) % kFreeQueueSize;
  }

 private:
  // Maximum number of blocks kept in the free queue before being freed.
  static const int kFreeQueueSize = 1024;

  QueueEntry q_[kFreeQueueSize];
  int q_front_;
  int q_back_;
};
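
// Note: one slot of q_ is deliberately left unused, so the queue holds at
// most kFreeQueueSize - 1 entries: Full() reports true as soon as one more
// Push() would make q_front_ collide with q_back_.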
struct MallocBlockQueueEntry {
  MallocBlockQueueEntry() : block(NULL), size(0),
                            num_deleter_pcs(0), deleter_threadid(0) {}
  MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) {
    if (FLAGS_max_free_queue_size != 0 && b != NULL) {
      // Adjust the number of frames to skip (4) if you change the
      // location of this call.
      num_deleter_pcs =
          GetStackTrace(deleter_pcs,
                        sizeof(deleter_pcs) / sizeof(deleter_pcs[0]),
                        4);
      deleter_threadid = pthread_self();
    } else {
      num_deleter_pcs = 0;
      // Zero is an illegal pthread id by my reading of the pthread
      // implementation:
      deleter_threadid = 0;
    }
  }

  MallocBlock* block;
  size_t size;

  // When deleted and put in the free queue, we (flag-controlled)
  // record the stack so that if corruption is later found, we can
  // print the deleter's stack.  (These three vars add 144 bytes of
  // overhead under the LP64 data model.)
  void* deleter_pcs[16];
  int num_deleter_pcs;
  pthread_t deleter_threadid;
};
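
// Where the 144-byte figure above comes from under LP64:
// 16 pointers * 8 bytes (deleter_pcs) + 8 (num_deleter_pcs, with padding)
// + 8 (pthread_t deleter_threadid) = 144 bytes per queue entry.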
class MallocBlock {
 public:  // allocation type constants

  // Different allocation types we distinguish.
  // Note: The lower 4 bits are not random: we index kAllocName array
  // by these values masked with kAllocTypeMask;
  // the rest are "random" magic bits to help catch memory corruption.
  static const int kMallocType = 0xEFCDAB90;
  static const int kNewType = 0xFEBADC81;
  static const int kArrayNewType = 0xBCEADF72;

 private:  // constants

  // A mask used on alloc types above to get to 0, 1, 2
  static const int kAllocTypeMask = 0x3;
  // An additional bit to set in AllocType constants
  // to mark now deallocated regions.
  static const int kDeallocatedTypeBit = 0x4;

  // For better memory debugging, we initialize all storage to known
  // values, and overwrite the storage when it's deallocated:
  // Byte that fills uninitialized storage.
  static const int kMagicUninitializedByte = 0xAB;
  // Byte that fills deallocated storage.
  // NOTE: tcmalloc.cc depends on the value of kMagicDeletedByte
  //       to work around a bug in the pthread library.
  static const int kMagicDeletedByte = 0xCD;
  // A size_t (type of alloc_type_ below) in a deallocated storage
  // filled with kMagicDeletedByte.
  static const size_t kMagicDeletedSizeT =
      0xCDCDCDCD | (((size_t)0xCDCDCDCD << 16) << 16);
  // Initializer works for 32 and 64 bit size_ts;
  // "<< 16 << 16" is to fool gcc from issuing a warning
  // when size_ts are 32 bits.

  // NOTE: on Linux, you can enable malloc debugging support in libc by
  // setting the environment variable MALLOC_CHECK_ to 1 before you
  // start the program (see man malloc).

  // We use either BASE_MALLOC or mmap to make the actual allocation. In
  // order to remember which one of the two was used for any block, we store an
  // appropriate magic word next to the block.
  static const int kMagicMalloc = 0xDEADBEEF;
  static const int kMagicMMap = 0xABCDEFAB;

  // This array will be filled with 0xCD, for use with memcmp.
  static unsigned char kMagicDeletedBuffer[1024];
  static pthread_once_t deleted_buffer_initialized_;
  static bool deleted_buffer_initialized_no_pthreads_;
 private:  // data layout

  // The four fields size1_,offset_,magic1_,alloc_type_
  // should together occupy a multiple of 16 bytes. (At the
  // moment, sizeof(size_t) == 4 or 8 depending on piii vs
  // k8, and 4 of those sum to 16 or 32 bytes).
  // This, combined with BASE_MALLOC's alignment guarantees,
  // ensures that SSE types can be stored into the returned
  // block, at &size2_.
  size_t size1_;
  size_t offset_;   // normally 0 unless memaligned memory
                    // see comments in memalign() and FromRawPointer().
  size_t magic1_;
  size_t alloc_type_;
  // here comes the actual data (variable length)
  // then come the size2_ and magic2_, or a full page of mprotect-ed memory
  // if the malloc_page_fence feature is enabled.
  size_t size2_;
  int magic2_;
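
  // Resulting layout of a block when malloc_page_fence is off:
  //
  //   [size1_|offset_|magic1_|alloc_type_][user data...][size2_][magic2_]
  //   ^this                               ^data_addr()  ^size2_addr()
  //
  // With malloc_page_fence on, the trailer is replaced by an inaccessible
  // guard page that starts where size2_ would have been.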
 private:  // static data and helpers

  // Allocation map: stores the allocation type for each allocated object,
  // or the type or'ed with kDeallocatedTypeBit
  // for each formerly allocated object.
  typedef AddressMap<int> AllocMap;
  static AllocMap* alloc_map_;
  // This protects alloc_map_ and consistent state of metadata
  // for each still-allocated object in it.
  // We use spin locks instead of pthread_mutex_t locks
  // to prevent crashes via calls to pthread_mutex_(un)lock
  // for the (de)allocations coming from pthreads initialization itself.
  static SpinLock alloc_map_lock_;

  // A queue of freed blocks.  Instead of releasing blocks to the allocator
  // immediately, we put them in a queue, freeing them only when necessary
  // to keep the total size of all the freed blocks below the limit set by
  // FLAGS_max_free_queue_size.
  static FreeQueue<MallocBlockQueueEntry>* free_queue_;

  static size_t free_queue_size_;  // total size of blocks in free_queue_
  // protects free_queue_ and free_queue_size_
  static SpinLock free_queue_lock_;

  // Names of allocation types (kMallocType, kNewType, kArrayNewType)
  static const char* const kAllocName[];
  // Names of corresponding deallocation types
  static const char* const kDeallocName[];
  static const char* AllocName(int type) {
    return kAllocName[type & kAllocTypeMask];
  }

  static const char* DeallocName(int type) {
    return kDeallocName[type & kAllocTypeMask];
  }
 private:  // helper accessors

  bool IsMMapped() const { return kMagicMMap == magic1_; }

  bool IsValidMagicValue(int value) const {
    return kMagicMMap == value  ||  kMagicMalloc == value;
  }

  static size_t real_malloced_size(size_t size) {
    return size + sizeof(MallocBlock);
  }
  static size_t real_mmapped_size(size_t size) {
    return size + MallocBlock::data_offset();
  }

  size_t real_size() {
    return IsMMapped() ? real_mmapped_size(size1_) : real_malloced_size(size1_);
  }

  // NOTE: if the block is mmapped (that is, we're using the
  // malloc_page_fence option) then there's no size2 or magic2
  // (instead, the guard page begins where size2 would be).

  size_t* size2_addr() { return (size_t*)((char*)&size2_ + size1_); }
  const size_t* size2_addr() const {
    return (const size_t*)((char*)&size2_ + size1_);
  }

  int* magic2_addr() { return (int*)(size2_addr() + 1); }
  const int* magic2_addr() const { return (const int*)(size2_addr() + 1); }
 private:  // other helpers

  void Initialize(size_t size, int type) {
    RAW_CHECK(IsValidMagicValue(magic1_), "");
    // record us as allocated in the map
    alloc_map_lock_.Lock();
    if (!alloc_map_) {
      void* p = BASE_MALLOC(sizeof(AllocMap));
      alloc_map_ = new(p) AllocMap(BASE_MALLOC, BASE_FREE);
    }
    alloc_map_->Insert(data_addr(), type);
    // initialize us
    size1_ = size;
    offset_ = 0;
    alloc_type_ = type;
    if (!IsMMapped()) {
      *magic2_addr() = magic1_;
      *size2_addr() = size;
    }
    alloc_map_lock_.Unlock();
    memset(data_addr(), kMagicUninitializedByte, size);
    if (!IsMMapped()) {
      RAW_CHECK(size1_ == *size2_addr(), "should hold");
      RAW_CHECK(magic1_ == *magic2_addr(), "should hold");
    }
  }
  size_t CheckAndClear(int type) {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    if (!IsMMapped()) {
      RAW_CHECK(size1_ == *size2_addr(), "should hold");
    }
    // record us as deallocated in the map
    alloc_map_->Insert(data_addr(), type | kDeallocatedTypeBit);
    alloc_map_lock_.Unlock();
    // clear us
    const size_t size = real_size();
    memset(this, kMagicDeletedByte, size);
    return size;
  }
  void CheckLocked(int type) const {
    int map_type = 0;
    const int* found_type =
        alloc_map_ != NULL ? alloc_map_->Find(data_addr()) : NULL;
    if (found_type == NULL) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has never been allocated", data_addr());
    } else {
      map_type = *found_type;
    }
    if ((map_type & kDeallocatedTypeBit) != 0) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has been already deallocated (it was allocated with %s)",
              data_addr(), AllocName(map_type & ~kDeallocatedTypeBit));
    }
    if (alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; or else the object has been already "
                     "deallocated and our memory map has been corrupted",
              data_addr());
    }
    if (!IsValidMagicValue(magic1_)) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; "
                     "or else our memory map has been corrupted and this is a "
                     "deallocation for not (currently) heap-allocated object",
              data_addr());
    }
    if (!IsMMapped()) {
      if (size1_ != *size2_addr()) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
      if (!IsValidMagicValue(*magic2_addr())) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
    }
    if (alloc_type_ != type) {
      if ((alloc_type_ != MallocBlock::kMallocType) &&
          (alloc_type_ != MallocBlock::kNewType)    &&
          (alloc_type_ != MallocBlock::kArrayNewType)) {
        RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                       "has been corrupted", data_addr());
      }
      RAW_LOG(FATAL, "memory allocation/deallocation mismatch at %p: "
                     "allocated with %s being deallocated with %s",
              data_addr(), AllocName(alloc_type_), DeallocName(type));
    }
    if (alloc_type_ != map_type) {
      RAW_LOG(FATAL, "memory stomping bug: our memory map has been corrupted : "
                     "allocation at %p made with %s "
                     "is recorded in the map to be made with %s",
              data_addr(), AllocName(alloc_type_), AllocName(map_type));
    }
  }
 public:  // public accessors

  void* data_addr() { return (void*)&size2_; }
  const void* data_addr() const { return (const void*)&size2_; }

  static size_t data_offset() { return OFFSETOF_MEMBER(MallocBlock, size2_); }

  size_t data_size() const { return size1_; }

  void set_offset(int offset) { this->offset_ = offset; }
 public:  // our main interface

  static MallocBlock* Allocate(size_t size, int type) {
    // Prevent an integer overflow / crash with large allocation sizes.
    // TODO - Note that for a e.g. 64-bit size_t, max_size_t may not actually
    // be the maximum value, depending on how the compiler treats ~0. The worst
    // practical effect is that allocations are limited to 4Gb or so, even if
    // the address space could take more.
    static size_t max_size_t = ~0;
    if (size > max_size_t - sizeof(MallocBlock)) {
      RAW_LOG(ERROR, "Massive size passed to malloc: %"PRIuS"", size);
      return NULL;
    }
    MallocBlock* b = NULL;
    const bool use_malloc_page_fence = FLAGS_malloc_page_fence;
#ifdef HAVE_MMAP
    if (use_malloc_page_fence) {
      // Put the block towards the end of the page and make the next page
      // inaccessible. This will catch buffer overrun right when it happens.
      size_t sz = real_mmapped_size(size);
      int pagesize = getpagesize();
      int num_pages = (sz + pagesize - 1) / pagesize + 1;
      char* p = (char*) mmap(NULL, num_pages * pagesize, PROT_READ|PROT_WRITE,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        // If the allocation fails, abort rather than returning NULL to
        // malloc. This is because in most cases, the program will run out
        // of memory in this mode due to tremendous amount of wastage. There
        // is no point in propagating the error elsewhere.
        RAW_LOG(FATAL, "Out of memory: possibly due to page fence overhead: %s",
                strerror(errno));
      }
      // Mark the page after the block inaccessible
      if (mprotect(p + (num_pages - 1) * pagesize, pagesize, PROT_NONE)) {
        RAW_LOG(FATAL, "Guard page setup failed: %s", strerror(errno));
      }
      b = (MallocBlock*) (p + (num_pages - 1) * pagesize - sz);
    } else {
      b = (MallocBlock*) (type == kMallocType ?
                          BASE_MALLOC(real_malloced_size(size)) :
                          BASE_MALLOC_NEW(real_malloced_size(size)));
    }
#else
    b = (MallocBlock*) (type == kMallocType ?
                        BASE_MALLOC(real_malloced_size(size)) :
                        BASE_MALLOC_NEW(real_malloced_size(size)));
#endif

    // It would be nice to output a diagnostic on allocation failure
    // here, but logging (other than FATAL) requires allocating
    // memory, which could trigger a nasty recursion. Instead, preserve
    // malloc semantics and return NULL on failure.
    if (b != NULL) {
      b->magic1_ = use_malloc_page_fence ? kMagicMMap : kMagicMalloc;
      b->Initialize(size, type);
    }
    return b;
  }
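
  // Worked example of the fence placement above (assuming 4096-byte pages
  // and a 32-byte data_offset): for size = 100, sz = 132 and num_pages = 2,
  // so the block lands at p + 4096 - 132 and the user data ends exactly at
  // the guard page; an overrun of even a single byte faults immediately.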
  void Deallocate(int type) {
    if (IsMMapped()) {  // have to do this before CheckAndClear
#ifdef HAVE_MMAP
      int size = CheckAndClear(type);
      int pagesize = getpagesize();
      int num_pages = (size + pagesize - 1) / pagesize + 1;
      char* p = (char*) this;
      if (FLAGS_malloc_page_fence_never_reclaim  ||
          !FLAGS_malloc_reclaim_memory) {
        mprotect(p - (num_pages - 1) * pagesize + size,
                 num_pages * pagesize, PROT_NONE);
      } else {
        munmap(p - (num_pages - 1) * pagesize + size, num_pages * pagesize);
      }
#endif
    } else {
      const size_t size = CheckAndClear(type);
      if (FLAGS_malloc_reclaim_memory) {
        // Instead of freeing the block immediately, push it onto a queue of
        // recently freed blocks.  Free only enough blocks to keep from
        // exceeding the capacity of the queue or causing the total amount of
        // un-released memory in the queue from exceeding
        // FLAGS_max_free_queue_size.
        ProcessFreeQueue(this, size, FLAGS_max_free_queue_size);
      }
    }
  }
  static size_t FreeQueueSize() {
    SpinLockHolder l(&free_queue_lock_);
    return free_queue_size_;
  }
  static void ProcessFreeQueue(MallocBlock* b, size_t size,
                               int max_free_queue_size) {
    // MallocBlockQueueEntry are about 144 in size, so we can only
    // use a small array of them on the stack.
    MallocBlockQueueEntry entries[4];
    int num_entries = 0;
    MallocBlockQueueEntry new_entry(b, size);
    free_queue_lock_.Lock();
    if (free_queue_ == NULL)
      free_queue_ = new FreeQueue<MallocBlockQueueEntry>;
    RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!");

    if (b != NULL) {
      free_queue_size_ += size + sizeof(MallocBlockQueueEntry);
      free_queue_->Push(new_entry);
    }

    // Free blocks until the total size of unfreed blocks no longer exceeds
    // max_free_queue_size, and the free queue has at least one free
    // slot on it.
    while (free_queue_size_ > max_free_queue_size || free_queue_->Full()) {
      RAW_CHECK(num_entries < arraysize(entries), "entries array overflow");
      entries[num_entries] = free_queue_->Pop();
      free_queue_size_ -=
          entries[num_entries].size + sizeof(MallocBlockQueueEntry);
      num_entries++;
      if (num_entries == arraysize(entries)) {
        // The queue will not be full at this point, so it is ok to
        // release the lock.  The queue may still contain more than
        // max_free_queue_size, but this is not a strict invariant.
        free_queue_lock_.Unlock();
        for (int i = 0; i < num_entries; i++) {
          CheckForDanglingWrites(entries[i]);
          BASE_FREE(entries[i].block);
        }
        num_entries = 0;
        free_queue_lock_.Lock();
      }
    }
    RAW_CHECK(free_queue_size_ >= 0, "Free queue size went negative!");
    free_queue_lock_.Unlock();
    for (int i = 0; i < num_entries; i++) {
      CheckForDanglingWrites(entries[i]);
      BASE_FREE(entries[i].block);
    }
  }
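
  // Design note: blocks are drained in batches of arraysize(entries), and
  // CheckForDanglingWrites/BASE_FREE run only after free_queue_lock_ has
  // been released, so the underlying allocator is never entered while the
  // spin lock is held.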
  static void InitDeletedBuffer() {
    memset(kMagicDeletedBuffer, kMagicDeletedByte, sizeof(kMagicDeletedBuffer));
    deleted_buffer_initialized_no_pthreads_ = true;
  }
  static void CheckForDanglingWrites(const MallocBlockQueueEntry& queue_entry) {
    // Initialize the buffer if necessary.
    if (pthread_once)
      pthread_once(&deleted_buffer_initialized_, &InitDeletedBuffer);
    if (!deleted_buffer_initialized_no_pthreads_) {
      // This will be the case on systems that don't link in pthreads,
      // including on FreeBSD where pthread_once has a non-zero address
      // (but doesn't do anything) even when pthreads isn't linked in.
      InitDeletedBuffer();
    }

    const unsigned char* p =
        reinterpret_cast<unsigned char*>(queue_entry.block);

    static const size_t size_of_buffer = sizeof(kMagicDeletedBuffer);
    const size_t size = queue_entry.size;
    const size_t buffers = size / size_of_buffer;
    const size_t remainder = size % size_of_buffer;
    size_t buffer_idx;
    for (buffer_idx = 0; buffer_idx < buffers; ++buffer_idx) {
      CheckForCorruptedBuffer(queue_entry, buffer_idx, p, size_of_buffer);
      p += size_of_buffer;
    }
    CheckForCorruptedBuffer(queue_entry, buffer_idx, p, remainder);
  }
  static void CheckForCorruptedBuffer(const MallocBlockQueueEntry& queue_entry,
                                      size_t buffer_idx,
                                      const unsigned char* buffer,
                                      size_t size_of_buffer) {
    if (memcmp(buffer, kMagicDeletedBuffer, size_of_buffer) == 0) {
      return;
    }

    RAW_LOG(ERROR,
            "Found a corrupted memory buffer in MallocBlock (may be offset "
            "from user ptr): buffer index: %zd, buffer ptr: %p, size of "
            "buffer: %zd", buffer_idx, buffer, size_of_buffer);

    // The magic deleted buffer should only be 1024 bytes, but in case
    // this changes, let's put an upper limit on the number of debug
    // lines we'll output:
    if (size_of_buffer <= 1024) {
      for (int i = 0; i < size_of_buffer; ++i) {
        if (buffer[i] != kMagicDeletedByte) {
          RAW_LOG(ERROR, "Buffer byte %d is 0x%02x (should be 0x%02x).",
                  i, buffer[i], kMagicDeletedByte);
        }
      }
    } else {
      RAW_LOG(ERROR, "Buffer too large to print corruption.");
    }

    const MallocBlock* b = queue_entry.block;
    const size_t size = queue_entry.size;
    if (queue_entry.num_deleter_pcs > 0) {
      TracePrintf(STDERR_FILENO, "Deleted by thread %p\n",
                  reinterpret_cast<void*>(
                      PRINTABLE_PTHREAD(queue_entry.deleter_threadid)));

      // We don't want to allocate or deallocate memory here, so we use
      // placement-new.  It's ok that we don't destroy this, since we're
      // just going to error-exit below anyway.  Union is for alignment.
      union { void* alignment; char buf[sizeof(SymbolTable)]; } tablebuf;
      SymbolTable* symbolization_table = new (tablebuf.buf) SymbolTable;
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        // Symbolizes the previous address of pc because pc may be in the
        // next function.  This may happen when the function ends with
        // a call to a function annotated noreturn (e.g. CHECK).
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        symbolization_table->Add(pc - 1);
      }
      if (FLAGS_symbolize_stacktrace)
        symbolization_table->Symbolize();
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        TracePrintf(STDERR_FILENO, "    @ %p %s\n",
                    pc, symbolization_table->GetSymbol(pc - 1));
      }
    } else {
      RAW_LOG(ERROR,
              "Skipping the printing of the deleter's stack!  Its stack was "
              "not found; either the corruption occurred too early in "
              "execution to obtain a stack trace or --max_free_queue_size was "
              "set to 0.");
    }

    RAW_LOG(FATAL,
            "Memory was written to after being freed.  MallocBlock: %p, user "
            "ptr: %p, size: %zd.  If you can't find the source of the error, "
            "try using ASan (http://code.google.com/p/address-sanitizer/), "
            "Valgrind, or Purify, or study the "
            "output of the deleter's stack printed above.",
            b, b->data_addr(), size);
  }
  static MallocBlock* FromRawPointer(void* p) {
    const size_t data_offset = MallocBlock::data_offset();
    // Find the header just before client's memory.
    MallocBlock *mb = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // If mb->alloc_type_ is kMagicDeletedSizeT, we're not an ok pointer.
    if (mb->alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p has been already"
                     " deallocated; or else a word before the object has been"
                     " corrupted (memory stomping bug)", p);
    }
    // If mb->offset_ is zero (common case), mb is the real header.  If
    // mb->offset_ is non-zero, this block was allocated by memalign, and
    // mb->offset_ is the distance backwards to the real header from mb,
    // which is a fake header.  The following subtraction works for both zero
    // and non-zero values.
    return reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(mb) - mb->offset_);
  }

  static const MallocBlock* FromRawPointer(const void* p) {
    // const-safe version: we just cast about
    return FromRawPointer(const_cast<void*>(p));
  }
  // Return whether p points to memory returned by memalign.
  // Requires that p be non-zero and has been checked for sanity with
  // FromRawPointer().
  static bool IsMemaligned(const void* p) {
    const MallocBlock* mb = reinterpret_cast<const MallocBlock*>(
        reinterpret_cast<const char*>(p) - MallocBlock::data_offset());
    // If the offset is non-zero, the block was allocated by memalign
    // (see FromRawPointer above).
    return mb->offset_ != 0;
  }
  void Check(int type) const {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    alloc_map_lock_.Unlock();
  }

  static bool CheckEverything() {
    alloc_map_lock_.Lock();
    if (alloc_map_ != NULL)  alloc_map_->Iterate(CheckCallback, 0);
    alloc_map_lock_.Unlock();
    return true;  // if we get here, we're okay
  }
  static bool MemoryStats(int* blocks, size_t* total,
                          int histogram[kMallocHistogramSize]) {
    memset(histogram, 0, kMallocHistogramSize * sizeof(int));
    alloc_map_lock_.Lock();
    stats_blocks_ = 0;
    stats_total_ = 0;
    stats_histogram_ = histogram;
    if (alloc_map_ != NULL)  alloc_map_->Iterate(StatsCallback, 0);
    *blocks = stats_blocks_;
    *total = stats_total_;
    alloc_map_lock_.Unlock();
    return true;
  }
 private:  // helpers for CheckEverything and MemoryStats

  static void CheckCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      FromRawPointer(ptr)->CheckLocked(*type);
    }
  }
  // Accumulation variables for StatsCallback protected by alloc_map_lock_
  static int stats_blocks_;
  static size_t stats_total_;
  static int* stats_histogram_;

  static void StatsCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      const MallocBlock* b = FromRawPointer(ptr);
      b->CheckLocked(*type);
      ++stats_blocks_;
      size_t mysize = b->size1_;
      int entry = 0;
      stats_total_ += mysize;
      while (mysize) {
        ++entry;
        mysize >>= 1;
      }
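      // entry is now 1 + floor(log2(mysize)) for any non-zero size (e.g. a
      // 4096-byte block lands in bucket 13), so each histogram bucket
      // covers one power-of-two size range.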
      RAW_CHECK(entry < kMallocHistogramSize,
                "kMallocHistogramSize should be at least as large as log2 "
                "of the maximum process memory size");
      stats_histogram_[entry] += 1;
    }
  }
};
void DanglingWriteChecker() {
  // Clear out the remaining free queue to check for dangling writes.
  MallocBlock::ProcessFreeQueue(NULL, 0, 0);
}
// ========================================================================= //

const int MallocBlock::kMagicMalloc;
const int MallocBlock::kMagicMMap;

MallocBlock::AllocMap* MallocBlock::alloc_map_ = NULL;
SpinLock MallocBlock::alloc_map_lock_(SpinLock::LINKER_INITIALIZED);

FreeQueue<MallocBlockQueueEntry>* MallocBlock::free_queue_ = NULL;
size_t MallocBlock::free_queue_size_ = 0;
SpinLock MallocBlock::free_queue_lock_(SpinLock::LINKER_INITIALIZED);

unsigned char MallocBlock::kMagicDeletedBuffer[1024];
pthread_once_t MallocBlock::deleted_buffer_initialized_ = PTHREAD_ONCE_INIT;
bool MallocBlock::deleted_buffer_initialized_no_pthreads_ = false;

const char* const MallocBlock::kAllocName[] = {
  "malloc",
  "new",
  "new []",
  NULL,
};

const char* const MallocBlock::kDeallocName[] = {
  "free",
  "delete",
  "delete []",
  NULL,
};

int MallocBlock::stats_blocks_;
size_t MallocBlock::stats_total_;
int* MallocBlock::stats_histogram_;
// ========================================================================= //

// The following cut-down version of printf() avoids
// using stdio or ostreams.
// This is to guarantee no recursive calls into
// the allocator and to bound the stack space consumed.  (The pthread
// manager thread in linuxthreads has a very small stack,
// so fprintf can't be called.)
static void TracePrintf(int fd, const char *fmt, ...) {
  char buf[64];
  int i = 0;
  va_list ap;
  va_start(ap, fmt);
  const char *p = fmt;
  char numbuf[25];
  numbuf[sizeof(numbuf)-1] = 0;
  while (*p != '\0') {              // until end of format string
    char *s = &numbuf[sizeof(numbuf)-1];
    if (p[0] == '%' && p[1] != 0) {  // handle % formats
      int64 l = 0;
      unsigned long base = 0;
      if (*++p == 's') {                            // %s
        s = va_arg(ap, char *);
      } else if (*p == 'l' && p[1] == 'd') {        // %ld
        l = va_arg(ap, long);
        base = 10;
        p++;
      } else if (*p == 'l' && p[1] == 'u') {        // %lu
        l = va_arg(ap, unsigned long);
        base = 10;
        p++;
      } else if (*p == 'z' && p[1] == 'u') {        // %zu
        l = va_arg(ap, size_t);
        base = 10;
        p++;
      } else if (*p == 'u') {                       // %u
        l = va_arg(ap, unsigned int);
        base = 10;
      } else if (*p == 'd') {                       // %d
        l = va_arg(ap, int);
        base = 10;
      } else if (*p == 'p') {                       // %p
        l = va_arg(ap, intptr_t);
        base = 16;
      } else {
        write(STDERR_FILENO, "Unimplemented TracePrintf format\n", 33);
        write(STDERR_FILENO, p, 2);
        write(STDERR_FILENO, "\n", 1);
        abort();
      }
      p++;
      if (base != 0) {              // convert the number, backwards
        bool minus = (l < 0 && base == 10);
        uint64 ul = minus ? -l : l;
        do {
          *--s = "0123456789abcdef"[ul % base];
          ul /= base;
        } while (ul != 0);
        if (base == 16) {
          *--s = 'x';
          *--s = '0';
        } else if (minus) {
          *--s = '-';
        }
      }
    } else {                        // handle normal characters
      *--s = *p;
      p++;
    }
    while (*s != 0) {               // stage the result into buf
      if (i == sizeof(buf)) {
        write(fd, buf, i);          // flush buf when it fills up
        i = 0;
      }
      buf[i++] = *s++;
    }
  }
  if (i > 0) {
    write(fd, buf, i);              // flush whatever remains
  }
  va_end(ap);
}
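
// A sketch of how the pieces below fit together (hypothetical call):
//   TracePrintf(TraceFd(), "free\t%zu\t%p\n", size, ptr);
// formats each number backwards into numbuf and emits everything with raw
// write(2), so no stdio buffering or heap allocation is ever involved.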
// Return the file descriptor we're writing a log to
static int TraceFd() {
  static int trace_fd = -1;
  if (trace_fd == -1) {            // Open the trace file on the first call
    trace_fd = open("/tmp/google.alloc", O_CREAT|O_TRUNC|O_WRONLY, 0666);
    if (trace_fd == -1) {
      trace_fd = 2;
      TracePrintf(trace_fd,
                  "Can't open /tmp/google.alloc.  Logging to stderr.\n");
    }
    // Add a header to the log.
    TracePrintf(trace_fd, "Trace started: %lu\n",
                static_cast<unsigned long>(time(NULL)));
    TracePrintf(trace_fd,
                "func\tsize\tptr\tthread_id\tstack pcs for tools/symbolize\n");
  }
  return trace_fd;
}
// Print the hex stack dump on a single line.   PCs are separated by tabs.
static void TraceStack(void) {
  void *pcs[16];
  int n = GetStackTrace(pcs, sizeof(pcs)/sizeof(pcs[0]), 0);
  for (int i = 0; i != n; i++) {
    TracePrintf(TraceFd(), "\t%p", pcs[i]);
  }
}
// This protects MALLOC_TRACE, to make sure its info is atomically written.
static SpinLock malloc_trace_lock(SpinLock::LINKER_INITIALIZED);

#define MALLOC_TRACE(name, size, addr)                                  \
  do {                                                                  \
    if (FLAGS_malloctrace) {                                            \
      SpinLockHolder l(&malloc_trace_lock);                             \
      TracePrintf(TraceFd(), "%s\t%"PRIuS"\t%p\t%"GPRIuPTHREAD,         \
                  name, size, addr, PRINTABLE_PTHREAD(pthread_self())); \
      TraceStack();                                                     \
      TracePrintf(TraceFd(), "\n");                                     \
    }                                                                   \
  } while (0)
982 // Write the characters buf[0, ..., size-1] to
983 // the malloc trace buffer.
984 // This function is intended for debugging,
985 // and is not declared in any header file.
986 // You must insert a declaration of it by hand when you need
988 void __malloctrace_write(const char *buf
, size_t size
) {
989 if (FLAGS_malloctrace
) {
990 write(TraceFd(), buf
, size
);
// ========================================================================= //

// General debug allocation/deallocation

static inline void* DebugAllocate(size_t size, int type) {
  MallocBlock* ptr = MallocBlock::Allocate(size, type);
  if (ptr == NULL)  return NULL;
  MALLOC_TRACE("malloc", size, ptr->data_addr());
  return ptr->data_addr();
}

static inline void DebugDeallocate(void* ptr, int type) {
  MALLOC_TRACE("free",
               (ptr != 0 ? MallocBlock::FromRawPointer(ptr)->data_size() : 0),
               ptr);
  if (ptr)  MallocBlock::FromRawPointer(ptr)->Deallocate(type);
}
// ========================================================================= //

// The following functions may be called via MallocExtension::instance()
// for memory verification and statistics.
class DebugMallocImplementation : public TCMallocImplementation {
 public:
  virtual bool GetNumericProperty(const char* name, size_t* value) {
    bool result = TCMallocImplementation::GetNumericProperty(name, value);
    if (result && (strcmp(name, "generic.current_allocated_bytes") == 0)) {
      // Subtract bytes kept in the free queue
      size_t qsize = MallocBlock::FreeQueueSize();
      if (*value >= qsize) {
        *value -= qsize;
      }
    }
    return result;
  }

  virtual bool VerifyNewMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kNewType);
    return true;
  }

  virtual bool VerifyArrayNewMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kArrayNewType);
    return true;
  }

  virtual bool VerifyMallocMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kMallocType);
    return true;
  }

  virtual bool VerifyAllMemory() {
    return MallocBlock::CheckEverything();
  }

  virtual bool MallocMemoryStats(int* blocks, size_t* total,
                                 int histogram[kMallocHistogramSize]) {
    return MallocBlock::MemoryStats(blocks, total, histogram);
  }

  virtual size_t GetEstimatedAllocatedSize(size_t size) {
    return size;
  }

  virtual size_t GetAllocatedSize(const void* p) {
    if (p) {
      RAW_CHECK(GetOwnership(p) != MallocExtension::kNotOwned,
                "ptr not allocated by tcmalloc");
      return MallocBlock::FromRawPointer(p)->data_size();
    }
    return 0;
  }

  virtual MallocExtension::Ownership GetOwnership(const void* p) {
    if (p) {
      const MallocBlock* mb = MallocBlock::FromRawPointer(p);
      return TCMallocImplementation::GetOwnership(mb);
    }
    return MallocExtension::kNotOwned;   // nobody owns NULL
  }

  virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
    static const char* kDebugFreeQueue = "debug.free_queue";

    TCMallocImplementation::GetFreeListSizes(v);

    MallocExtension::FreeListInfo i;
    i.type = kDebugFreeQueue;
    i.min_object_size = 0;
    i.max_object_size = numeric_limits<size_t>::max();
    i.total_bytes_free = MallocBlock::FreeQueueSize();
    v->push_back(i);
  }
};
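
// How this surfaces to clients, via the standard MallocExtension API
// (hypothetical snippet):
//   size_t n;
//   MallocExtension::instance()->GetNumericProperty(
//       "generic.current_allocated_bytes", &n);
// With this file linked in, n excludes the bytes parked in the debug free
// queue, per the GetNumericProperty override above.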
static DebugMallocImplementation debug_malloc_implementation;

REGISTER_MODULE_INITIALIZER(debugallocation, {
  // Either we or valgrind will control memory management.  We
  // register our extension if we're the winner. Otherwise let
  // Valgrind use its own malloc (so don't register our extension).
  if (!RunningOnValgrind()) {
    MallocExtension::Register(&debug_malloc_implementation);
  }
});

REGISTER_MODULE_DESTRUCTOR(debugallocation, {
  if (!RunningOnValgrind()) {
    // When the program exits, check all blocks still in the free
    // queue for corruption.
    DanglingWriteChecker();
  }
});
// ========================================================================= //

// This is mostly the same as cpp_alloc in tcmalloc.cc.
// TODO(csilvers): change Allocate() above to call cpp_alloc, so we
// don't have to reproduce the logic here.  To make tc_new_mode work
// properly, I think we'll need to separate out the logic of throwing
// from the logic of calling the new-handler.
inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) {
  for (;;) {
    void* p = DebugAllocate(size, new_type);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}
inline void* do_debug_malloc_or_debug_cpp_alloc(size_t size) {
  return tc_new_mode ? debug_cpp_alloc(size, MallocBlock::kMallocType, true)
                     : DebugAllocate(size, MallocBlock::kMallocType);
}
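
// tc_new_mode is defined in tcmalloc.cc (set via tc_set_new_mode); when it
// is on, malloc failures behave like operator new and consult the
// new-handler before giving up, which is why the cpp_alloc path is taken.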
// Exported routines

extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
  void* ptr = do_debug_malloc_or_debug_cpp_alloc(size);
  MallocHook::InvokeNewHook(ptr, size);
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  DebugDeallocate(ptr, MallocBlock::kMallocType);
}
extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t count, size_t size) __THROW {
  // Overflow check: reject if count * size wrapped around.
  const size_t total_size = count * size;
  if (size != 0 && total_size / size != count) return NULL;

  void* block = do_debug_malloc_or_debug_cpp_alloc(total_size);
  MallocHook::InvokeNewHook(block, total_size);
  if (block)  memset(block, 0, total_size);
  return block;
}

extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  DebugDeallocate(ptr, MallocBlock::kMallocType);
}
extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) __THROW {
  if (ptr == NULL) {
    ptr = do_debug_malloc_or_debug_cpp_alloc(size);
    MallocHook::InvokeNewHook(ptr, size);
    return ptr;
  }
  MallocBlock* old = MallocBlock::FromRawPointer(ptr);
  old->Check(MallocBlock::kMallocType);
  if (MallocBlock::IsMemaligned(ptr)) {
    RAW_LOG(FATAL, "realloc/memalign mismatch at %p: "
                   "non-NULL pointers passed to realloc must be obtained "
                   "from malloc, calloc, or realloc", ptr);
  }
  if (size == 0) {
    MallocHook::InvokeDeleteHook(ptr);
    DebugDeallocate(ptr, MallocBlock::kMallocType);
    return NULL;
  }
  MallocBlock* p = MallocBlock::Allocate(size, MallocBlock::kMallocType);

  // If realloc fails we are to leave the old block untouched and
  // return null.
  if (p == NULL)  return NULL;

  memcpy(p->data_addr(), old->data_addr(),
         (old->data_size() < size) ? old->data_size() : size);
  MallocHook::InvokeDeleteHook(ptr);
  MallocHook::InvokeNewHook(p->data_addr(), size);
  DebugDeallocate(ptr, MallocBlock::kMallocType);
  MALLOC_TRACE("realloc", p->data_size(), p->data_addr());
  return p->data_addr();
}
extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
  void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, false);
  MallocHook::InvokeNewHook(ptr, size);
  if (ptr == NULL) {
    RAW_LOG(FATAL, "Unable to allocate %"PRIuS" bytes: new failed.", size);
  }
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size,
                                                   const std::nothrow_t&) __THROW {
  void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, true);
  MallocHook::InvokeNewHook(ptr, size);
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  DebugDeallocate(p, MallocBlock::kNewType);
}
// Some STL implementations explicitly invoke this.
// It is completely equivalent to a normal delete (delete never throws).
extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p,
                                                     const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  DebugDeallocate(p, MallocBlock::kNewType);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
  void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, false);
  MallocHook::InvokeNewHook(ptr, size);
  if (ptr == NULL) {
    RAW_LOG(FATAL, "Unable to allocate %"PRIuS" bytes: new[] failed.", size);
  }
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
                                                        const std::nothrow_t&)
    __THROW {
  void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, true);
  MallocHook::InvokeNewHook(ptr, size);
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  DebugDeallocate(p, MallocBlock::kArrayNewType);
}

// Some STL implementations explicitly invoke this.
// It is completely equivalent to a normal delete (delete never throws).
extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
                                                          const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  DebugDeallocate(p, MallocBlock::kArrayNewType);
}
// Round "value" up to next "alignment" boundary.
// Requires that "alignment" be a power of two.
static intptr_t RoundUp(intptr_t value, intptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
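
// For example, RoundUp(13, 8) == (13 + 7) & ~7 == 20 & ~7 == 16.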
// This is mostly the same as do_memalign in tcmalloc.cc.
static void *do_debug_memalign(size_t alignment, size_t size) {
  // Allocate >= size bytes aligned on "alignment" boundary
  // "alignment" is a power of two.
  void *p = 0;
  RAW_CHECK((alignment & (alignment-1)) == 0, "must be power of two");
  const size_t data_offset = MallocBlock::data_offset();
  // Allocate "alignment-1" extra bytes to ensure alignment is possible, and
  // a further data_offset bytes for an additional fake header.
  size_t extra_bytes = data_offset + alignment - 1;
  if (size + extra_bytes < size) return NULL;         // Overflow
  p = DebugAllocate(size + extra_bytes, MallocBlock::kMallocType);
  if (p != 0) {
    intptr_t orig_p = reinterpret_cast<intptr_t>(p);
    // Leave data_offset bytes for fake header, and round up to meet
    // alignment.
    p = reinterpret_cast<void *>(RoundUp(orig_p + data_offset, alignment));
    // Create a fake header block with an offset_ that points back to the
    // real header.  FromRawPointer uses this value.
    MallocBlock *fake_hdr = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // offset_ is distance between real and fake headers.
    // p is now end of fake header (beginning of client area),
    // and orig_p is the end of the real header, so offset_
    // is their difference.
    fake_hdr->set_offset(reinterpret_cast<intptr_t>(p) - orig_p);
  }
  return p;
}
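
// Resulting memalign layout:
//   [real header][padding][fake header][aligned user data]
// FromRawPointer() finds the fake header just before the user data, and its
// offset_ walks back to the real header, so free() works on these pointers.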
// This is mostly the same as cpp_memalign in tcmalloc.cc.
static void* debug_cpp_memalign(size_t align, size_t size) {
  for (;;) {
    void* p = do_debug_memalign(align, size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh)
        return 0;
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}
inline void* do_debug_memalign_or_debug_cpp_memalign(size_t align,
                                                     size_t size) {
  return tc_new_mode ? debug_cpp_memalign(align, size)
                     : do_debug_memalign(align, size);
}
extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align, size_t size) __THROW {
  void *p = do_debug_memalign_or_debug_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
// Implementation taken from tcmalloc/tcmalloc.cc
extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(void** result_ptr,
                                                    size_t align, size_t size)
    __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_debug_memalign_or_debug_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
  // Allocate >= size bytes starting on a page boundary
  void *p = do_debug_memalign_or_debug_cpp_memalign(getpagesize(), size);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
  // Round size up to a multiple of pages
  // then allocate memory on a page boundary
  int pagesize = getpagesize();
  size = RoundUp(size, pagesize);
  if (size == 0) {    // pvalloc(0) should allocate one page, according to
    size = pagesize;  // http://man.free4web.biz/man3/libmpatrol.3.html
  }
  void *p = do_debug_memalign_or_debug_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
// malloc_stats just falls through to the base implementation.
extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
  BASE_MALLOC_STATS();
}

extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
  return BASE_MALLOPT(cmd, value);
}
#ifdef HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  return BASE_MALLINFO();
}
#endif

extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
  return MallocExtension::instance()->GetAllocatedSize(ptr);
}