// Copyright (c) 2000, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Urs Holzle <opensource@google.com>

#include "config.h"
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
// We only need malloc.h for struct mallinfo.
#ifdef HAVE_STRUCT_MALLINFO
// Malloc can be in several places on older versions of OS X.
# if defined(HAVE_MALLOC_H)
# include <malloc.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
# include <malloc/malloc.h>
# elif defined(HAVE_SYS_MALLOC_H)
# include <sys/malloc.h>
# endif
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>
#include <gperftools/stacktrace.h>
#include "addressmap-inl.h"
#include "base/abort.h"
#include "base/commandlineflags.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/spinlock.h"
#include "malloc_hook-inl.h"
#include "symbolize.h"

#define TCMALLOC_USING_DEBUGALLOCATION
#include "tcmalloc.cc"

// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

// ========================================================================= //

DEFINE_bool(malloctrace,
            EnvToBool("TCMALLOC_TRACE", false),
            "Enables memory (de)allocation tracing to /tmp/google.alloc.");
#ifdef HAVE_MMAP
DEFINE_bool(malloc_page_fence,
            EnvToBool("TCMALLOC_PAGE_FENCE", false),
            "Enables putting of memory allocations at page boundaries "
            "with a guard page following the allocation (to catch buffer "
            "overruns right when they happen).");
DEFINE_bool(malloc_page_fence_never_reclaim,
            EnvToBool("TCMALLOC_PAGE_FENCE_NEVER_RECLAIM", false),
            "Enables making the virtual address space inaccessible "
            "upon a deallocation instead of returning it and reusing later.");
#else
DEFINE_bool(malloc_page_fence, false, "Not usable (requires mmap)");
DEFINE_bool(malloc_page_fence_never_reclaim, false, "Not usable (requires mmap)");
#endif
DEFINE_bool(malloc_reclaim_memory,
            EnvToBool("TCMALLOC_RECLAIM_MEMORY", true),
            "If set to false, we never return memory to malloc "
            "when an object is deallocated.  This ensures that all "
            "heap object addresses are unique.");
DEFINE_int32(max_free_queue_size,
             EnvToInt("TCMALLOC_MAX_FREE_QUEUE_SIZE", 10*1024*1024),
             "If greater than 0, keep freed blocks in a queue instead of "
             "releasing them to the allocator immediately.  Release them when "
             "the total size of all blocks in the queue would otherwise exceed "
             "this limit.");

DEFINE_bool(symbolize_stacktrace,
            EnvToBool("TCMALLOC_SYMBOLIZE_STACKTRACE", true),
            "Symbolize the stack trace when provided (on some error exits)");

// ========================================================================= //

// A safe version of printf() that does not do any allocation and
// uses very little stack space.
static void TracePrintf(int fd, const char *fmt, ...)
#ifdef __GNUC__
  __attribute__ ((__format__ (__printf__, 2, 3)));
#else
  ;
#endif

// The do_* functions are defined in tcmalloc/tcmalloc.cc,
// which is #included above with TCMALLOC_USING_DEBUGALLOCATION defined.
// TODO(csilvers): get rid of these now that we are tied to tcmalloc.
#define BASE_MALLOC_NEW    do_malloc
#define BASE_MALLOC        do_malloc
#define BASE_FREE          do_free
#define BASE_MALLOC_STATS  do_malloc_stats
#define BASE_MALLOPT       do_mallopt
#define BASE_MALLINFO      do_mallinfo

// ========================================================================= //

class MallocBlock;

// A circular buffer to hold freed blocks of memory.  MallocBlock::Deallocate
// (below) pushes blocks into this queue instead of returning them to the
// underlying allocator immediately.  See MallocBlock::Deallocate for more
// information.
//
// We can't use an STL class for this because we need to be careful not to
// perform any heap de-allocations in any of the code in this class, since the
// code in MallocBlock::Deallocate is not re-entrant.
template <typename QueueEntry>
class FreeQueue {
 public:
  FreeQueue() : q_front_(0), q_back_(0) {}

  bool Full() {
    return (q_front_ + 1) % kFreeQueueSize == q_back_;
  }

  void Push(const QueueEntry& block) {
    q_[q_front_] = block;
    q_front_ = (q_front_ + 1) % kFreeQueueSize;
  }

  QueueEntry Pop() {
    RAW_CHECK(q_back_ != q_front_, "Queue is empty");
    const QueueEntry& ret = q_[q_back_];
    q_back_ = (q_back_ + 1) % kFreeQueueSize;
    return ret;
  }

  size_t size() const {
    return (q_front_ - q_back_ + kFreeQueueSize) % kFreeQueueSize;
  }

 private:
  // Maximum number of blocks kept in the free queue before being freed.
  static const int kFreeQueueSize = 1024;

  QueueEntry q_[kFreeQueueSize];
  int q_front_;
  int q_back_;
};
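
// A usage sketch of the circular buffer above, for illustration only (the
// real instantiation below uses MallocBlockQueueEntry as the entry type):
//
//   FreeQueue<int> q;
//   if (!q.Full()) q.Push(7);   // callers must check Full() before Push()
//   int x = q.Pop();            // RAW_CHECKs against popping when empty
//
// One slot is sacrificed to distinguish "full" from "empty", so the queue
// holds at most kFreeQueueSize - 1 entries at any time.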

struct MallocBlockQueueEntry {
  MallocBlockQueueEntry() : block(NULL), size(0),
                            num_deleter_pcs(0), deleter_threadid(0) {}
  MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) {
    if (FLAGS_max_free_queue_size != 0 && b != NULL) {
      // Adjust the number of frames to skip (4) if you change the
      // location of this call.
      num_deleter_pcs =
          GetStackTrace(deleter_pcs,
                        sizeof(deleter_pcs) / sizeof(deleter_pcs[0]),
                        4);
      deleter_threadid = pthread_self();
    } else {
      num_deleter_pcs = 0;
      // Zero is an illegal pthread id by my reading of the pthread
      // implementation:
      deleter_threadid = 0;
    }
  }

  MallocBlock* block;
  size_t size;

  // When deleted and put in the free queue, we (flag-controlled)
  // record the stack so that if corruption is later found, we can
  // print the deleter's stack.  (These three vars add 144 bytes of
  // overhead under the LP64 data model.)
  void* deleter_pcs[16];
  int num_deleter_pcs;
  pthread_t deleter_threadid;
};
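
// Where the 144 bytes in the comment above come from, assuming LP64 (8-byte
// pointers): 16 * 8 = 128 bytes of deleter_pcs, plus 4 bytes of
// num_deleter_pcs and 8 bytes of pthread_t, is 140 bytes, padded to 144
// for alignment.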

class MallocBlock {
 public:  // allocation type constants

  // Different allocation types we distinguish.
  // Note: The lower 4 bits are not random: we index kAllocName array
  // by these values masked with kAllocTypeMask;
  // the rest are "random" magic bits to help catch memory corruption.
  static const int kMallocType = 0xEFCDAB90;
  static const int kNewType = 0xFEBADC81;
  static const int kArrayNewType = 0xBCEADF72;

 private:  // constants

  // A mask used on alloc types above to get to 0, 1, 2
  static const int kAllocTypeMask = 0x3;
  // An additional bit to set in AllocType constants
  // to mark now deallocated regions.
  static const int kDeallocatedTypeBit = 0x4;

  // For better memory debugging, we initialize all storage to known
  // values, and overwrite the storage when it's deallocated:
  // Byte that fills uninitialized storage.
  static const int kMagicUninitializedByte = 0xAB;
  // Byte that fills deallocated storage.
  // NOTE: tcmalloc.cc depends on the value of kMagicDeletedByte
  //       to work around a bug in the pthread library.
  static const int kMagicDeletedByte = 0xCD;
  // A size_t (type of alloc_type_ below) in a deallocated storage
  // filled with kMagicDeletedByte.  The initializer works for both
  // 32 and 64 bit size_ts; "<< 16 << 16" is to keep gcc from issuing
  // a warning when size_ts are 32 bits.
  static const size_t kMagicDeletedSizeT =
      0xCDCDCDCD | (((size_t)0xCDCDCDCD << 16) << 16);

  // NOTE: on Linux, you can enable malloc debugging support in libc by
  // setting the environment variable MALLOC_CHECK_ to 1 before you
  // start the program (see man malloc).

  // We use either BASE_MALLOC or mmap to make the actual allocation.  In
  // order to remember which one of the two was used for any block, we store
  // an appropriate magic word next to the block.
  static const int kMagicMalloc = 0xDEADBEEF;
  static const int kMagicMMap = 0xABCDEFAB;

  // This array will be filled with 0xCD, for use with memcmp.
  static unsigned char kMagicDeletedBuffer[1024];
  static pthread_once_t deleted_buffer_initialized_;
  static bool deleted_buffer_initialized_no_pthreads_;

 private:  // data layout

  // The four fields size1_, offset_, magic1_, alloc_type_ should together
  // occupy a multiple of 16 bytes.  (At the moment, sizeof(size_t) == 4 or 8
  // depending on piii vs k8, and 4 of those sum to 16 or 32 bytes.)
  // This, combined with BASE_MALLOC's alignment guarantees,
  // ensures that SSE types can be stored into the returned
  // block, at &size2_.
  size_t size1_;
  size_t offset_;   // normally 0 unless memaligned memory
                    // see comments in memalign() and FromRawPointer().
  size_t magic1_;
  size_t alloc_type_;
  // here comes the actual data (variable length)
  // ...
  // then come the size2_ and magic2_, or a full page of mprotect-ed memory
  // if the malloc_page_fence feature is enabled.
  size_t size2_;
  int magic2_;
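
  // An illustrative picture of the resulting layout for a regular
  // (non-mmapped) block, derived from the declarations above:
  //
  //   | size1_ | offset_ | magic1_ | alloc_type_ | user data ... | size2 | magic2 |
  //                                                ^-- data_addr()
  //
  // The trailing size2/magic2 copies live at size2_addr()/magic2_addr(),
  // i.e. size1_ bytes past the declared size2_ field.  When the block is
  // mmapped (malloc_page_fence), the guard page starts where size2 would
  // have been, so the trailing copies are not written in that case.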

 private:  // static data and helpers

  // Allocation map: stores the allocation type for each allocated object,
  // or the type or'ed with kDeallocatedTypeBit
  // for each formerly allocated object.
  typedef AddressMap<int> AllocMap;
  static AllocMap* alloc_map_;
  // This protects alloc_map_ and consistent state of metadata
  // for each still-allocated object in it.
  // We use spin locks instead of pthread_mutex_t locks
  // to prevent crashes via calls to pthread_mutex_(un)lock
  // for the (de)allocations coming from pthreads initialization itself.
  static SpinLock alloc_map_lock_;

  // A queue of freed blocks.  Instead of releasing blocks to the allocator
  // immediately, we put them in a queue, freeing them only when necessary
  // to keep the total size of all the freed blocks below the limit set by
  // FLAGS_max_free_queue_size.
  static FreeQueue<MallocBlockQueueEntry>* free_queue_;

  static size_t free_queue_size_;  // total size of blocks in free_queue_
  // protects free_queue_ and free_queue_size_
  static SpinLock free_queue_lock_;

  // Names of allocation types (kMallocType, kNewType, kArrayNewType)
  static const char* const kAllocName[];
  // Names of corresponding deallocation types
  static const char* const kDeallocName[];

  static const char* AllocName(int type) {
    return kAllocName[type & kAllocTypeMask];
  }

  static const char* DeallocName(int type) {
    return kDeallocName[type & kAllocTypeMask];
  }

 private:  // helper accessors

  bool IsMMapped() const { return kMagicMMap == magic1_; }

  bool IsValidMagicValue(int value) const {
    return kMagicMMap == value || kMagicMalloc == value;
  }

  static size_t real_malloced_size(size_t size) {
    return size + sizeof(MallocBlock);
  }

  static size_t real_mmapped_size(size_t size) {
    return size + MallocBlock::data_offset();
  }

  size_t real_size() {
    return IsMMapped() ? real_mmapped_size(size1_) : real_malloced_size(size1_);
  }

  // NOTE: if the block is mmapped (that is, we're using the
  // malloc_page_fence option) then there's no size2 or magic2
  // (instead, the guard page begins where size2 would be).

  size_t* size2_addr() { return (size_t*)((char*)&size2_ + size1_); }
  const size_t* size2_addr() const {
    return (const size_t*)((char*)&size2_ + size1_);
  }

  int* magic2_addr() { return (int*)(size2_addr() + 1); }
  const int* magic2_addr() const { return (const int*)(size2_addr() + 1); }

 private:  // other helpers

  void Initialize(size_t size, int type) {
    RAW_CHECK(IsValidMagicValue(magic1_), "");
    // record us as allocated in the map
    alloc_map_lock_.Lock();
    if (!alloc_map_) {
      void* p = BASE_MALLOC(sizeof(AllocMap));
      alloc_map_ = new(p) AllocMap(BASE_MALLOC, BASE_FREE);
    }
    alloc_map_->Insert(data_addr(), type);
    // initialize us
    size1_ = size;
    offset_ = 0;
    alloc_type_ = type;
    if (!IsMMapped()) {
      *magic2_addr() = magic1_;
      *size2_addr() = size;
    }
    alloc_map_lock_.Unlock();
    memset(data_addr(), kMagicUninitializedByte, size);
    if (!IsMMapped()) {
      RAW_CHECK(size1_ == *size2_addr(), "should hold");
      RAW_CHECK(magic1_ == *magic2_addr(), "should hold");
    }
  }

  size_t CheckAndClear(int type) {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    if (!IsMMapped()) {
      RAW_CHECK(size1_ == *size2_addr(), "should hold");
    }
    // record us as deallocated in the map
    alloc_map_->Insert(data_addr(), type | kDeallocatedTypeBit);
    alloc_map_lock_.Unlock();
    // clear us
    const size_t size = real_size();
    memset(this, kMagicDeletedByte, size);
    return size;
  }

  void CheckLocked(int type) const {
    int map_type = 0;
    const int* found_type =
        alloc_map_ != NULL ? alloc_map_->Find(data_addr()) : NULL;
    if (found_type == NULL) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has never been allocated", data_addr());
    } else {
      map_type = *found_type;
    }
    if ((map_type & kDeallocatedTypeBit) != 0) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has been already deallocated (it was allocated with %s)",
              data_addr(), AllocName(map_type & ~kDeallocatedTypeBit));
    }
    if (alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; or else the object has been already "
                     "deallocated and our memory map has been corrupted",
              data_addr());
    }
    if (!IsValidMagicValue(magic1_)) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; "
                     "or else our memory map has been corrupted and this is a "
                     "deallocation for not (currently) heap-allocated object",
              data_addr());
    }
    if (!IsMMapped()) {
      if (size1_ != *size2_addr()) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
      if (!IsValidMagicValue(*magic2_addr())) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
    }
    if (alloc_type_ != type) {
      if ((alloc_type_ != MallocBlock::kMallocType) &&
          (alloc_type_ != MallocBlock::kNewType) &&
          (alloc_type_ != MallocBlock::kArrayNewType)) {
        RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                       "has been corrupted", data_addr());
      }
      RAW_LOG(FATAL, "memory allocation/deallocation mismatch at %p: "
                     "allocated with %s being deallocated with %s",
              data_addr(), AllocName(alloc_type_), DeallocName(type));
    }
    if (alloc_type_ != map_type) {
      RAW_LOG(FATAL, "memory stomping bug: our memory map has been corrupted: "
                     "allocation at %p made with %s "
                     "is recorded in the map to be made with %s",
              data_addr(), AllocName(alloc_type_), AllocName(map_type));
    }
  }

 public:  // public accessors

  void* data_addr() { return (void*)&size2_; }
  const void* data_addr() const { return (const void*)&size2_; }

  static size_t data_offset() { return OFFSETOF_MEMBER(MallocBlock, size2_); }

  size_t data_size() const { return size1_; }

  void set_offset(int offset) { this->offset_ = offset; }

 public:  // our main interface

  static MallocBlock* Allocate(size_t size, int type) {
    // Prevent an integer overflow / crash with large allocation sizes.
    // TODO - Note that for an e.g. 64-bit size_t, max_size_t may not actually
    // be the maximum value, depending on how the compiler treats ~0.  The
    // worst practical effect is that allocations are limited to 4Gb or so,
    // even if the address space could take more.
    static size_t max_size_t = ~0;
    if (size > max_size_t - sizeof(MallocBlock)) {
      RAW_LOG(ERROR, "Massive size passed to malloc: %"PRIuS"", size);
      return NULL;
    }
    MallocBlock* b = NULL;
    const bool use_malloc_page_fence = FLAGS_malloc_page_fence;
#ifdef HAVE_MMAP
    if (use_malloc_page_fence) {
      // Put the block towards the end of the page and make the next page
      // inaccessible.  This will catch buffer overrun right when it happens.
      size_t sz = real_mmapped_size(size);
      int pagesize = getpagesize();
      int num_pages = (sz + pagesize - 1) / pagesize + 1;
      char* p = (char*) mmap(NULL, num_pages * pagesize, PROT_READ|PROT_WRITE,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        // If the allocation fails, abort rather than returning NULL to
        // malloc.  This is because in most cases, the program will run out
        // of memory in this mode due to tremendous amount of wastage.  There
        // is no point in propagating the error elsewhere.
        RAW_LOG(FATAL, "Out of memory: possibly due to page fence overhead: %s",
                strerror(errno));
      }
      // Mark the page after the block inaccessible
      if (mprotect(p + (num_pages - 1) * pagesize, pagesize, PROT_NONE)) {
        RAW_LOG(FATAL, "Guard page setup failed: %s", strerror(errno));
      }
      b = (MallocBlock*) (p + (num_pages - 1) * pagesize - sz);
    } else {
      b = (MallocBlock*) (type == kMallocType ?
                          BASE_MALLOC(real_malloced_size(size)) :
                          BASE_MALLOC_NEW(real_malloced_size(size)));
    }
#else
    b = (MallocBlock*) (type == kMallocType ?
                        BASE_MALLOC(real_malloced_size(size)) :
                        BASE_MALLOC_NEW(real_malloced_size(size)));
#endif

    // It would be nice to output a diagnostic on allocation failure
    // here, but logging (other than FATAL) requires allocating
    // memory, which could trigger a nasty recursion.  Instead, preserve
    // malloc semantics and return NULL on failure.
    if (b != NULL) {
      b->magic1_ = use_malloc_page_fence ? kMagicMMap : kMagicMalloc;
      b->Initialize(size, type);
    }
    return b;
  }
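
  // The page-fence arithmetic above, worked through for illustration: with a
  // 4096-byte page and a request where real_mmapped_size(size) == 100,
  // num_pages is (100 + 4095) / 4096 + 1 == 2.  Two pages are mapped, the
  // second is mprotect-ed, and the block lands at p + 4096 - 100, so its
  // last byte touches the guard page and any overrun faults immediately.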

  void Deallocate(int type) {
    if (IsMMapped()) {  // have to do this before CheckAndClear
#ifdef HAVE_MMAP
      int size = CheckAndClear(type);
      int pagesize = getpagesize();
      int num_pages = (size + pagesize - 1) / pagesize + 1;
      char* p = (char*) this;
      if (FLAGS_malloc_page_fence_never_reclaim ||
          !FLAGS_malloc_reclaim_memory) {
        mprotect(p - (num_pages - 1) * pagesize + size,
                 num_pages * pagesize, PROT_NONE);
      } else {
        munmap(p - (num_pages - 1) * pagesize + size, num_pages * pagesize);
      }
#endif
    } else {
      const size_t size = CheckAndClear(type);
      if (FLAGS_malloc_reclaim_memory) {
        // Instead of freeing the block immediately, push it onto a queue of
        // recently freed blocks.  Free only enough blocks to keep from
        // exceeding the capacity of the queue or causing the total amount of
        // un-released memory in the queue from exceeding
        // FLAGS_max_free_queue_size.
        ProcessFreeQueue(this, size, FLAGS_max_free_queue_size);
      }
    }
  }

  static size_t FreeQueueSize() {
    SpinLockHolder l(&free_queue_lock_);
    return free_queue_size_;
  }

  static void ProcessFreeQueue(MallocBlock* b, size_t size,
                               int max_free_queue_size) {
    // MallocBlockQueueEntry are about 144 bytes in size, so we can only
    // use a small array of them on the stack.
    MallocBlockQueueEntry entries[4];
    int num_entries = 0;
    MallocBlockQueueEntry new_entry(b, size);
    free_queue_lock_.Lock();
    if (free_queue_ == NULL)
      free_queue_ = new FreeQueue<MallocBlockQueueEntry>;
    RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!");

    if (b != NULL) {
      free_queue_size_ += size + sizeof(MallocBlockQueueEntry);
      free_queue_->Push(new_entry);
    }

    // Free blocks until the total size of unfreed blocks no longer exceeds
    // max_free_queue_size, and the free queue has at least one free
    // space in it.
    while (free_queue_size_ > max_free_queue_size || free_queue_->Full()) {
      RAW_CHECK(num_entries < arraysize(entries), "entries array overflow");
      entries[num_entries] = free_queue_->Pop();
      free_queue_size_ -=
          entries[num_entries].size + sizeof(MallocBlockQueueEntry);
      num_entries++;
      if (num_entries == arraysize(entries)) {
        // The queue will not be full at this point, so it is ok to
        // release the lock.  The queue may still contain more than
        // max_free_queue_size, but this is not a strict invariant.
        free_queue_lock_.Unlock();
        for (int i = 0; i < num_entries; i++) {
          CheckForDanglingWrites(entries[i]);
          BASE_FREE(entries[i].block);
        }
        num_entries = 0;
        free_queue_lock_.Lock();
      }
    }
    RAW_CHECK(free_queue_size_ >= 0, "Free queue size went negative!");
    free_queue_lock_.Unlock();
    for (int i = 0; i < num_entries; i++) {
      CheckForDanglingWrites(entries[i]);
      BASE_FREE(entries[i].block);
    }
  }
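
  // Design note on the batching above: BASE_FREE may take allocator locks
  // and CheckForDanglingWrites does a full memcmp scan of each block, so
  // both run with free_queue_lock_ released; entries are drained in batches
  // of arraysize(entries) == 4 to bound the stack footprint.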

  static void InitDeletedBuffer() {
    memset(kMagicDeletedBuffer, kMagicDeletedByte, sizeof(kMagicDeletedBuffer));
    deleted_buffer_initialized_no_pthreads_ = true;
  }

  static void CheckForDanglingWrites(const MallocBlockQueueEntry& queue_entry) {
    perftools_pthread_once(&deleted_buffer_initialized_, &InitDeletedBuffer);
    if (!deleted_buffer_initialized_no_pthreads_) {
      // This will be the case on systems that don't link in pthreads,
      // including on FreeBSD where pthread_once has a non-zero address
      // (but doesn't do anything) even when pthreads isn't linked in.
      InitDeletedBuffer();
    }

    const unsigned char* p =
        reinterpret_cast<unsigned char*>(queue_entry.block);

    static const size_t size_of_buffer = sizeof(kMagicDeletedBuffer);
    const size_t size = queue_entry.size;
    const size_t buffers = size / size_of_buffer;
    const size_t remainder = size % size_of_buffer;
    size_t buffer_idx;
    for (buffer_idx = 0; buffer_idx < buffers; ++buffer_idx) {
      CheckForCorruptedBuffer(queue_entry, buffer_idx, p, size_of_buffer);
      p += size_of_buffer;
    }
    CheckForCorruptedBuffer(queue_entry, buffer_idx, p, remainder);
  }

  static void CheckForCorruptedBuffer(const MallocBlockQueueEntry& queue_entry,
                                      size_t buffer_idx,
                                      const unsigned char* buffer,
                                      size_t size_of_buffer) {
    if (memcmp(buffer, kMagicDeletedBuffer, size_of_buffer) == 0) {
      return;
    }

    RAW_LOG(ERROR,
            "Found a corrupted memory buffer in MallocBlock (may be offset "
            "from user ptr): buffer index: %zd, buffer ptr: %p, size of "
            "buffer: %zd", buffer_idx, buffer, size_of_buffer);

    // The magic deleted buffer should only be 1024 bytes, but in case
    // this changes, let's put an upper limit on the number of debug
    // lines we'll output:
    if (size_of_buffer <= 1024) {
      for (int i = 0; i < size_of_buffer; ++i) {
        if (buffer[i] != kMagicDeletedByte) {
          RAW_LOG(ERROR, "Buffer byte %d is 0x%02x (should be 0x%02x).",
                  i, buffer[i], kMagicDeletedByte);
        }
      }
    } else {
      RAW_LOG(ERROR, "Buffer too large to print corruption.");
    }

    const MallocBlock* b = queue_entry.block;
    const size_t size = queue_entry.size;
    if (queue_entry.num_deleter_pcs > 0) {
      TracePrintf(STDERR_FILENO, "Deleted by thread %p\n",
                  reinterpret_cast<void*>(
                      PRINTABLE_PTHREAD(queue_entry.deleter_threadid)));

      // We don't want to allocate or deallocate memory here, so we use
      // placement-new.  It's ok that we don't destroy this, since we're
      // just going to error-exit below anyway.  Union is for alignment.
      union { void* alignment; char buf[sizeof(SymbolTable)]; } tablebuf;
      SymbolTable* symbolization_table = new (tablebuf.buf) SymbolTable;
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        // Symbolizes the previous address of pc because pc may be in the
        // next function.  This may happen when the function ends with
        // a call to a function annotated noreturn (e.g. CHECK).
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        symbolization_table->Add(pc - 1);
      }
      if (FLAGS_symbolize_stacktrace)
        symbolization_table->Symbolize();
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        TracePrintf(STDERR_FILENO, "    @ %p %s\n",
                    pc, symbolization_table->GetSymbol(pc - 1));
      }
    } else {
      RAW_LOG(ERROR,
              "Skipping the printing of the deleter's stack!  Its stack was "
              "not found; either the corruption occurred too early in "
              "execution to obtain a stack trace or --max_free_queue_size was "
              "set to 0.");
    }

    RAW_LOG(FATAL,
            "Memory was written to after being freed.  MallocBlock: %p, user "
            "ptr: %p, size: %zd.  If you can't find the source of the error, "
            "try using ASan (http://code.google.com/p/address-sanitizer/), "
            "Valgrind, or Purify, or study the "
            "output of the deleter's stack printed above.",
            b, b->data_addr(), size);
  }

  static MallocBlock* FromRawPointer(void* p) {
    const size_t data_offset = MallocBlock::data_offset();
    // Find the header just before client's memory.
    MallocBlock *mb = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // If mb->alloc_type_ is kMagicDeletedSizeT, we're not an ok pointer.
    if (mb->alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p has been already"
                     " deallocated; or else a word before the object has been"
                     " corrupted (memory stomping bug)", p);
    }
    // If mb->offset_ is zero (common case), mb is the real header.  If
    // mb->offset_ is non-zero, this block was allocated by memalign, and
    // mb->offset_ is the distance backwards to the real header from mb,
    // which is a fake header.  The following subtraction works for both zero
    // and non-zero values.
    return reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(mb) - mb->offset_);
  }

  static const MallocBlock* FromRawPointer(const void* p) {
    // const-safe version: we just cast about
    return FromRawPointer(const_cast<void*>(p));
  }

  // Return whether p points to memory returned by memalign.
  // Requires that p be non-zero and have been checked for sanity with
  // FromRawPointer().
  static bool IsMemaligned(const void* p) {
    const MallocBlock* mb = reinterpret_cast<const MallocBlock*>(
        reinterpret_cast<const char*>(p) - MallocBlock::data_offset());
    // If the offset is non-zero, the block was allocated by memalign
    // (see FromRawPointer above).
    return mb->offset_ != 0;
  }

  void Check(int type) const {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    alloc_map_lock_.Unlock();
  }

  static bool CheckEverything() {
    alloc_map_lock_.Lock();
    if (alloc_map_ != NULL)  alloc_map_->Iterate(CheckCallback, 0);
    alloc_map_lock_.Unlock();
    return true;  // if we get here, we're okay
  }

  static bool MemoryStats(int* blocks, size_t* total,
                          int histogram[kMallocHistogramSize]) {
    memset(histogram, 0, kMallocHistogramSize * sizeof(int));
    alloc_map_lock_.Lock();
    stats_blocks_ = 0;
    stats_total_ = 0;
    stats_histogram_ = histogram;
    if (alloc_map_ != NULL)  alloc_map_->Iterate(StatsCallback, 0);
    *blocks = stats_blocks_;
    *total = stats_total_;
    alloc_map_lock_.Unlock();
    return true;
  }

 private:  // helpers for CheckEverything and MemoryStats

  static void CheckCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      FromRawPointer(ptr)->CheckLocked(*type);
    }
  }

  // Accumulation variables for StatsCallback protected by alloc_map_lock_
  static int stats_blocks_;
  static size_t stats_total_;
  static int* stats_histogram_;

  static void StatsCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      const MallocBlock* b = FromRawPointer(ptr);
      b->CheckLocked(*type);
      ++stats_blocks_;
      size_t mysize = b->size1_;
      int entry = 0;
      stats_total_ += mysize;
      while (mysize) {
        ++entry;
        mysize >>= 1;
      }
      RAW_CHECK(entry < kMallocHistogramSize,
                "kMallocHistogramSize should be at least as large as log2 "
                "of the maximum process memory size");
      stats_histogram_[entry] += 1;
    }
  }
};

void DanglingWriteChecker() {
  // Clear out the remaining free queue to check for dangling writes.
  MallocBlock::ProcessFreeQueue(NULL, 0, 0);
}

// ========================================================================= //

const int MallocBlock::kMagicMalloc;
const int MallocBlock::kMagicMMap;

MallocBlock::AllocMap* MallocBlock::alloc_map_ = NULL;
SpinLock MallocBlock::alloc_map_lock_(SpinLock::LINKER_INITIALIZED);

FreeQueue<MallocBlockQueueEntry>* MallocBlock::free_queue_ = NULL;
size_t MallocBlock::free_queue_size_ = 0;
SpinLock MallocBlock::free_queue_lock_(SpinLock::LINKER_INITIALIZED);

unsigned char MallocBlock::kMagicDeletedBuffer[1024];
pthread_once_t MallocBlock::deleted_buffer_initialized_ = PTHREAD_ONCE_INIT;
bool MallocBlock::deleted_buffer_initialized_no_pthreads_ = false;

const char* const MallocBlock::kAllocName[] = {
  "malloc",
  "new",
  "new []",
  NULL,
};

const char* const MallocBlock::kDeallocName[] = {
  "free",
  "delete",
  "delete []",
  NULL,
};

int MallocBlock::stats_blocks_;
size_t MallocBlock::stats_total_;
int* MallocBlock::stats_histogram_;

// ========================================================================= //

// The following cut-down version of printf() avoids
// using stdio or ostreams.
// This is to guarantee no recursive calls into
// the allocator and to bound the stack space consumed.  (The pthread
// manager thread in linuxthreads has a very small stack,
// so fprintf can't be called.)
static void TracePrintf(int fd, const char *fmt, ...) {
  char buf[64];
  int i = 0;
  va_list ap;
  va_start(ap, fmt);
  const char *p = fmt;
  char numbuf[25];
  numbuf[sizeof(numbuf)-1] = 0;
  while (*p != '\0') {               // until end of format string
    char *s = &numbuf[sizeof(numbuf)-1];
    if (p[0] == '%' && p[1] != 0) {  // handle % formats
      int64 l = 0;
      unsigned long base = 0;
      if (*++p == 's') {                            // %s
        s = va_arg(ap, char *);
      } else if (*p == 'l' && p[1] == 'd') {        // %ld
        l = va_arg(ap, long);
        base = 10;
        p++;
      } else if (*p == 'l' && p[1] == 'u') {        // %lu
        l = va_arg(ap, unsigned long);
        base = 10;
        p++;
      } else if (*p == 'z' && p[1] == 'u') {        // %zu
        l = va_arg(ap, size_t);
        base = 10;
        p++;
      } else if (*p == 'u') {                       // %u
        l = va_arg(ap, unsigned int);
        base = 10;
      } else if (*p == 'd') {                       // %d
        l = va_arg(ap, int);
        base = 10;
      } else if (*p == 'p') {                       // %p
        l = va_arg(ap, intptr_t);
        base = 16;
      } else {
        write(STDERR_FILENO, "Unimplemented TracePrintf format\n", 33);
        write(STDERR_FILENO, p, 2);
        write(STDERR_FILENO, "\n", 1);
        tcmalloc::Abort();
      }
      p++;
      if (base != 0) {
        bool minus = (l < 0 && base == 10);
        uint64 ul = minus? -l : l;
        do {
          *--s = "0123456789abcdef"[ul % base];
          ul /= base;
        } while (ul != 0);
        if (base == 16) {
          *--s = 'x';
          *--s = '0';
        } else if (minus) {
          *--s = '-';
        }
      }
    } else {                         // handle normal characters
      *--s = *p++;
    }
    while (*s != 0) {
      if (i == sizeof(buf)) {
        write(fd, buf, i);
        i = 0;
      }
      buf[i++] = *s++;
    }
  }
  if (i != 0) {
    write(fd, buf, i);
  }
  va_end(ap);
}
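
// Only the conversions matched above are understood (%s, %ld, %lu, %zu, %u,
// %d and %p).  An illustrative call, not one made elsewhere in this file:
//   TracePrintf(STDERR_FILENO, "block %p has %zu bytes\n", ptr, len);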

// Return the file descriptor we're writing a log to
static int TraceFd() {
  static int trace_fd = -1;
  if (trace_fd == -1) {            // Open the trace file on the first call
    trace_fd = open("/tmp/google.alloc", O_CREAT|O_TRUNC|O_WRONLY, 0666);
    if (trace_fd == -1) {
      trace_fd = 2;
      TracePrintf(trace_fd,
                  "Can't open /tmp/google.alloc.  Logging to stderr.\n");
    }
    // Add a header to the log.
    TracePrintf(trace_fd, "Trace started: %lu\n",
                static_cast<unsigned long>(time(NULL)));
    TracePrintf(trace_fd,
                "func\tsize\tptr\tthread_id\tstack pcs for tools/symbolize\n");
  }
  return trace_fd;
}

// Print the hex stack dump on a single line.  PCs are separated by tabs.
static void TraceStack(void) {
  void *pcs[16];
  int n = GetStackTrace(pcs, sizeof(pcs)/sizeof(pcs[0]), 0);
  for (int i = 0; i != n; i++) {
    TracePrintf(TraceFd(), "\t%p", pcs[i]);
  }
}

// This protects MALLOC_TRACE, to make sure its info is atomically written.
static SpinLock malloc_trace_lock(SpinLock::LINKER_INITIALIZED);

#define MALLOC_TRACE(name, size, addr)                                  \
  do {                                                                  \
    if (FLAGS_malloctrace) {                                            \
      SpinLockHolder l(&malloc_trace_lock);                             \
      TracePrintf(TraceFd(), "%s\t%"PRIuS"\t%p\t%"GPRIuPTHREAD,         \
                  name, size, addr, PRINTABLE_PTHREAD(pthread_self())); \
      TraceStack();                                                     \
      TracePrintf(TraceFd(), "\n");                                     \
    }                                                                   \
  } while (0)
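
// For illustration (values invented), a record produced by the macro above
// is one tab-separated line of the form
//   malloc<TAB>32<TAB>0x601010<TAB>1234<TAB>...stack pcs...
// i.e. name, size, address and thread id, followed by the PCs that
// TraceStack() appends.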

// ========================================================================= //

// Write the characters buf[0, ..., size-1] to
// the malloc trace buffer.
// This function is intended for debugging,
// and is not declared in any header file.
// You must insert a declaration of it by hand when you need
// to use it.
void __malloctrace_write(const char *buf, size_t size) {
  if (FLAGS_malloctrace) {
    write(TraceFd(), buf, size);
  }
}

// ========================================================================= //

// General debug allocation/deallocation

static inline void* DebugAllocate(size_t size, int type) {
  MallocBlock* ptr = MallocBlock::Allocate(size, type);
  if (ptr == NULL)  return NULL;
  MALLOC_TRACE("malloc", size, ptr->data_addr());
  return ptr->data_addr();
}

static inline void DebugDeallocate(void* ptr, int type) {
  MALLOC_TRACE("free",
               (ptr != 0 ? MallocBlock::FromRawPointer(ptr)->data_size() : 0),
               ptr);
  if (ptr)  MallocBlock::FromRawPointer(ptr)->Deallocate(type);
}

// ========================================================================= //

// The following functions may be called via MallocExtension::instance()
// for memory verification and statistics.
class DebugMallocImplementation : public TCMallocImplementation {
 public:
  virtual bool GetNumericProperty(const char* name, size_t* value) {
    bool result = TCMallocImplementation::GetNumericProperty(name, value);
    if (result && (strcmp(name, "generic.current_allocated_bytes") == 0)) {
      // Subtract bytes kept in the free queue
      size_t qsize = MallocBlock::FreeQueueSize();
      if (*value >= qsize) {
        *value -= qsize;
      }
    }
    return result;
  }

  virtual bool VerifyNewMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kNewType);
    return true;
  }

  virtual bool VerifyArrayNewMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kArrayNewType);
    return true;
  }

  virtual bool VerifyMallocMemory(const void* p) {
    if (p)  MallocBlock::FromRawPointer(p)->Check(MallocBlock::kMallocType);
    return true;
  }

  virtual bool VerifyAllMemory() {
    return MallocBlock::CheckEverything();
  }

  virtual bool MallocMemoryStats(int* blocks, size_t* total,
                                 int histogram[kMallocHistogramSize]) {
    return MallocBlock::MemoryStats(blocks, total, histogram);
  }

  virtual size_t GetEstimatedAllocatedSize(size_t size) {
    return size;
  }

  virtual size_t GetAllocatedSize(const void* p) {
    if (p) {
      RAW_CHECK(GetOwnership(p) != MallocExtension::kNotOwned,
                "ptr not allocated by tcmalloc");
      return MallocBlock::FromRawPointer(p)->data_size();
    }
    return 0;
  }

  virtual MallocExtension::Ownership GetOwnership(const void* p) {
    if (p) {
      const MallocBlock* mb = MallocBlock::FromRawPointer(p);
      return TCMallocImplementation::GetOwnership(mb);
    }
    return MallocExtension::kNotOwned;   // nobody owns NULL
  }

  virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
    static const char* kDebugFreeQueue = "debug.free_queue";

    TCMallocImplementation::GetFreeListSizes(v);

    MallocExtension::FreeListInfo i;
    i.type = kDebugFreeQueue;
    i.min_object_size = 0;
    i.max_object_size = numeric_limits<size_t>::max();
    i.total_bytes_free = MallocBlock::FreeQueueSize();
    v->push_back(i);
  }
};

static DebugMallocImplementation debug_malloc_implementation;
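
// An illustrative use of the extension object registered below, through the
// standard MallocExtension interface from <gperftools/malloc_extension.h>:
//   MallocExtension::instance()->VerifyAllMemory();
//   size_t allocated = 0;
//   MallocExtension::instance()->GetNumericProperty(
//       "generic.current_allocated_bytes", &allocated);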

REGISTER_MODULE_INITIALIZER(debugallocation, {
  // Either we or valgrind will control memory management.  We
  // register our extension if we're the winner.  Otherwise let
  // Valgrind use its own malloc (so don't register our extension).
  if (!RunningOnValgrind()) {
    MallocExtension::Register(&debug_malloc_implementation);
  }
});

REGISTER_MODULE_DESTRUCTOR(debugallocation, {
  if (!RunningOnValgrind()) {
    // When the program exits, check all blocks still in the free
    // queue for corruption.
    DanglingWriteChecker();
  }
});

// ========================================================================= //

// This is mostly the same as cpp_alloc in tcmalloc.cc.
// TODO(csilvers): change Allocate() above to call cpp_alloc, so we
// don't have to reproduce the logic here.  To make tc_new_mode work
// properly, I think we'll need to separate out the logic of throwing
// from the logic of calling the new-handler.
inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) {
  for (;;) {
    void* p = DebugAllocate(size, new_type);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

inline void* do_debug_malloc_or_debug_cpp_alloc(size_t size) {
  return tc_new_mode ? debug_cpp_alloc(size, MallocBlock::kMallocType, true)
                     : DebugAllocate(size, MallocBlock::kMallocType);
}

// Exported routines

extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
  void* ptr = do_debug_malloc_or_debug_cpp_alloc(size);
  MallocHook::InvokeNewHook(ptr, size);
  return ptr;
}

extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  DebugDeallocate(ptr, MallocBlock::kMallocType);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t count, size_t size) __THROW {
  // Overflow check
  const size_t total_size = count * size;
  if (size != 0 && total_size / size != count) return NULL;

  void* block = do_debug_malloc_or_debug_cpp_alloc(total_size);
  MallocHook::InvokeNewHook(block, total_size);
  if (block)  memset(block, 0, total_size);
  return block;
}
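
// A worked instance of the overflow check in tc_calloc above: with a 32-bit
// size_t, count == 0x10000 and size == 0x10001 multiply to 0x100010000,
// which wraps to 0x10000; then total_size / size == 0 != count, so the
// wrapped request is rejected instead of silently under-allocating.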
1188 extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
1189 MallocHook::InvokeDeleteHook(ptr);
1190 DebugDeallocate(ptr, MallocBlock::kMallocType);
1193 extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) __THROW {
1194 if (ptr == NULL) {
1195 ptr = do_debug_malloc_or_debug_cpp_alloc(size);
1196 MallocHook::InvokeNewHook(ptr, size);
1197 return ptr;
1199 MallocBlock* old = MallocBlock::FromRawPointer(ptr);
1200 old->Check(MallocBlock::kMallocType);
1201 if (MallocBlock::IsMemaligned(ptr)) {
1202 RAW_LOG(FATAL, "realloc/memalign mismatch at %p: "
1203 "non-NULL pointers passed to realloc must be obtained "
1204 "from malloc, calloc, or realloc", ptr);
1206 if (size == 0) {
1207 MallocHook::InvokeDeleteHook(ptr);
1208 DebugDeallocate(ptr, MallocBlock::kMallocType);
1209 return NULL;
1211 MallocBlock* p = MallocBlock::Allocate(size, MallocBlock::kMallocType);
1213 // If realloc fails we are to leave the old block untouched and
1214 // return null
1215 if (p == NULL) return NULL;
1217 memcpy(p->data_addr(), old->data_addr(),
1218 (old->data_size() < size) ? old->data_size() : size);
1219 MallocHook::InvokeDeleteHook(ptr);
1220 MallocHook::InvokeNewHook(p->data_addr(), size);
1221 DebugDeallocate(ptr, MallocBlock::kMallocType);
1222 MALLOC_TRACE("realloc", p->data_size(), p->data_addr());
1223 return p->data_addr();
1226 extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
1227 void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, false);
1228 MallocHook::InvokeNewHook(ptr, size);
1229 if (ptr == NULL) {
1230 RAW_LOG(FATAL, "Unable to allocate %"PRIuS" bytes: new failed.", size);
1232 return ptr;
1235 extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW {
1236 void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, true);
1237 MallocHook::InvokeNewHook(ptr, size);
1238 return ptr;
1241 extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
1242 MallocHook::InvokeDeleteHook(p);
1243 DebugDeallocate(p, MallocBlock::kNewType);
1246 // Some STL implementations explicitly invoke this.
1247 // It is completely equivalent to a normal delete (delete never throws).
1248 extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p, const std::nothrow_t&) __THROW {
1249 MallocHook::InvokeDeleteHook(p);
1250 DebugDeallocate(p, MallocBlock::kNewType);
1253 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
1254 void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, false);
1255 MallocHook::InvokeNewHook(ptr, size);
1256 if (ptr == NULL) {
1257 RAW_LOG(FATAL, "Unable to allocate %"PRIuS" bytes: new[] failed.", size);
1259 return ptr;
1262 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::nothrow_t&)
1263 __THROW {
1264 void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, true);
1265 MallocHook::InvokeNewHook(ptr, size);
1266 return ptr;
1269 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
1270 MallocHook::InvokeDeleteHook(p);
1271 DebugDeallocate(p, MallocBlock::kArrayNewType);
1274 // Some STL implementations explicitly invoke this.
1275 // It is completely equivalent to a normal delete (delete never throws).
1276 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p, const std::nothrow_t&) __THROW {
1277 MallocHook::InvokeDeleteHook(p);
1278 DebugDeallocate(p, MallocBlock::kArrayNewType);

// Round "value" up to next "alignment" boundary.
// Requires that "alignment" be a power of two.
static intptr_t RoundUp(intptr_t value, intptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
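
// For example, RoundUp(37, 16) == (37 + 15) & ~15 == 52 & ~15 == 48.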

// This is mostly the same as do_memalign in tcmalloc.cc.
static void *do_debug_memalign(size_t alignment, size_t size) {
  // Allocate >= size bytes aligned on "alignment" boundary;
  // "alignment" is a power of two.
  void *p = 0;
  RAW_CHECK((alignment & (alignment-1)) == 0, "must be power of two");
  const size_t data_offset = MallocBlock::data_offset();
  // Allocate "alignment-1" extra bytes to ensure alignment is possible, and
  // a further data_offset bytes for an additional fake header.
  size_t extra_bytes = data_offset + alignment - 1;
  if (size + extra_bytes < size) return NULL;         // Overflow
  p = DebugAllocate(size + extra_bytes, MallocBlock::kMallocType);
  if (p != 0) {
    intptr_t orig_p = reinterpret_cast<intptr_t>(p);
    // Leave data_offset bytes for fake header, and round up to meet
    // alignment.
    p = reinterpret_cast<void *>(RoundUp(orig_p + data_offset, alignment));
    // Create a fake header block with an offset_ that points back to the
    // real header.  FromRawPointer uses this value.
    MallocBlock *fake_hdr = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // offset_ is the distance between the real and fake headers.
    // p is now the end of the fake header (beginning of the client area),
    // and orig_p is the end of the real header, so offset_
    // is their difference.
    fake_hdr->set_offset(reinterpret_cast<intptr_t>(p) - orig_p);
  }
  return p;
}
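
// The memalign layout built above, pictured for illustration: the real
// header ends at orig_p, the fake header ends at p, and the client sees p.
//
//   [ real header | padding ... | fake header | aligned user data ]
//                 ^ orig_p                    ^ p
//
// FromRawPointer(p) first backs up data_offset bytes to the fake header,
// then subtracts its offset_ (p - orig_p) to land on the real header.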

// This is mostly the same as cpp_memalign in tcmalloc.cc.
static void* debug_cpp_memalign(size_t align, size_t size) {
  for (;;) {
    void* p = do_debug_memalign(align, size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh)
        return 0;

      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

inline void* do_debug_memalign_or_debug_cpp_memalign(size_t align,
                                                     size_t size) {
  return tc_new_mode ? debug_cpp_memalign(align, size)
                     : do_debug_memalign(align, size);
}
1370 extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align, size_t size) __THROW {
1371 void *p = do_debug_memalign_or_debug_cpp_memalign(align, size);
1372 MallocHook::InvokeNewHook(p, size);
1373 return p;
1376 // Implementation taken from tcmalloc/tcmalloc.cc
1377 extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(void** result_ptr, size_t align, size_t size)
1378 __THROW {
1379 if (((align % sizeof(void*)) != 0) ||
1380 ((align & (align - 1)) != 0) ||
1381 (align == 0)) {
1382 return EINVAL;
1385 void* result = do_debug_memalign_or_debug_cpp_memalign(align, size);
1386 MallocHook::InvokeNewHook(result, size);
1387 if (result == NULL) {
1388 return ENOMEM;
1389 } else {
1390 *result_ptr = result;
1391 return 0;
1395 extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
1396 // Allocate >= size bytes starting on a page boundary
1397 void *p = do_debug_memalign_or_debug_cpp_memalign(getpagesize(), size);
1398 MallocHook::InvokeNewHook(p, size);
1399 return p;
1402 extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
1403 // Round size up to a multiple of pages
1404 // then allocate memory on a page boundary
1405 int pagesize = getpagesize();
1406 size = RoundUp(size, pagesize);
1407 if (size == 0) { // pvalloc(0) should allocate one page, according to
1408 size = pagesize; // http://man.free4web.biz/man3/libmpatrol.3.html
1410 void *p = do_debug_memalign_or_debug_cpp_memalign(pagesize, size);
1411 MallocHook::InvokeNewHook(p, size);
1412 return p;
1415 // malloc_stats just falls through to the base implementation.
1416 extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
1417 BASE_MALLOC_STATS();
1420 extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
1421 return BASE_MALLOPT(cmd, value);
1424 #ifdef HAVE_STRUCT_MALLINFO
1425 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
1426 return BASE_MALLINFO();
1428 #endif
1430 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
1431 return MallocExtension::instance()->GetAllocatedSize(ptr);
1434 #if defined(OS_LINUX)
1435 extern "C" PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) {
1436 void* result = DebugAllocate(size, MallocBlock::kMallocType);
1437 MallocHook::InvokeNewHook(result, size);
1438 return result;
1440 #endif