// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page number to descriptor) can be read
//     without holding any locks, and written while holding the
//     "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
// This multi-threaded access to the pagemap is safe for fairly
// subtle reasons.  We basically assume that when an object X is
// allocated by thread A and deallocated by thread B, there must
// have been appropriate synchronization in the handoff of object
// X from thread A to thread B.  The same logic applies to pagemap_cache_.
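//
// Illustrative sketch (not part of tcmalloc): the release/acquire pair
// below is exactly the kind of handoff synchronization this assumption
// refers to when thread B frees an object that thread A allocated.
//
//   std::atomic<void*> handoff(NULL);
//   // Thread A:
//   //   void* x = malloc(100);
//   //   handoff.store(x, std::memory_order_release);   // publish x
//   // Thread B:
//   //   void* y;
//   //   while ((y = handoff.load(std::memory_order_acquire)) == NULL) {}
//   //   free(y);   // safe: A's writes are ordered before B's free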

// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
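//
// A sketch of how callers must interpret the 0-means-unknown convention
// (illustrative fragment; the real lookups appear in do_free_with_callback()
// and GetSizeWithCallback() below):
//
//   size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
//   if (cl == 0) {
//     // Not "sizeclass 0" -- the cache knows nothing about page p, so
//     // fall back to the authoritative pagemap.
//     const Span* span = Static::pageheap()->GetDescriptor(p);
//     if (span != NULL) cl = span->sizeclass;
//   }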

// Page map contains a mapping from page id to Span.
//
// If Span s occupies pages [p..q],
//      pagemap[p] == s
//      pagemap[q] == s
//      pagemap[p+1..q-1] are undefined
//      pagemap[p-1] and pagemap[q+1] are defined:
//         NULL if the corresponding page is not yet in the address space.
//         Otherwise it points to a Span.  This span may be free
//         or allocated.  If free, it is in one of pageheap's freelists.
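//
// Concrete example: if Span s covers pages [10..12], then pagemap[10] == s
// and pagemap[12] == s, pagemap[11] is undefined, and pagemap[9] and
// pagemap[13] are either NULL or point to the neighboring spans.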

// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.

#include <gperftools/tcmalloc.h>

#include <errno.h>                      // for ENOMEM, EINVAL, errno
#ifdef HAVE_SYS_CDEFS_H
#include <sys/cdefs.h>                  // for __THROW
#endif
#if defined HAVE_STDINT_H
#include <stdint.h>
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
#include <stddef.h>                     // for size_t, NULL
#include <stdlib.h>                     // for getenv
#include <string.h>                     // for strcmp, memset, strlen, etc
#ifdef HAVE_UNISTD_H
#include <unistd.h>                     // for getpagesize, write, etc
#endif
#include <algorithm>                    // for max, min
#include <limits>                       // for numeric_limits
#include <new>                          // for nothrow_t (ptr only), etc
#include <vector>                       // for vector

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>     // for MallocHook
#include "base/basictypes.h"            // for int64
#include "base/commandlineflags.h"      // for RegisterFlagValidator, etc
#include "base/dynamic_annotations.h"   // for RunningOnValgrind
#include "base/spinlock.h"              // for SpinLockHolder
#include "central_freelist.h"           // for CentralFreeListPadded
#include "common.h"                     // for StackTrace, kPageShift, etc
#include "free_list.h"                  // for FL_Init
#include "internal_logging.h"           // for ASSERT, TCMalloc_Printer, etc
#include "malloc_hook-inl.h"            // for MallocHook::InvokeNewHook, etc
#include "page_heap.h"                  // for PageHeap, PageHeap::Stats
#include "page_heap_allocator.h"        // for PageHeapAllocator
#include "span.h"                       // for Span, DLL_Prepend, etc
#include "stack_trace_table.h"          // for StackTraceTable
#include "static_vars.h"                // for Static
#include "system-alloc.h"               // for DumpSystemAllocatorStats, etc
#include "tcmalloc_guard.h"             // for TCMallocGuard
#include "thread_cache.h"               // for ThreadCache

#if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
# define WIN32_DO_PATCHING 1
#endif

// Some windows file somewhere (at least on cygwin) #define's small (!)
// For instance, <windows.h> appears to have "#define small char".
#undef small

using STL_NAMESPACE::max;
using STL_NAMESPACE::min;
using STL_NAMESPACE::numeric_limits;
using STL_NAMESPACE::vector;

#include "libc_override.h"

// __THROW is defined in glibc (via <sys/cdefs.h>).  It means,
// counter-intuitively, "This function will never throw an exception."
// It's an optional optimization tool, but we may need to use it to
// match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

using tcmalloc::AlignmentForSize;
using tcmalloc::kLog;
using tcmalloc::kCrash;
using tcmalloc::kCrashWithStats;
using tcmalloc::Log;
using tcmalloc::PageHeap;
using tcmalloc::PageHeapAllocator;
using tcmalloc::SizeMap;
using tcmalloc::Span;
using tcmalloc::StackTrace;
using tcmalloc::Static;
using tcmalloc::ThreadCache;

// ---- Functions doing validation with an extra mark.
static size_t ExcludeSpaceForMark(size_t size);
static void AddRoomForMark(size_t* size);
static void ExcludeMarkFromSize(size_t* new_size);
static void MarkAllocatedRegion(void* ptr);
static void ValidateAllocatedRegion(void* ptr, size_t cl);
// ---- End validation functions.

DECLARE_int64(tcmalloc_sample_parameter);
DECLARE_double(tcmalloc_release_rate);

// For windows, the printf we use to report large allocs is
// potentially dangerous: it could cause a malloc that would cause an
// infinite loop.  So by default we set the threshold to a huge number
// on windows, so this bad situation will never trigger.  You can
// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
// want this functionality.
#ifdef _WIN32
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
#else
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
#endif

DEFINE_int64(tcmalloc_large_alloc_report_threshold,
             EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
                        kDefaultLargeAllocReportThreshold),
             "Allocations larger than this value cause a stack "
             "trace to be dumped to stderr.  The threshold for "
             "dumping stack traces is increased by a factor of 1.125 "
             "every time we print a message so that the threshold "
             "automatically goes up by a factor of ~1000 every 60 "
             "messages.  This bounds the amount of extra logging "
             "generated by this flag.  Default value of this flag "
             "is very large and therefore you should see no extra "
             "logging unless the flag is overridden.  Set to 0 to "
             "disable reporting entirely.");

// We already declared these functions in tcmalloc.h, but we have to
// declare them again to give them an ATTRIBUTE_SECTION: we want to
// put all callers of MallocHook::Invoke* in this module into
// ATTRIBUTE_SECTION(google_malloc) section, so that
// MallocHook::GetCallerStackTrace can function accurately.
void* tc_malloc(size_t size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void tc_free(void* ptr) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_realloc(void* ptr, size_t size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_calloc(size_t nmemb, size_t size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void tc_cfree(void* ptr) __THROW
    ATTRIBUTE_SECTION(google_malloc);

void* tc_memalign(size_t __alignment, size_t __size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_valloc(size_t __size) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_pvalloc(size_t __size) __THROW
    ATTRIBUTE_SECTION(google_malloc);

void tc_malloc_stats(void) __THROW
    ATTRIBUTE_SECTION(google_malloc);
int tc_mallopt(int cmd, int value) __THROW
    ATTRIBUTE_SECTION(google_malloc);
#ifdef HAVE_STRUCT_MALLINFO
struct mallinfo tc_mallinfo(void) __THROW
    ATTRIBUTE_SECTION(google_malloc);
#endif

void* tc_new(size_t size)
    ATTRIBUTE_SECTION(google_malloc);
void tc_delete(void* p) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_newarray(size_t size)
    ATTRIBUTE_SECTION(google_malloc);
void tc_deletearray(void* p) __THROW
    ATTRIBUTE_SECTION(google_malloc);

// And the nothrow variants of these:
void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
    ATTRIBUTE_SECTION(google_malloc);
// Surprisingly, standard C++ library implementations use a
// nothrow-delete internally.  See, eg:
//   http://www.dinkumware.com/manuals/?manual=compleat&page=new.html
void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW
    ATTRIBUTE_SECTION(google_malloc);
void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW
    ATTRIBUTE_SECTION(google_malloc);

// Some non-standard extensions that we support.

// This is equivalent to
//   OS X: malloc_size()
//   glibc: malloc_usable_size()
size_t tc_malloc_size(void* p) __THROW
    ATTRIBUTE_SECTION(google_malloc);

// ----------------------- IMPLEMENTATION -------------------------------

static int tc_new_mode = 0;  // See tc_set_new_mode().

// Routines such as free() and realloc() catch some erroneous pointers
// passed to them, and invoke the below when they do.  (An erroneous pointer
// won't be caught if it's within a valid span or a stale span for which
// the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing
// required) kind of exception handling for these routines.
namespace {

void InvalidFree(void* ptr) {
  Log(kCrash, __FILE__, __LINE__, "Attempt to free invalid pointer", ptr);
}

size_t InvalidGetSizeForRealloc(const void* old_ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to realloc invalid pointer", old_ptr);
  return 0;
}

size_t InvalidGetAllocatedSize(const void* ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to get the size of an invalid pointer", ptr);
  return 0;
}

// For security reasons, we want to limit the size of allocations.
// See crbug.com/169327.
inline bool IsAllocSizePermitted(size_t alloc_size) {
  // Never allow an allocation larger than what can be indexed via an int.
  // Remove kPageSize to account for various rounding, padding and to have a
  // small margin.
  return alloc_size <= ((std::numeric_limits<int>::max)() - kPageSize);
}
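
// Illustrative consequence (hypothetical values, assuming 4 KiB pages and a
// 32-bit int): requests up to 2^31 - 1 - 4096 bytes pass the check above:
//
//   IsAllocSizePermitted(1 << 20);                       // true: 1 MiB is fine
//   IsAllocSizePermitted(static_cast<size_t>(1) << 31);  // false: too large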

}  // unnamed namespace

// Extract interesting stats
struct TCMallocStats {
  uint64_t thread_bytes;      // Bytes in thread caches
  uint64_t central_bytes;     // Bytes in central cache
  uint64_t transfer_bytes;    // Bytes in central transfer cache
  uint64_t metadata_bytes;    // Bytes alloced for metadata
  uint64_t metadata_unmapped_bytes;    // Address space reserved for metadata
                                       // but not committed.
  PageHeap::Stats pageheap;   // Stats from page heap
};

// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
                         PageHeap::SmallSpanStats* small_spans,
                         PageHeap::LargeSpanStats* large_spans) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = Static::central_cache()[cl].length();
    const int tc_length = Static::central_cache()[cl].tc_length();
    const size_t cache_overhead = Static::central_cache()[cl].OverheadBytes();
    const size_t size = static_cast<uint64_t>(
        Static::sizemap()->ByteSizeForClass(cl));
    r->central_bytes += (size * length) + cache_overhead;
    r->transfer_bytes += (size * tc_length);
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(Static::pageheap_lock());
    ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
    r->metadata_bytes = tcmalloc::metadata_system_bytes();
    r->metadata_unmapped_bytes = tcmalloc::metadata_unmapped_bytes();
    r->pageheap = Static::pageheap()->stats();
    if (small_spans != NULL) {
      Static::pageheap()->GetSmallSpanStats(small_spans);
    }
    if (large_spans != NULL) {
      Static::pageheap()->GetLargeSpanStats(large_spans);
    }
  }
}

static double PagesToMiB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}

// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  PageHeap::SmallSpanStats small;
  PageHeap::LargeSpanStats large;
  if (level >= 2) {
    ExtractStats(&stats, class_count, &small, &large);
  } else {
    ExtractStats(&stats, NULL, NULL, NULL);
  }

  static const double MiB = 1048576.0;

  const uint64_t physical_memory_used_by_metadata =
      stats.metadata_bytes - stats.metadata_unmapped_bytes;
  const uint64_t unmapped_bytes =
      stats.pageheap.unmapped_bytes + stats.metadata_unmapped_bytes;

  const uint64_t virtual_memory_used = (stats.pageheap.system_bytes
                                        + stats.metadata_bytes);
  const uint64_t physical_memory_used = virtual_memory_used - unmapped_bytes;
  const uint64_t bytes_in_use_by_app = (physical_memory_used
                                        - physical_memory_used_by_metadata
                                        - stats.pageheap.free_bytes
                                        - stats.central_bytes
                                        - stats.transfer_bytes
                                        - stats.thread_bytes);
391 "WASTE: %7.1f MiB bytes in use\n"
392 "WASTE: + %7.1f MiB committed but not used\n"
393 "WASTE: ------------\n"
394 "WASTE: = %7.1f MiB bytes committed\n"
395 "WASTE: committed/used ratio of %f\n",
396 bytes_in_use_by_app
/ MiB
,
397 (stats
.pageheap
.committed_bytes
- bytes_in_use_by_app
) / MiB
,
398 stats
.pageheap
.committed_bytes
/ MiB
,
399 stats
.pageheap
.committed_bytes
/ static_cast<double>(bytes_in_use_by_app
)
401 #ifdef TCMALLOC_SMALL_BUT_SLOW
403 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n");
406 "------------------------------------------------\n"
407 "MALLOC: %12" PRIu64
" (%7.1f MiB) Bytes in use by application\n"
408 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in page heap freelist\n"
409 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in central cache freelist\n"
410 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in transfer cache freelist\n"
411 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in thread cache freelists\n"
412 "MALLOC: ------------\n"
413 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Bytes committed\n"
414 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in malloc metadata\n"
415 "MALLOC: ------------\n"
416 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Actual memory used (physical + swap)\n"
417 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes released to OS (aka unmapped)\n"
418 "MALLOC: ------------\n"
419 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Virtual address space used\n"
421 "MALLOC: %12" PRIu64
" Spans in use\n"
422 "MALLOC: %12" PRIu64
" Thread heaps in use\n"
423 "MALLOC: %12" PRIu64
" Tcmalloc page size\n"
424 "------------------------------------------------\n"
425 "Call ReleaseFreeMemory() to release freelist memory to the OS"
426 " (via madvise()).\n"
427 "Bytes released to the OS take up virtual address space"
428 " but no physical memory.\n",
429 bytes_in_use_by_app
, bytes_in_use_by_app
/ MiB
,
430 stats
.pageheap
.free_bytes
, stats
.pageheap
.free_bytes
/ MiB
,
431 stats
.central_bytes
, stats
.central_bytes
/ MiB
,
432 stats
.transfer_bytes
, stats
.transfer_bytes
/ MiB
,
433 stats
.thread_bytes
, stats
.thread_bytes
/ MiB
,
434 stats
.pageheap
.committed_bytes
, stats
.pageheap
.committed_bytes
/ MiB
,
435 physical_memory_used_by_metadata
, physical_memory_used_by_metadata
/ MiB
,
436 physical_memory_used
, physical_memory_used
/ MiB
,
437 unmapped_bytes
, unmapped_bytes
/ MiB
,
438 virtual_memory_used
, virtual_memory_used
/ MiB
,
439 uint64_t(Static::span_allocator()->inuse()),
440 uint64_t(ThreadCache::HeapsInUse()),
441 uint64_t(kPageSize
));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    out->printf("Size class breakdown\n");
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes =
            class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n",
                    cl, Static::sizemap()->ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / MiB,
                    cumulative / MiB);
      }
    }

    // append page heap info
    int nonempty_sizes = 0;
    for (int s = 0; s < kMaxPages; s++) {
      if (small.normal_length[s] + small.returned_length[s] > 0) {
        nonempty_sizes++;
      }
    }
    out->printf("------------------------------------------------\n");
    out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n",
                nonempty_sizes, stats.pageheap.free_bytes / MiB,
                stats.pageheap.unmapped_bytes / MiB);
    out->printf("------------------------------------------------\n");
    uint64_t total_normal = 0;
    uint64_t total_returned = 0;
    for (int s = 0; s < kMaxPages; s++) {
      const int n_length = small.normal_length[s];
      const int r_length = small.returned_length[s];
      if (n_length + r_length > 0) {
        uint64_t n_pages = s * n_length;
        uint64_t r_pages = s * r_length;
        total_normal += n_pages;
        total_returned += r_pages;
        out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                    "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                    s,
                    (n_length + r_length),
                    PagesToMiB(n_pages + r_pages),
                    PagesToMiB(total_normal + total_returned),
                    PagesToMiB(r_pages),
                    PagesToMiB(total_returned));
      }
    }

    total_normal += large.normal_pages;
    total_returned += large.returned_pages;
    out->printf(">255   large * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                static_cast<unsigned int>(large.spans),
                PagesToMiB(large.normal_pages + large.returned_pages),
                PagesToMiB(total_normal + total_returned),
                PagesToMiB(large.returned_pages),
                PagesToMiB(total_returned));
  }
}

static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}

static void** DumpHeapGrowthStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(Static::pageheap_lock());
    for (StackTrace* t = Static::growth_stacks();
         t != NULL;
         t = reinterpret_cast<StackTrace*>(
             t->stack[tcmalloc::kMaxStackDepth-1])) {
      needed_slots += 3 + t->depth;
    }
    needed_slots += 100;             // Slop in case list grows
    needed_slots += needed_slots/8;  // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    Log(kLog, __FILE__, __LINE__,
        "tcmalloc: allocation failed for stack trace slots",
        needed_slots * sizeof(*result));
    return NULL;
  }

  SpinLockHolder h(Static::pageheap_lock());
  int used_slots = 0;
  for (StackTrace* t = Static::growth_stacks();
       t != NULL;
       t = reinterpret_cast<StackTrace*>(
           t->stack[tcmalloc::kMaxStackDepth-1])) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    if (used_slots + 3 + t->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(t->size);
    result[used_slots+2] = reinterpret_cast<void*>(t->depth);
    for (int d = 0; d < t->depth; d++) {
      result[used_slots+3+d] = t->stack[d];
    }
    used_slots += 3 + t->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}

static void IterateOverRanges(void* arg, MallocExtension::RangeFunction func) {
  PageID page = 1;  // Some code may assume that page==0 is never used
  bool done = false;
  while (!done) {
    // Accumulate a small number of ranges in a local buffer
    static const int kNumRanges = 16;
    static base::MallocRange ranges[kNumRanges];
    int n = 0;
    {
      SpinLockHolder h(Static::pageheap_lock());
      while (n < kNumRanges) {
        if (!Static::pageheap()->GetNextRange(page, &ranges[n])) {
          done = true;
          break;
        } else {
          uintptr_t limit = ranges[n].address + ranges[n].length;
          page = (limit + kPageSize - 1) >> kPageShift;
          n++;
        }
      }
    }

    for (int i = 0; i < n; i++) {
      (*func)(arg, &ranges[i]);
    }
  }
}

// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 private:
  // ReleaseToSystem() might release more than the requested bytes because
  // the page heap releases at the span granularity, and spans are of wildly
  // different sizes.  This member keeps track of the extra bytes released
  // so that the app can periodically call ReleaseToSystem() to release
  // memory at a constant rate.
  // NOTE: Protected by Static::pageheap_lock().
  size_t extra_bytes_released_;

 public:
  TCMallocImplementation()
      : extra_bytes_released_(0) {
  }

  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  // We may print an extra, tcmalloc-specific warning message here.
  virtual void GetHeapSample(MallocExtensionWriter* writer) {
    if (FLAGS_tcmalloc_sample_parameter == 0) {
      const char* const kWarningMsg =
          "%warn\n"
          "%warn This heap profile does not have any data in it, because\n"
          "%warn the application was run with heap sampling turned off.\n"
          "%warn To get useful data from GetHeapSample(), you must\n"
          "%warn set the environment variable TCMALLOC_SAMPLE_PARAMETER to\n"
          "%warn a positive sampling period, such as 524288.\n"
          "%warn\n";
      writer->append(kWarningMsg, strlen(kWarningMsg));
    }
    MallocExtension::GetHeapSample(writer);
  }

  virtual void** ReadStackTraces(int* sample_period) {
    tcmalloc::StackTraceTable table;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Span* sampled = Static::sampled_objects();
      for (Span* s = sampled->next; s != sampled; s = s->next) {
        table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
      }
    }
    *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
    return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
  }

  virtual void** ReadHeapGrowthStackTraces() {
    return DumpHeapGrowthStackTraces();
  }

  virtual void Ranges(void* arg, RangeFunction func) {
    IterateOverRanges(arg, func);
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.transfer_bytes
               - stats.pageheap.free_bytes
               - stats.pageheap.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // Kept for backwards compatibility.  Now defined externally as:
      //    pageheap_free_bytes + pageheap_unmapped_bytes.
      SpinLockHolder l(Static::pageheap_lock());
      PageHeap::Stats stats = Static::pageheap()->stats();
      *value = stats.free_bytes + stats.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().free_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_unmapped_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = ThreadCache::overall_thread_cache_size();
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      ThreadCache::set_overall_thread_cache_size(value);
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    ThreadCache::BecomeIdle();
  }

  virtual void MarkThreadBusy();  // Implemented below

  virtual SysAllocator* GetSystemAllocator() {
    SpinLockHolder h(Static::pageheap_lock());
    return sys_alloc;
  }

  virtual void SetSystemAllocator(SysAllocator* alloc) {
    SpinLockHolder h(Static::pageheap_lock());
    sys_alloc = alloc;
  }

  virtual void ReleaseToSystem(size_t num_bytes) {
    SpinLockHolder h(Static::pageheap_lock());
    if (num_bytes <= extra_bytes_released_) {
      // We released too much on a prior call, so don't release any
      // more this time.
      extra_bytes_released_ = extra_bytes_released_ - num_bytes;
      return;
    }
    num_bytes = num_bytes - extra_bytes_released_;
    // num_bytes might be less than one page.  If we pass zero to
    // ReleaseAtLeastNPages, it won't do anything, so we release a whole
    // page now and let extra_bytes_released_ smooth it out over time.
    Length num_pages = max<Length>(num_bytes >> kPageShift, 1);
    size_t bytes_released = Static::pageheap()->ReleaseAtLeastNPages(
        num_pages) << kPageShift;
    if (bytes_released > num_bytes) {
      extra_bytes_released_ = bytes_released - num_bytes;
    } else {
      // The PageHeap wasn't able to release num_bytes.  Don't try to
      // compensate with a big release next time.  Specifically,
      // ReleaseFreeMemory() calls ReleaseToSystem(LONG_MAX).
      extra_bytes_released_ = 0;
    }
  }
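
  // Caller-side sketch (illustrative, not part of this class): an app that
  // wants a steady ~1 MiB/s release rate can call, once per second,
  //
  //   MallocExtension::instance()->ReleaseToSystem(1 << 20);
  //
  // extra_bytes_released_ above is what keeps whole-span over-releases from
  // compounding across such periodic calls.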

  virtual void SetMemoryReleaseRate(double rate) {
    FLAGS_tcmalloc_release_rate = rate;
  }

  virtual double GetMemoryReleaseRate() {
    return FLAGS_tcmalloc_release_rate;
  }

  virtual size_t GetEstimatedAllocatedSize(size_t size) {
    if (size <= kMaxSize) {
      const size_t cl = Static::sizemap()->SizeClass(size);
      const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl);
      return alloc_size;
    } else {
      return tcmalloc::pages(size) << kPageShift;
    }
  }

  // This just calls GetSizeWithCallback, but because that's in an
  // unnamed namespace, we need to move the definition below it in the
  // file.
  virtual size_t GetAllocatedSize(const void* ptr);

  // This duplicates some of the logic in GetSizeWithCallback, but is
  // faster.  This is important on OS X, where this function is called
  // on every allocation operation.
  virtual Ownership GetOwnership(const void* ptr) {
    const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
    // The rest of tcmalloc assumes that all allocated pointers use at
    // most kAddressBits bits.  If ptr doesn't, then it definitely
    // wasn't allocated by tcmalloc.
    if ((p >> (kAddressBits - kPageShift)) > 0) {
      return kNotOwned;
    }
    size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
    if (cl != 0) {
      return kOwned;
    }
    const Span *span = Static::pageheap()->GetDescriptor(p);
    return span ? kOwned : kNotOwned;
  }

  virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
    static const char* kCentralCacheType = "tcmalloc.central";
    static const char* kTransferCacheType = "tcmalloc.transfer";
    static const char* kThreadCacheType = "tcmalloc.thread";
    static const char* kPageHeapType = "tcmalloc.page";
    static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped";
    static const char* kLargeSpanType = "tcmalloc.large";
    static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped";

    v->clear();

    // central class information
    int64 prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      size_t class_size = Static::sizemap()->ByteSizeForClass(cl);
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = class_size;
      i.total_bytes_free =
          Static::central_cache()[cl].length() * class_size;
      i.type = kCentralCacheType;
      v->push_back(i);

      // transfer cache
      i.total_bytes_free =
          Static::central_cache()[cl].tc_length() * class_size;
      i.type = kTransferCacheType;
      v->push_back(i);

      prev_class_size = Static::sizemap()->ByteSizeForClass(cl);
    }

    // Add stats from per-thread heaps
    uint64_t class_count[kNumClasses];
    memset(class_count, 0, sizeof(class_count));
    {
      SpinLockHolder h(Static::pageheap_lock());
      uint64_t thread_bytes = 0;
      ThreadCache::GetThreadStats(&thread_bytes, class_count);
    }

    prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = Static::sizemap()->ByteSizeForClass(cl);
      i.total_bytes_free =
          class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
      i.type = kThreadCacheType;
      v->push_back(i);
      prev_class_size = Static::sizemap()->ByteSizeForClass(cl);
    }

    // append page heap info
    PageHeap::SmallSpanStats small;
    PageHeap::LargeSpanStats large;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Static::pageheap()->GetSmallSpanStats(&small);
      Static::pageheap()->GetLargeSpanStats(&large);
    }

    // large spans: mapped
    MallocExtension::FreeListInfo span_info;
    span_info.type = kLargeSpanType;
    span_info.max_object_size = (numeric_limits<size_t>::max)();
    span_info.min_object_size = kMaxPages << kPageShift;
    span_info.total_bytes_free = large.normal_pages << kPageShift;
    v->push_back(span_info);

    // large spans: unmapped
    span_info.type = kLargeUnmappedSpanType;
    span_info.total_bytes_free = large.returned_pages << kPageShift;
    v->push_back(span_info);

    // small spans
    for (int s = 1; s < kMaxPages; s++) {
      MallocExtension::FreeListInfo i;
      i.max_object_size = (s << kPageShift);
      i.min_object_size = ((s - 1) << kPageShift);

      i.type = kPageHeapType;
      i.total_bytes_free = (s << kPageShift) * small.normal_length[s];
      v->push_back(i);

      i.type = kPageHeapUnmappedType;
      i.total_bytes_free = (s << kPageShift) * small.returned_length[s];
      v->push_back(i);
    }
  }
};

// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
static int tcmallocguard_refcount = 0;  // no lock needed: runs before main()
TCMallocGuard::TCMallocGuard() {
  if (tcmallocguard_refcount++ == 0) {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    tcmalloc::CheckIfKernelSupportsTLS();
#endif
    ReplaceSystemAlloc();    // defined in libc_override_*.h
    tc_free(tc_malloc(1));
    ThreadCache::InitTSD();
    tc_free(tc_malloc(1));
    // Either we, or debugallocation.cc, or valgrind will control memory
    // management.  We register our extension if we're the winner.
#ifdef TCMALLOC_USING_DEBUGALLOCATION
    // Let debugallocation register its extension.
#else
    if (RunningOnValgrind()) {
      // Let Valgrind use its own malloc (so don't register our extension).
    } else {
      MallocExtension::Register(new TCMallocImplementation);
    }
#endif
  }
}

TCMallocGuard::~TCMallocGuard() {
  if (--tcmallocguard_refcount == 0) {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
  }
}

#ifndef WIN32_OVERRIDE_ALLOCATORS
static TCMallocGuard module_enter_exit_hook;
#endif

//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

namespace {

static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
}

static inline void* CheckMallocResult(void *result) {
  ASSERT(result == NULL || CheckCachedSizeClass(result));
  MarkAllocatedRegion(result);
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  Static::pageheap()->CacheSizeClass(span->start, 0);
  return
      CheckMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}

static void* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(Static::pageheap_lock());
  // Allocate span
  Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = Static::stacktrace_allocator()->New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }
  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  tcmalloc::DLL_Prepend(Static::sampled_objects(), span);

  return SpanToMallocResult(span);
}

// Copy of FLAGS_tcmalloc_large_alloc_report_threshold with
// automatic increases factored in.
static int64_t large_alloc_threshold =
    (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold
     ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold);

static void ReportLargeAlloc(Length num_pages, void* result) {
  StackTrace stack;
  stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);

  static const int N = 1000;
  char buffer[N];
  TCMalloc_Printer printer(buffer, N);
  printer.printf("tcmalloc: large alloc %" PRIu64 " bytes == %p @ ",
                 static_cast<uint64>(num_pages) << kPageShift,
                 result);
  for (int i = 0; i < stack.depth; i++) {
    printer.printf(" %p", stack.stack[i]);
  }
  printer.printf("\n");
  write(STDERR_FILENO, buffer, strlen(buffer));
}

inline void* cpp_alloc(size_t size, bool nothrow);
inline void* do_malloc(size_t size);

// TODO(willchan): Investigate whether or not inlining this much is harmful to
// performance.
// This is equivalent to do_malloc() except when tc_new_mode is set to true.
// Otherwise, it will run the std::new_handler if set.
inline void* do_malloc_or_cpp_alloc(size_t size) {
  return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size);
}

void* cpp_memalign(size_t align, size_t size);
void* do_memalign(size_t align, size_t size);

inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) {
  return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size);
}

// Must be called with the page lock held.
inline bool should_report_large(Length num_pages) {
  const int64 threshold = large_alloc_threshold;
  if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
    // Increase the threshold by 1/8 every time we generate a report.
    // We cap the threshold at 8GiB to avoid overflow problems.
    large_alloc_threshold = (threshold + threshold/8 < 8ll<<30
                             ? threshold + threshold/8 : 8ll<<30);
    return true;
  }
  return false;
}

// Helper for do_malloc().
inline void* do_malloc_pages(ThreadCache* heap, size_t size) {
  void* result;
  bool report_large;

  Length num_pages = tcmalloc::pages(size);
  size = num_pages << kPageShift;

  // Chromium profiling.  Measurements in March 2013 suggest this
  // imposes a small enough runtime cost that there's no reason to
  // try to optimize it.
  heap->AddToByteAllocatedTotal(size);

  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    result = DoSampledAllocation(size);

    SpinLockHolder h(Static::pageheap_lock());
    report_large = should_report_large(num_pages);
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    Span* span = Static::pageheap()->New(num_pages);
    result = (span == NULL ? NULL : SpanToMallocResult(span));
    report_large = should_report_large(num_pages);
  }

  if (report_large) {
    ReportLargeAlloc(num_pages, result);
  }
  return result;
}

inline void* do_malloc(size_t size) {
  AddRoomForMark(&size);

  void* ret = NULL;

  // The following call forces module initialization
  ThreadCache* heap = ThreadCache::GetCache();
  if (size <= kMaxSize && IsAllocSizePermitted(size)) {
    size_t cl = Static::sizemap()->SizeClass(size);
    size = Static::sizemap()->class_to_size(cl);

    // Chromium profiling.  Measurements in March 2013 suggest this
    // imposes a small enough runtime cost that there's no reason to
    // try to optimize it.
    heap->AddToByteAllocatedTotal(size);

    if ((FLAGS_tcmalloc_sample_parameter > 0) &&
        heap->SampleAllocation(size)) {
      ret = DoSampledAllocation(size);
      MarkAllocatedRegion(ret);
    } else {
      // The common case, and also the simplest.  This just pops the
      // size-appropriate freelist, after replenishing it if it's empty.
      ret = CheckMallocResult(heap->Allocate(size, cl));
    }
  } else if (IsAllocSizePermitted(size)) {
    ret = do_malloc_pages(heap, size);
    MarkAllocatedRegion(ret);
  }
  if (ret == NULL) errno = ENOMEM;
  ASSERT(IsAllocSizePermitted(size) || ret == NULL);
  return ret;
}

inline void* do_calloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;
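
  // Worked example of the check (assuming a 32-bit size_t): calloc(65536,
  // 65536) wraps to size == 0, and size / elem_size == 0 != n, so the
  // overflow is caught and NULL is returned instead of a tiny block
  // masquerading as a 4 GiB one.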

  void* result = do_malloc_or_cpp_alloc(size);
  if (result != NULL) {
    memset(result, 0, size);
  }
  return result;
}
* GetCacheIfPresent() {
1135 void* const p
= ThreadCache::GetCacheIfPresent();
1136 return reinterpret_cast<ThreadCache
*>(p
);

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
  if (ptr == NULL) return;
  if (Static::pageheap() == NULL) {
    // We called free() before malloc().  This can occur if the
    // (system) malloc() is called before tcmalloc is loaded, and then
    // free() is called after tcmalloc is loaded (and tc_free has
    // replaced free), but before the global constructor has run that
    // sets up the tcmalloc data structures.
    (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
    return;
  }
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = Static::pageheap()->GetDescriptor(p);
    if (!span) {
      // span can be NULL because the pointer passed in is invalid
      // (not something returned by malloc or friends), or because the
      // pointer was allocated with some other allocator besides
      // tcmalloc.  The latter can happen if tcmalloc is linked in via
      // a dynamic library, but is not listed last on the link line.
      // In that case, libraries after it on the link line will
      // allocate with libc malloc, but free with tcmalloc's free.
      (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
      return;
    }
    cl = span->sizeclass;
    Static::pageheap()->CacheSizeClass(p, cl);
  }

  if (cl == 0) {
    // Check to see if the object is in use.
    CHECK_CONDITION_PRINT(span->location == Span::IN_USE,
                          "Object was not in-use");

    CHECK_CONDITION_PRINT(
        span->start << kPageShift == reinterpret_cast<uintptr_t>(ptr),
        "Pointer is not pointing to the start of a span");
  }

  ValidateAllocatedRegion(ptr, cl);

  if (cl != 0) {
    ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
    ThreadCache* heap = GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      tcmalloc::FL_Init(ptr);
      Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
    if (span->sample) {
      StackTrace* st = reinterpret_cast<StackTrace*>(span->objects);
      tcmalloc::DLL_Remove(span);
      Static::stacktrace_allocator()->Delete(st);
      span->objects = NULL;
    }
    Static::pageheap()->Delete(span);
  }
}

// The default "do_free" that uses the default callback.
inline void do_free(void* ptr) {
  return do_free_with_callback(ptr, &InvalidFree);
}

// NOTE: some logic here is duplicated in GetOwnership (above), for
// speed.  If you change this function, look at that one too.
inline size_t GetSizeWithCallback(const void* ptr,
                                  size_t (*invalid_getsize_fn)(const void*)) {
  if (ptr == NULL)
    return 0;
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
  if (cl != 0) {
    return Static::sizemap()->ByteSizeForClass(cl);
  } else {
    const Span *span = Static::pageheap()->GetDescriptor(p);
    if (span == NULL) {  // means we do not own this memory
      return (*invalid_getsize_fn)(ptr);
    } else if (span->sizeclass != 0) {
      Static::pageheap()->CacheSizeClass(p, span->sizeclass);
      return Static::sizemap()->ByteSizeForClass(span->sizeclass);
    } else {
      return span->length << kPageShift;
    }
  }
}

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void* do_realloc_with_callback(
    void* old_ptr, size_t new_size,
    void (*invalid_free_fn)(void*),
    size_t (*invalid_get_size_fn)(const void*)) {
  AddRoomForMark(&new_size);
  // Get the size of the old entry
  const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn);

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  // We do hysteresis to avoid resizing ping-pongs:
  //    . If we need to grow, grow to max(new_size, old_size * 1.X)
  //    . Don't shrink unless new_size < old_size * 0.Y
  // X and Y trade-off time for wasted space.  For now we do 1.25 and 0.5.
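  //
  // Worked example: with old_size = 1000, lower_bound_to_grow = 1250 and
  // upper_bound_to_shrink = 500, a realloc to 1100 reallocates (growing to
  // 1250), a realloc to 700 keeps the old block, and a realloc to 400
  // reallocates.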
  const size_t min_growth = min(old_size / 4,
      (std::numeric_limits<size_t>::max)() - old_size);  // Avoid overflow.
  const size_t lower_bound_to_grow = old_size + min_growth;
  const size_t upper_bound_to_shrink = old_size / 2;
  if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
    // Need to reallocate.
    void* new_ptr = NULL;

    if (new_size > old_size && new_size < lower_bound_to_grow) {
      new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow);
    }
    ExcludeMarkFromSize(&new_size);  // do_malloc will add space if needed.
    if (new_ptr == NULL) {
      // Either new_size is not a tiny increment, or last do_malloc failed.
      new_ptr = do_malloc_or_cpp_alloc(new_size);
    }
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free_with_callback(old_ptr, invalid_free_fn);
    return new_ptr;
  } else {
    // We still need to call hooks to report the updated size:
    MallocHook::InvokeDeleteHook(old_ptr);
    ExcludeMarkFromSize(&new_size);
    MallocHook::InvokeNewHook(old_ptr, new_size);
    return old_ptr;
  }
}

inline void* do_realloc(void* old_ptr, size_t new_size) {
  return do_realloc_with_callback(old_ptr, new_size,
                                  &InvalidFree, &InvalidGetSizeForRealloc);
}

// For use by exported routines below that want specific alignments
//
// Note: this code can be slow for alignments > 16, and can
// significantly fragment memory.  The expectation is that
// memalign/posix_memalign/valloc/pvalloc will not be invoked very
// often.  This requirement simplifies our implementation and allows
// us to tune for expected allocation patterns.
void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  // Marked in CheckMallocResult(), which is also inside SpanToMallocResult().
  AddRoomForMark(&size);
  if (size + align < size) return NULL;         // Overflow

  // Fall back to malloc if we would already align this memory access properly.
  if (align <= AlignmentForSize(size)) {
    void* p = do_malloc(size);
    ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0);
    return p;
  }

  if (Static::pageheap() == NULL) ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
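    //
    // Worked example (hypothetical size classes 8, 16, 32, 48, 64, ...):
    // memalign(32, 24) starts at the class for 24 bytes and advances to the
    // first class whose size is a multiple of 32 (here 32 itself), while
    // memalign(64, 24) would walk further, up to the 64-byte class.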
    int cl = Static::sizemap()->SizeClass(size);
    while (cl < kNumClasses &&
           ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      ThreadCache* heap = ThreadCache::GetCache();
      size = Static::sizemap()->class_to_size(cl);
      return CheckMallocResult(heap->Allocate(size, cl));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(Static::pageheap_lock());

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = Static::pageheap()->New(tcmalloc::pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = tcmalloc::pages(size + align);
  Span* span = Static::pageheap()->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = Static::pageheap()->Split(span, skip);
    Static::pageheap()->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = tcmalloc::pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = Static::pageheap()->Split(span, needed);
    Static::pageheap()->Delete(trailer);
  }
  return SpanToMallocResult(span);
}

// Helpers for use by exported routines below:

inline void do_malloc_stats() {
  PrintStats(1);
}

inline int do_mallopt(int cmd, int value) {
  return 1;     // Indicates error
}

#ifdef HAVE_STRUCT_MALLINFO
inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL, NULL, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.pageheap.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap.free_bytes +
                                    stats.pageheap.unmapped_bytes);
  info.uordblks  = static_cast<int>(stats.pageheap.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap.free_bytes
                                    - stats.pageheap.unmapped_bytes);

  return info;
}
#endif  // HAVE_STRUCT_MALLINFO

static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);

inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

void* cpp_memalign(size_t align, size_t size) {
  for (;;) {
    void* p = do_memalign(align, size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh)
        return 0;

      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

}  // end unnamed namespace

// As promised, the definition of this function, declared above.
size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) {
  // Chromium workaround for third-party code calling tc_malloc_size(NULL), see
  // http://code.google.com/p/chromium/issues/detail?id=118087
  // Note: this is consistent with GLIBC's implementation of
  // malloc_usable_size(NULL).
  if (ptr == NULL)
    return 0;
  ASSERT(TCMallocImplementation::GetOwnership(ptr)
         != TCMallocImplementation::kNotOwned);
  return ExcludeSpaceForMark(
      GetSizeWithCallback(ptr, &InvalidGetAllocatedSize));
}

void TCMallocImplementation::MarkThreadBusy() {
  // Allocate to force the creation of a thread cache, but avoid
  // invoking any hooks.
  do_free(do_malloc(0));
}

//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
    int* major, int* minor, const char** patch) __THROW {
  if (major) *major = TC_VERSION_MAJOR;
  if (minor) *minor = TC_VERSION_MINOR;
  if (patch) *patch = TC_VERSION_PATCH;
  return TC_VERSION_STRING;
}

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW {
  int old_mode = tc_new_mode;
  tc_new_mode = flag;
  return old_mode;
}

#ifndef TCMALLOC_USING_DEBUGALLOCATION  // debugallocation.cc defines its own

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
  void* result = do_malloc_or_cpp_alloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t n,
                                              size_t elem_size) __THROW {
  void* result = do_calloc(n, elem_size);
  MallocHook::InvokeNewHook(result, n * elem_size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
                                               size_t new_size) __THROW {
  if (old_ptr == NULL) {
    void* result = do_malloc_or_cpp_alloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    do_free(old_ptr);
    return NULL;
  }
  return do_realloc(old_ptr, new_size);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size,
                                                   const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

// Standard C++ library implementations define and use this
// (via ::operator delete(ptr, nothrow)).
// But it's really the same as normal delete, so we just do the same thing.
extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p,
                                                     const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}
extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
                                                        const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
                                                          const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align,
                                                size_t size) __THROW {
  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
    void** result_ptr, size_t align, size_t size) __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
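// Illustrative sketch (hypothetical client code, compiled out): the contract
// enforced above -- alignment must be a nonzero power of two and a multiple
// of sizeof(void*), and failures come back as return codes, not via errno.
#if 0
static void PosixMemalignExamples() {
  void* p = NULL;
  if (posix_memalign(&p, 64, 1000) == 0) {  // 64: valid alignment
    free(p);
  }
  int rc = posix_memalign(&p, 24, 1000);    // 24: not a power of two
  (void)rc;  // rc == EINVAL here; p is left untouched
}
#endif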
static size_t pagesize = 0;
extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  if (size == 0) {    // pvalloc(0) should allocate one page, according to
    size = pagesize;  // http://man.free4web.biz/man3/libmpatrol.3.html
  }
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
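// Worked example of the rounding above, assuming 4096-byte pages:
//   size = 1    -> (1    + 4095) & ~4095 = 4096  (one page)
//   size = 4096 -> (4096 + 4095) & ~4095 = 4096  (still one page)
//   size = 4097 -> (4097 + 4095) & ~4095 = 8192  (two pages)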
extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
  do_malloc_stats();
}
extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
  return do_mallopt(cmd, value);
}
#ifdef HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  return do_mallinfo();
}
#endif  // HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
  return MallocExtension::instance()->GetAllocatedSize(ptr);
}
#endif  // TCMALLOC_USING_DEBUGALLOCATION
// --- Validation implementation with an extra mark ---------------------------
// We will put a mark at the extreme end of each allocation block.  We make
// sure that we always allocate enough "extra memory" that we can fit in the
// mark, and still provide the requested usable region.  If ever that mark is
// not as expected, then we know that the user is corrupting memory beyond
// their request size, or that they have called free a second time without
// having the memory allocated (again).  This allows us to spot most double
// free()s, but some can "slip by" or confuse our logic if the caller
// reallocates memory (for a second use) before performing an evil double-free
// of a first allocation.
// This code can be optimized, but for now, it is written to be most easily
// understood, and flexible (since it is evolving a bit).  Potential
// optimizations include using other calculated data, such as class size, or
// allocation size, which is known in the code above, but then is recalculated
// below.  Another potential optimization would be careful manual inlining of
// code, but I *think* that the compiler will probably do this for me, and
// I've been careful to avoid aliasing issues that might make a compiler
// back off.
// Evolution includes experimenting with different marks, to minimize the
// chance that a mark would be misunderstood (missed corruption).  The marks
// are meant to be a hashed encoding of the location, so that they can't be
// copied over a different region (by accident) without being detected (most
// of the time).
// Enable the following define to turn on all the TCMalloc checking.
// It will cost about 2% in performance, but it will catch double frees (most
// of the time), and will often catch allocated-buffer overrun errors.  This
// validation is only active when TCMalloc is used as the allocator.
#ifndef NDEBUG
#define TCMALLOC_VALIDATION
#endif
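// Illustrative sketch (hypothetical client code, compiled out): the two bug
// classes the end-of-block mark is designed to catch when validation is on.
// Exactly which overruns reach the mark depends on the internal class size.
#if 0
static void ValidationExamples() {
  char* p = static_cast<char*>(malloc(10));
  memset(p, 0, 20);  // overrun past the block clobbers the trailing mark...
  free(p);           // ...so this free dies in DieFromMemoryCorruption()

  char* q = static_cast<char*>(malloc(10));
  free(q);           // first free stores ~mark ("distinctively not allocated")
  free(q);           // second free sees ~mark and dies in DieFromDoubleFree()
}
#endif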
#if !defined(TCMALLOC_VALIDATION)
static size_t ExcludeSpaceForMark(size_t size) { return size; }
static void AddRoomForMark(size_t* size) {}
static void ExcludeMarkFromSize(size_t* new_size) {}
static void MarkAllocatedRegion(void* ptr) {}
static void ValidateAllocatedRegion(void* ptr, size_t cl) {}
#else  // TCMALLOC_VALIDATION
static void DieFromDoubleFree() {
  Log(kCrash, __FILE__, __LINE__, "Attempt to double free");
}
static void DieFromMemoryCorruption() {
  Log(kCrash, __FILE__, __LINE__, "Memory corrupted");
}
// We can either do byte marking, or whole word marking based on the following
// define.  char is as small as we can get, and word marking probably provides
// more than enough bits that we won't miss a corruption.  Any sized integral
// type can be used, but we just define two examples.
// #define TCMALLOC_SMALL_VALIDATION
#if defined (TCMALLOC_SMALL_VALIDATION)
  typedef char MarkType;  // char saves memory... int is more complete.
  static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36);
#else
  typedef int MarkType;  // char saves memory... int is more complete.
  static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536);
#endif
// TODO(jar): See if use of reference rather than pointer gets better inlining,
// or if macro is needed.  My fear is that taking the address may preclude
// register allocation.
inline static void AddRoomForMark(size_t* size) {
  *size += sizeof(kAllocationMarkMask);
}
inline static void ExcludeMarkFromSize(size_t* new_size) {
  *new_size -= sizeof(kAllocationMarkMask);
}
inline static size_t ExcludeSpaceForMark(size_t size) {
  return size - sizeof(kAllocationMarkMask);  // Lie about size when asked.
}
inline static MarkType* GetMarkLocation(void* ptr) {
  size_t size = GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
  ASSERT(size % sizeof(kAllocationMarkMask) == 0);
  size_t last_index = (size / sizeof(kAllocationMarkMask)) - 1;
  return static_cast<MarkType*>(ptr) + last_index;
}
// We hash in the mark location plus the pointer so that we effectively mix in
// the size of the block.  This means that if a span is used for different
// sizes, the mark will be different.  It would be good to hash in the size
// (which we effectively get by using both mark location and pointer), but
// even better would be to also include the class, as it concisely contains
// the entropy found in the size (when we don't have a large allocation), and
// there is less risk of losing those bits to truncation.  It would probably
// be good to combine the high bits of size (capturing info about large
// blocks) with the class (which is a 6 bit number).
inline static MarkType GetMarkValue(void* ptr, MarkType* mark) {
  void* ptr2 = static_cast<void*>(mark);
  size_t offset1 = static_cast<char*>(ptr) - static_cast<char*>(NULL);
  size_t offset2 = static_cast<char*>(ptr2) - static_cast<char*>(NULL);
  static const int kInvariantBits = 2;
  ASSERT((offset1 >> kInvariantBits) << kInvariantBits == offset1);
  // Note: low bits of both offsets are invariants due to alignment.  High
  // bits of both offsets are the same (unless we have a large allocation).
  // Avoid XORing high bits together, as they will cancel for most small
  // allocations.

  MarkType ret = kAllocationMarkMask;
  // Using a little shift, we can safely XOR together both offsets.
  ret ^= static_cast<MarkType>(offset1 >> kInvariantBits) ^
         static_cast<MarkType>(offset2);
  if (sizeof(ret) == 1) {
    // Try to bring some high level bits into the mix.
    ret += static_cast<MarkType>(offset1 >> 8) ^
           static_cast<MarkType>(offset1 >> 16) ^
           static_cast<MarkType>(offset1 >> 24);
  }
  // Hash in high bits on a 64 bit architecture.
  if (sizeof(size_t) == 8 && sizeof(ret) == 4)
    ret += offset1 >> 16;
  if (ret == 0)
    ret = kAllocationMarkMask;  // Avoid common pattern of all zeros.
  return ret;
}
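// Worked numeric sketch (assumed values): for a 16-byte block at
// ptr == 0x1000 with a 4-byte MarkType, the mark sits at 0x100C, so
//   ret = kAllocationMarkMask ^ (0x1000 >> 2) ^ 0x100C
// Because the mark's address folds in the block size, two co-located blocks
// of different sizes get different marks, so a stale mark left over from a
// prior use of the span is detected (most of the time).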
// TODO(jar): Use the passed in TCmalloc Class Index to calculate mark
// location faster.  The current implementation calls general functions,
// which have to recalculate this in order to get the Class Size.  This is a
// slow and wasteful recomputation... but it is much more readable this way
// (for now).
static void ValidateAllocatedRegion(void* ptr, size_t cl) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  MarkType allocated_mark = GetMarkValue(ptr, mark);
  MarkType current_mark = *mark;

  if (current_mark == ~allocated_mark)
    DieFromDoubleFree();
  if (current_mark != allocated_mark)
    DieFromMemoryCorruption();
#ifndef NDEBUG
  // In debug mode, copy the mark into all the free'd region.
  size_t class_size = static_cast<size_t>(reinterpret_cast<char*>(mark) -
                                          reinterpret_cast<char*>(ptr));
  memset(ptr, static_cast<char>(0x36), class_size);
#endif
  *mark = ~allocated_mark;  // Distinctively not allocated.
}
static void MarkAllocatedRegion(void* ptr) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  *mark = GetMarkValue(ptr, mark);
}
#endif  // TCMALLOC_VALIDATION