// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
// This multi-threaded access to the pagemap is safe for fairly
// subtle reasons.  We basically assume that when an object X is
// allocated by thread A and deallocated by thread B, there must
// have been appropriate synchronization in the handoff of object
// X from thread A to thread B.  The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
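//
// An illustrative sketch of how callers must treat a cached value of 0
// (not part of the original source): fall back to the authoritative pagemap,
//
//   size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
//   if (cl == 0) {
//     // "No information": consult the full pagemap instead.
//     const Span* span = Static::pageheap()->GetDescriptor(p);
//     if (span != NULL) cl = span->sizeclass;
//   }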
//
// Page map contains a mapping from page id to Span.
//
// If Span s occupies pages [p..q],
//      pagemap[p] == s
//      pagemap[q] == s
//      pagemap[p+1..q-1] are undefined
//      pagemap[p-1] and pagemap[q+1] are defined:
//         NULL if the corresponding page is not yet in the address space.
//         Otherwise it points to a Span.  This span may be free
//         or allocated.  If free, it is in one of pageheap's freelists.
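//
// A hedged illustration of the invariants above (not part of the original
// source): if a three-page Span s occupies pages [p..p+2], then
//      pagemap[p]   == s     // first page of the span
//      pagemap[p+2] == s     // last page of the span
//      pagemap[p+1]          // interior page: undefined, never consulted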
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
//   * malloc/free of small objects goes from ~300 ns to ~50 ns.
//   * allocation of a reasonably complicated struct
//     goes from about 1100 ns to about 300 ns.

#include <gperftools/tcmalloc.h>

#include <errno.h>                      // for ENOMEM, EINVAL, errno
#ifdef HAVE_SYS_CDEFS_H
#include <sys/cdefs.h>                  // for __THROW
#endif
#if defined HAVE_STDINT_H
#include <stdint.h>                     // for uintptr_t, uint64_t
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>                   // for uintptr_t, uint64_t
#else
#include <sys/types.h>
#endif
#include <stddef.h>                     // for size_t, NULL
#include <stdlib.h>                     // for getenv
#include <string.h>                     // for strcmp, memset, strlen, etc
#ifdef HAVE_UNISTD_H
#include <unistd.h>                     // for getpagesize, write, etc
#endif
#include <algorithm>                    // for max, min
#include <limits>                       // for numeric_limits
#include <new>                          // for nothrow_t (ptr only), etc
#include <vector>                       // for vector

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>     // for MallocHook
#include "base/basictypes.h"            // for int64
#include "base/commandlineflags.h"      // for RegisterFlagValidator, etc
#include "base/dynamic_annotations.h"   // for RunningOnValgrind
#include "base/spinlock.h"              // for SpinLockHolder
#include "central_freelist.h"           // for CentralFreeListPadded
#include "common.h"                     // for StackTrace, kPageShift, etc
#include "free_list.h"                  // for FL_Init
#include "internal_logging.h"           // for ASSERT, TCMalloc_Printer, etc
#include "malloc_hook-inl.h"            // for MallocHook::InvokeNewHook, etc
#include "page_heap.h"                  // for PageHeap, PageHeap::Stats
#include "page_heap_allocator.h"        // for PageHeapAllocator
#include "span.h"                       // for Span, DLL_Prepend, etc
#include "stack_trace_table.h"          // for StackTraceTable
#include "static_vars.h"                // for Static
#include "system-alloc.h"               // for DumpSystemAllocatorStats, etc
#include "tcmalloc_guard.h"             // for TCMallocGuard
#include "thread_cache.h"               // for ThreadCache

#if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
# define WIN32_DO_PATCHING 1
#endif

// Some windows file somewhere (at least on cygwin) #define's small (!)
// For instance, <windows.h> appears to have "#define small char".
#undef small

using STL_NAMESPACE::max;
using STL_NAMESPACE::min;
using STL_NAMESPACE::numeric_limits;
using STL_NAMESPACE::vector;

#include "libc_override.h"

// __THROW is defined in glibc (via <sys/cdefs.h>).  It means,
// counter-intuitively, "This function will never throw an exception."
// It's an optional optimization tool, but we may need to use it to
// match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

using tcmalloc::AlignmentForSize;
using tcmalloc::kLog;
using tcmalloc::kCrash;
using tcmalloc::kCrashWithStats;
using tcmalloc::Length;
using tcmalloc::PageHeap;
using tcmalloc::PageHeapAllocator;
using tcmalloc::SizeMap;
using tcmalloc::Span;
using tcmalloc::StackTrace;
using tcmalloc::Static;
using tcmalloc::ThreadCache;

// ---- Functions doing validation with an extra mark.
static size_t ExcludeSpaceForMark(size_t size);
static void AddRoomForMark(size_t* size);
static void ExcludeMarkFromSize(size_t* new_size);
static void MarkAllocatedRegion(void* ptr);
static void ValidateAllocatedRegion(void* ptr, size_t cl);
// ---- End validation functions.

DECLARE_int64(tcmalloc_sample_parameter);
DECLARE_double(tcmalloc_release_rate);

// For windows, the printf we use to report large allocs is
// potentially dangerous: it could cause a malloc that would cause an
// infinite loop.  So by default we set the threshold to a huge number
// on windows, so this bad situation will never trigger.  You can
// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
// want this functionality.
#ifdef _WIN32
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
#else
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
#endif
DEFINE_int64(tcmalloc_large_alloc_report_threshold,
             EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
                        kDefaultLargeAllocReportThreshold),
             "Allocations larger than this value cause a stack "
             "trace to be dumped to stderr.  The threshold for "
             "dumping stack traces is increased by a factor of 1.125 "
             "every time we print a message so that the threshold "
             "automatically goes up by a factor of ~1000 every 60 "
             "messages.  This bounds the amount of extra logging "
             "generated by this flag.  Default value of this flag "
             "is very large and therefore you should see no extra "
             "logging unless the flag is overridden.  Set to 0 to "
             "disable reporting entirely.");

// We already declared these functions in tcmalloc.h, but we have to
// declare them again to give them an ATTRIBUTE_SECTION: we want to
// put all callers of MallocHook::Invoke* in this module into
// ATTRIBUTE_SECTION(google_malloc) section, so that
// MallocHook::GetCallerStackTrace can function accurately.
extern "C" {
  void* tc_malloc(size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_free(void* ptr) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_realloc(void* ptr, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_calloc(size_t nmemb, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_cfree(void* ptr) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void* tc_memalign(size_t __alignment, size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_valloc(size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_pvalloc(size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void tc_malloc_stats(void) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  int tc_mallopt(int cmd, int value) __THROW
      ATTRIBUTE_SECTION(google_malloc);
#ifdef HAVE_STRUCT_MALLINFO
  struct mallinfo tc_mallinfo(void) __THROW
      ATTRIBUTE_SECTION(google_malloc);
#endif

  void* tc_new(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
  void tc_delete(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_newarray(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
  void tc_deletearray(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  // And the nothrow variants of these:
  void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  // Surprisingly, standard C++ library implementations use a
  // nothrow-delete internally.  See, eg:
  //   http://www.dinkumware.com/manuals/?manual=compleat&page=new.html
  void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  // Some non-standard extensions that we support.

  // This is equivalent to
  //    OS X: malloc_size()
  //    glibc: malloc_usable_size()
  //    Windows: _msize()
  size_t tc_malloc_size(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void* tc_malloc_skip_new_handler(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
}  // extern "C"

// ----------------------- IMPLEMENTATION -------------------------------

static int tc_new_mode = 0;  // See tc_set_new_mode().

// Routines such as free() and realloc() catch some erroneous pointers
// passed to them, and invoke the below when they do.  (An erroneous pointer
// won't be caught if it's within a valid span or a stale span for which
// the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing
// required) kind of exception handling for these routines.
namespace {
void InvalidFree(void* ptr) {
  Log(kCrash, __FILE__, __LINE__, "Attempt to free invalid pointer", ptr);
}

size_t InvalidGetSizeForRealloc(const void* old_ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to realloc invalid pointer", old_ptr);
  return 0;
}

size_t InvalidGetAllocatedSize(const void* ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to get the size of an invalid pointer", ptr);
  return 0;
}

// For security reasons, we want to limit the size of allocations.
// See crbug.com/169327.
inline bool IsAllocSizePermitted(size_t alloc_size) {
  // Never allow an allocation larger than what can be indexed via an int.
  // Remove kPageSize to account for various rounding, padding and to have a
  // small margin.
  return alloc_size <= ((std::numeric_limits<int>::max)() - kPageSize);
}
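
// Illustrative check of the bound above (not part of the original source;
// numbers assume a 4 KiB kPageSize and a 32-bit int): the largest permitted
// request is 2147483647 - 4096 = 2147479551 bytes, just under 2 GiB, e.g.
//   IsAllocSizePermitted(1 << 20)    // true: 1 MiB is fine
//   IsAllocSizePermitted(SIZE_MAX)   // false: cannot be indexed via an int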

}  // unnamed namespace

// Extract interesting stats
struct TCMallocStats {
  uint64_t thread_bytes;      // Bytes in thread caches
  uint64_t central_bytes;     // Bytes in central cache
  uint64_t transfer_bytes;    // Bytes in central transfer cache
  uint64_t metadata_bytes;    // Bytes alloced for metadata
  uint64_t metadata_unmapped_bytes;  // Address space reserved for metadata
                                     // but is not committed.
  PageHeap::Stats pageheap;   // Stats from page heap
};

// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
                         PageHeap::SmallSpanStats* small_spans,
                         PageHeap::LargeSpanStats* large_spans) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = Static::central_cache()[cl].length();
    const int tc_length = Static::central_cache()[cl].tc_length();
    const size_t cache_overhead = Static::central_cache()[cl].OverheadBytes();
    const size_t size = static_cast<uint64_t>(
        Static::sizemap()->ByteSizeForClass(cl));
    r->central_bytes += (size * length) + cache_overhead;
    r->transfer_bytes += (size * tc_length);
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(Static::pageheap_lock());
    ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
    r->metadata_bytes = tcmalloc::metadata_system_bytes();
    r->metadata_unmapped_bytes = tcmalloc::metadata_unmapped_bytes();
    r->pageheap = Static::pageheap()->stats();
    if (small_spans != NULL) {
      Static::pageheap()->GetSmallSpanStats(small_spans);
    }
    if (large_spans != NULL) {
      Static::pageheap()->GetLargeSpanStats(large_spans);
    }
  }
}

static double PagesToMiB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}

// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  PageHeap::SmallSpanStats small;
  PageHeap::LargeSpanStats large;
  if (level >= 2) {
    ExtractStats(&stats, class_count, &small, &large);
  } else {
    ExtractStats(&stats, NULL, NULL, NULL);
  }

  static const double MiB = 1048576.0;

  const uint64_t physical_memory_used_by_metadata =
      stats.metadata_bytes - stats.metadata_unmapped_bytes;
  const uint64_t unmapped_bytes =
      stats.pageheap.unmapped_bytes + stats.metadata_unmapped_bytes;

  const uint64_t virtual_memory_used = (stats.pageheap.system_bytes
                                        + stats.metadata_bytes);
  const uint64_t physical_memory_used = virtual_memory_used - unmapped_bytes;
  const uint64_t bytes_in_use_by_app = (physical_memory_used
                                        - physical_memory_used_by_metadata
                                        - stats.pageheap.free_bytes
                                        - stats.central_bytes
                                        - stats.transfer_bytes
                                        - stats.thread_bytes);
394 "WASTE: %7.1f MiB bytes in use\n"
395 "WASTE: + %7.1f MiB committed but not used\n"
396 "WASTE: ------------\n"
397 "WASTE: = %7.1f MiB bytes committed\n"
398 "WASTE: committed/used ratio of %f\n",
399 bytes_in_use_by_app
/ MiB
,
400 (stats
.pageheap
.committed_bytes
- bytes_in_use_by_app
) / MiB
,
401 stats
.pageheap
.committed_bytes
/ MiB
,
402 stats
.pageheap
.committed_bytes
/ static_cast<double>(bytes_in_use_by_app
)
404 #ifdef TCMALLOC_SMALL_BUT_SLOW
406 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n");
409 "------------------------------------------------\n"
410 "MALLOC: %12" PRIu64
" (%7.1f MiB) Bytes in use by application\n"
411 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in page heap freelist\n"
412 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in central cache freelist\n"
413 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in transfer cache freelist\n"
414 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in thread cache freelists\n"
415 "MALLOC: ------------\n"
416 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Bytes committed\n"
417 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes in malloc metadata\n"
418 "MALLOC: ------------\n"
419 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Actual memory used (physical + swap)\n"
420 "MALLOC: + %12" PRIu64
" (%7.1f MiB) Bytes released to OS (aka unmapped)\n"
421 "MALLOC: ------------\n"
422 "MALLOC: = %12" PRIu64
" (%7.1f MiB) Virtual address space used\n"
424 "MALLOC: %12" PRIu64
" Spans in use\n"
425 "MALLOC: %12" PRIu64
" Thread heaps in use\n"
426 "MALLOC: %12" PRIu64
" Tcmalloc page size\n"
427 "------------------------------------------------\n"
428 "Call ReleaseFreeMemory() to release freelist memory to the OS"
429 " (via madvise()).\n"
430 "Bytes released to the OS take up virtual address space"
431 " but no physical memory.\n",
432 bytes_in_use_by_app
, bytes_in_use_by_app
/ MiB
,
433 stats
.pageheap
.free_bytes
, stats
.pageheap
.free_bytes
/ MiB
,
434 stats
.central_bytes
, stats
.central_bytes
/ MiB
,
435 stats
.transfer_bytes
, stats
.transfer_bytes
/ MiB
,
436 stats
.thread_bytes
, stats
.thread_bytes
/ MiB
,
437 stats
.pageheap
.committed_bytes
, stats
.pageheap
.committed_bytes
/ MiB
,
438 physical_memory_used_by_metadata
, physical_memory_used_by_metadata
/ MiB
,
439 physical_memory_used
, physical_memory_used
/ MiB
,
440 unmapped_bytes
, unmapped_bytes
/ MiB
,
441 virtual_memory_used
, virtual_memory_used
/ MiB
,
442 uint64_t(Static::span_allocator()->inuse()),
443 uint64_t(ThreadCache::HeapsInUse()),
444 uint64_t(kPageSize
));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    out->printf("Size class breakdown\n");
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes =
            class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n",
                    cl, Static::sizemap()->ByteSizeForClass(cl),
                    class_count[cl], class_bytes / MiB, cumulative / MiB);
      }
    }

    // append page heap info
    int nonempty_sizes = 0;
    for (int s = 0; s < kMaxPages; s++) {
      if (small.normal_length[s] + small.returned_length[s] > 0) {
        nonempty_sizes++;
      }
    }
    out->printf("------------------------------------------------\n");
    out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n",
                nonempty_sizes, stats.pageheap.free_bytes / MiB,
                stats.pageheap.unmapped_bytes / MiB);
    out->printf("------------------------------------------------\n");
    uint64_t total_normal = 0;
    uint64_t total_returned = 0;
    for (int s = 0; s < kMaxPages; s++) {
      const int n_length = small.normal_length[s];
      const int r_length = small.returned_length[s];
      if (n_length + r_length > 0) {
        uint64_t n_pages = s * n_length;
        uint64_t r_pages = s * r_length;
        total_normal += n_pages;
        total_returned += r_pages;
        out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                    "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                    s,
                    (n_length + r_length),
                    PagesToMiB(n_pages + r_pages),
                    PagesToMiB(total_normal + total_returned),
                    PagesToMiB(r_pages),
                    PagesToMiB(total_returned));
      }
    }

    total_normal += large.normal_pages;
    total_returned += large.returned_pages;
    out->printf(">255   large * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                static_cast<unsigned int>(large.spans),
                PagesToMiB(large.normal_pages + large.returned_pages),
                PagesToMiB(total_normal + total_returned),
                PagesToMiB(large.returned_pages),
                PagesToMiB(total_returned));
  }
}

static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}

static void** DumpHeapGrowthStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(Static::pageheap_lock());
    for (StackTrace* t = Static::growth_stacks();
         t != NULL;
         t = reinterpret_cast<StackTrace*>(
             t->stack[tcmalloc::kMaxStackDepth-1])) {
      needed_slots += 3 + t->depth;
    }
    needed_slots += 100;            // Slop in case list grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    Log(kLog, __FILE__, __LINE__,
        "tcmalloc: allocation failed for stack trace slots",
        needed_slots * sizeof(*result));
    return NULL;
  }

  SpinLockHolder h(Static::pageheap_lock());
  int used_slots = 0;
  for (StackTrace* t = Static::growth_stacks();
       t != NULL;
       t = reinterpret_cast<StackTrace*>(
           t->stack[tcmalloc::kMaxStackDepth-1])) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    if (used_slots + 3 + t->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(t->size);
    result[used_slots+2] = reinterpret_cast<void*>(t->depth);
    for (int d = 0; d < t->depth; d++) {
      result[used_slots+3+d] = t->stack[d];
    }
    used_slots += 3 + t->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}

static void IterateOverRanges(void* arg, MallocExtension::RangeFunction func) {
  PageID page = 1;  // Some code may assume that page==0 is never used
  bool done = false;
  while (!done) {
    // Accumulate a small number of ranges in a local buffer
    static const int kNumRanges = 16;
    static base::MallocRange ranges[kNumRanges];
    int n = 0;
    {
      SpinLockHolder h(Static::pageheap_lock());
      while (n < kNumRanges) {
        if (!Static::pageheap()->GetNextRange(page, &ranges[n])) {
          done = true;
          break;
        } else {
          uintptr_t limit = ranges[n].address + ranges[n].length;
          page = (limit + kPageSize - 1) >> kPageShift;
          n++;
        }
      }
    }

    for (int i = 0; i < n; i++) {
      (*func)(arg, &ranges[i]);
    }
  }
}

// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 private:
  // ReleaseToSystem() might release more than the requested bytes because
  // the page heap releases at the span granularity, and spans are of wildly
  // different sizes.  This member keeps track of the extra bytes released
  // so that the app can periodically call ReleaseToSystem() to release
  // memory at a constant rate.
  // NOTE: Protected by Static::pageheap_lock().
  size_t extra_bytes_released_;

 public:
  TCMallocImplementation()
      : extra_bytes_released_(0) {
  }

  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  // We may print an extra, tcmalloc-specific warning message here.
  virtual void GetHeapSample(MallocExtensionWriter* writer) {
    if (FLAGS_tcmalloc_sample_parameter == 0) {
      const char* const kWarningMsg =
          "%warn\n"
          "%warn This heap profile does not have any data in it, because\n"
          "%warn the application was run with heap sampling turned off.\n"
          "%warn To get useful data from GetHeapSample(), you must\n"
          "%warn set the environment variable TCMALLOC_SAMPLE_PARAMETER to\n"
          "%warn a positive sampling period, such as 524288.\n"
          "%warn\n";
      writer->append(kWarningMsg, strlen(kWarningMsg));
    }
    MallocExtension::GetHeapSample(writer);
  }

  virtual void** ReadStackTraces(int* sample_period) {
    tcmalloc::StackTraceTable table;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Span* sampled = Static::sampled_objects();
      for (Span* s = sampled->next; s != sampled; s = s->next) {
        table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
      }
    }
    *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
    return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
  }

  virtual void** ReadHeapGrowthStackTraces() {
    return DumpHeapGrowthStackTraces();
  }

  virtual void Ranges(void* arg, RangeFunction func) {
    IterateOverRanges(arg, func);
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.transfer_bytes
               - stats.pageheap.free_bytes
               - stats.pageheap.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // Kept for backwards compatibility.  Now defined externally as:
      //    pageheap_free_bytes + pageheap_unmapped_bytes.
      SpinLockHolder l(Static::pageheap_lock());
      PageHeap::Stats stats = Static::pageheap()->stats();
      *value = stats.free_bytes + stats.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().free_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_unmapped_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = ThreadCache::overall_thread_cache_size();
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }
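
  // Illustrative (hedged) usage sketch, not part of the original source;
  // callers normally reach the properties above via the extension singleton:
  //
  //   size_t free_bytes = 0;
  //   if (MallocExtension::instance()->GetNumericProperty(
  //           "tcmalloc.pageheap_free_bytes", &free_bytes)) {
  //     // free_bytes now holds the page heap freelist size.
  //   }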

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      ThreadCache::set_overall_thread_cache_size(value);
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    ThreadCache::BecomeIdle();
  }

  virtual void MarkThreadBusy();  // Implemented below

  virtual SysAllocator* GetSystemAllocator() {
    SpinLockHolder h(Static::pageheap_lock());
    return sys_alloc;
  }

  virtual void SetSystemAllocator(SysAllocator* alloc) {
    SpinLockHolder h(Static::pageheap_lock());
    sys_alloc = alloc;
  }

  virtual void ReleaseToSystem(size_t num_bytes) {
    SpinLockHolder h(Static::pageheap_lock());
    if (num_bytes <= extra_bytes_released_) {
      // We released too much on a prior call, so don't release any
      // more this time.
      extra_bytes_released_ = extra_bytes_released_ - num_bytes;
      return;
    }
    num_bytes = num_bytes - extra_bytes_released_;
    // num_bytes might be less than one page.  If we pass zero to
    // ReleaseAtLeastNPages, it won't do anything, so we release a whole
    // page now and let extra_bytes_released_ smooth it out over time.
    Length num_pages = max<Length>(num_bytes >> kPageShift, 1);
    size_t bytes_released = Static::pageheap()->ReleaseAtLeastNPages(
        num_pages) << kPageShift;
    if (bytes_released > num_bytes) {
      extra_bytes_released_ = bytes_released - num_bytes;
    } else {
      // The PageHeap wasn't able to release num_bytes.  Don't try to
      // compensate with a big release next time.  Specifically,
      // ReleaseFreeMemory() calls ReleaseToSystem(LONG_MAX).
      extra_bytes_released_ = 0;
    }
  }
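
  // Worked example of the smoothing above (illustrative, assuming 8 KiB
  // pages): a 4 KiB request rounds up to one whole page, so
  // ReleaseAtLeastNPages(1) releases 8 KiB and extra_bytes_released_
  // becomes 4 KiB; the next 4 KiB request is then paid entirely out of
  // that credit and releases nothing.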

  virtual void SetMemoryReleaseRate(double rate) {
    FLAGS_tcmalloc_release_rate = rate;
  }

  virtual double GetMemoryReleaseRate() {
    return FLAGS_tcmalloc_release_rate;
  }

  virtual size_t GetEstimatedAllocatedSize(size_t size) {
    if (size <= kMaxSize) {
      const size_t cl = Static::sizemap()->SizeClass(size);
      const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl);
      return alloc_size;
    } else {
      return tcmalloc::pages(size) << kPageShift;
    }
  }

  // This just calls GetSizeWithCallback, but because that's in an
  // unnamed namespace, we need to move the definition below it in the
  // file.
  virtual size_t GetAllocatedSize(const void* ptr);

  // This duplicates some of the logic in GetSizeWithCallback, but is
  // faster.  This is important on OS X, where this function is called
  // on every allocation operation.
  virtual Ownership GetOwnership(const void* ptr) {
    const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
    // The rest of tcmalloc assumes that all allocated pointers use at
    // most kAddressBits bits.  If ptr doesn't, then it definitely
    // wasn't allocated by tcmalloc.
    if ((p >> (kAddressBits - kPageShift)) > 0) {
      return kNotOwned;
    }
    size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
    if (cl != 0) {
      return kOwned;
    }
    const Span *span = Static::pageheap()->GetDescriptor(p);
    return span ? kOwned : kNotOwned;
  }

  virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
    static const char* kCentralCacheType = "tcmalloc.central";
    static const char* kTransferCacheType = "tcmalloc.transfer";
    static const char* kThreadCacheType = "tcmalloc.thread";
    static const char* kPageHeapType = "tcmalloc.page";
    static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped";
    static const char* kLargeSpanType = "tcmalloc.large";
    static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped";

    v->clear();

    // central class information
    int64 prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      size_t class_size = Static::sizemap()->ByteSizeForClass(cl);
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = class_size;
      i.total_bytes_free =
          Static::central_cache()[cl].length() * class_size;
      i.type = kCentralCacheType;
      v->push_back(i);

      // transfer cache
      i.total_bytes_free =
          Static::central_cache()[cl].tc_length() * class_size;
      i.type = kTransferCacheType;
      v->push_back(i);

      prev_class_size = Static::sizemap()->ByteSizeForClass(cl);
    }

    // Add stats from per-thread heaps
    uint64_t class_count[kNumClasses];
    memset(class_count, 0, sizeof(class_count));
    {
      SpinLockHolder h(Static::pageheap_lock());
      uint64_t thread_bytes = 0;
      ThreadCache::GetThreadStats(&thread_bytes, class_count);
    }

    prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = Static::sizemap()->ByteSizeForClass(cl);
      i.total_bytes_free =
          class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
      i.type = kThreadCacheType;
      v->push_back(i);

      prev_class_size = Static::sizemap()->ByteSizeForClass(cl);
    }

    // append page heap info
    PageHeap::SmallSpanStats small;
    PageHeap::LargeSpanStats large;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Static::pageheap()->GetSmallSpanStats(&small);
      Static::pageheap()->GetLargeSpanStats(&large);
    }

    // large spans: mapped
    MallocExtension::FreeListInfo span_info;
    span_info.type = kLargeSpanType;
    span_info.max_object_size = (numeric_limits<size_t>::max)();
    span_info.min_object_size = kMaxPages << kPageShift;
    span_info.total_bytes_free = large.normal_pages << kPageShift;
    v->push_back(span_info);

    // large spans: unmapped
    span_info.type = kLargeUnmappedSpanType;
    span_info.total_bytes_free = large.returned_pages << kPageShift;
    v->push_back(span_info);

    // small spans
    for (int s = 1; s < kMaxPages; s++) {
      MallocExtension::FreeListInfo i;
      i.max_object_size = (s << kPageShift);
      i.min_object_size = ((s - 1) << kPageShift);

      i.type = kPageHeapType;
      i.total_bytes_free = (s << kPageShift) * small.normal_length[s];
      v->push_back(i);

      i.type = kPageHeapUnmappedType;
      i.total_bytes_free = (s << kPageShift) * small.returned_length[s];
      v->push_back(i);
    }
  }
};

// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
static int tcmallocguard_refcount = 0;  // no lock needed: runs before main()

TCMallocGuard::TCMallocGuard() {
  if (tcmallocguard_refcount++ == 0) {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    tcmalloc::CheckIfKernelSupportsTLS();
#endif
    ReplaceSystemAlloc();    // defined in libc_override_*.h
    tc_free(tc_malloc(1));
    ThreadCache::InitTSD();
    tc_free(tc_malloc(1));
    // Either we, or debugallocation.cc, or valgrind will control memory
    // management.  We register our extension if we're the winner.
#ifdef TCMALLOC_USING_DEBUGALLOCATION
    // Let debugallocation register its extension.
#else
    if (RunningOnValgrind()) {
      // Let Valgrind use its own malloc (so don't register our extension).
    } else {
      MallocExtension::Register(new TCMallocImplementation);
    }
#endif
  }
}

TCMallocGuard::~TCMallocGuard() {
  if (--tcmallocguard_refcount == 0) {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
  }
}
#ifndef WIN32_OVERRIDE_ALLOCATORS
static TCMallocGuard module_enter_exit_hook;
#endif

//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
}

static inline void* CheckMallocResult(void *result) {
  ASSERT(result == NULL || CheckCachedSizeClass(result));
  MarkAllocatedRegion(result);
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  Static::pageheap()->CacheSizeClass(span->start, 0);
  return
      CheckMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}

static void* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(Static::pageheap_lock());
  // Allocate span
  Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = Static::stacktrace_allocator()->New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }
  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  tcmalloc::DLL_Prepend(Static::sampled_objects(), span);

  return SpanToMallocResult(span);
}

// Copy of FLAGS_tcmalloc_large_alloc_report_threshold with
// automatic increases factored in.
static int64_t large_alloc_threshold =
    (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold
     ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold);

static void ReportLargeAlloc(Length num_pages, void* result) {
  StackTrace stack;
  stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);

  static const int N = 1000;
  char buffer[N];
  TCMalloc_Printer printer(buffer, N);
  printer.printf("tcmalloc: large alloc %" PRIu64 " bytes == %p @ ",
                 static_cast<uint64>(num_pages) << kPageShift,
                 result);
  for (int i = 0; i < stack.depth; i++) {
    printer.printf(" %p", stack.stack[i]);
  }
  printer.printf("\n");
  write(STDERR_FILENO, buffer, strlen(buffer));
}

inline void* cpp_alloc(size_t size, bool nothrow);
inline void* do_malloc(size_t size);

// TODO(willchan): Investigate whether or not inlining this much is harmful to
// performance.
// This is equivalent to do_malloc() except when tc_new_mode is set to true.
// Otherwise, it will run the std::new_handler if set.
inline void* do_malloc_or_cpp_alloc(size_t size) {
  return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size);
}

void* cpp_memalign(size_t align, size_t size);
void* do_memalign(size_t align, size_t size);

inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) {
  return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size);
}

// Must be called with the page lock held.
inline bool should_report_large(Length num_pages) {
  const int64 threshold = large_alloc_threshold;
  if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
    // Increase the threshold by 1/8 every time we generate a report.
    // We cap the threshold at 8GiB to avoid overflow problems.
    large_alloc_threshold = (threshold + threshold/8 < 8ll<<30
                             ? threshold + threshold/8 : 8ll<<30);
    return true;
  }
  return false;
}

// Helper for do_malloc().
inline void* do_malloc_pages(ThreadCache* heap, size_t size) {
  void* result;
  bool report_large;

  Length num_pages = tcmalloc::pages(size);
  size = num_pages << kPageShift;

  // Chromium profiling.  Measurements in March 2013 suggest this
  // imposes a small enough runtime cost that there's no reason to
  // try to optimize it.
  heap->AddToByteAllocatedTotal(size);

  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    result = DoSampledAllocation(size);

    SpinLockHolder h(Static::pageheap_lock());
    report_large = should_report_large(num_pages);
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    Span* span = Static::pageheap()->New(num_pages);
    result = (span == NULL ? NULL : SpanToMallocResult(span));
    report_large = should_report_large(num_pages);
  }

  if (report_large) {
    ReportLargeAlloc(num_pages, result);
  }
  return result;
}

inline void* do_malloc(size_t size) {
  AddRoomForMark(&size);

  void* ret = NULL;

  // The following call forces module initialization
  ThreadCache* heap = ThreadCache::GetCache();
  if (size <= kMaxSize && IsAllocSizePermitted(size)) {
    size_t cl = Static::sizemap()->SizeClass(size);
    size = Static::sizemap()->class_to_size(cl);

    // Chromium profiling.  Measurements in March 2013 suggest this
    // imposes a small enough runtime cost that there's no reason to
    // try to optimize it.
    heap->AddToByteAllocatedTotal(size);

    if ((FLAGS_tcmalloc_sample_parameter > 0) &&
        heap->SampleAllocation(size)) {
      ret = DoSampledAllocation(size);
      MarkAllocatedRegion(ret);
    } else {
      // The common case, and also the simplest.  This just pops the
      // size-appropriate freelist, after replenishing it if it's empty.
      ret = CheckMallocResult(heap->Allocate(size, cl));
    }
  } else if (IsAllocSizePermitted(size)) {
    ret = do_malloc_pages(heap, size);
    MarkAllocatedRegion(ret);
  }
  if (ret == NULL) errno = ENOMEM;
  ASSERT(IsAllocSizePermitted(size) || ret == NULL);
  return ret;
}

inline void* do_calloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;

  void* result = do_malloc_or_cpp_alloc(size);
  if (result != NULL) {
    memset(result, 0, size);
  }
  return result;
}
* GetCacheIfPresent() {
1138 void* const p
= ThreadCache::GetCacheIfPresent();
1139 return reinterpret_cast<ThreadCache
*>(p
);

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
  if (ptr == NULL) return;
  if (Static::pageheap() == NULL) {
    // We called free() before malloc().  This can occur if the
    // (system) malloc() is called before tcmalloc is loaded, and then
    // free() is called after tcmalloc is loaded (and tc_free has
    // replaced free), but before the global constructor has run that
    // sets up the tcmalloc data structures.
    (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
    return;
  }
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = Static::pageheap()->GetDescriptor(p);
    if (!span) {
      // span can be NULL because the pointer passed in is invalid
      // (not something returned by malloc or friends), or because the
      // pointer was allocated with some other allocator besides
      // tcmalloc.  The latter can happen if tcmalloc is linked in via
      // a dynamic library, but is not listed last on the link line.
      // In that case, libraries after it on the link line will
      // allocate with libc malloc, but free with tcmalloc's free.
      (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
      return;
    }
    cl = span->sizeclass;
    Static::pageheap()->CacheSizeClass(p, cl);
  }

  if (cl == 0) {
    // Check to see if the object is in use.
    CHECK_CONDITION_PRINT(span->location == Span::IN_USE,
                          "Object was not in-use");
    CHECK_CONDITION_PRINT(
        span->start << kPageShift == reinterpret_cast<uintptr_t>(ptr),
        "Pointer is not pointing to the start of a span");
  }

  ValidateAllocatedRegion(ptr, cl);

  if (cl != 0) {
    ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
    ThreadCache* heap = GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      tcmalloc::FL_Init(ptr);
      Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
    if (span->sample) {
      StackTrace* st = reinterpret_cast<StackTrace*>(span->objects);
      tcmalloc::DLL_Remove(span);
      Static::stacktrace_allocator()->Delete(st);
      span->objects = NULL;
    }
    Static::pageheap()->Delete(span);
  }
}

// The default "do_free" that uses the default callback.
inline void do_free(void* ptr) {
  return do_free_with_callback(ptr, &InvalidFree);
}

// NOTE: some logic here is duplicated in GetOwnership (above), for
// speed.  If you change this function, look at that one too.
inline size_t GetSizeWithCallback(const void* ptr,
                                  size_t (*invalid_getsize_fn)(const void*)) {
  if (ptr == NULL)
    return 0;
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
  if (cl != 0) {
    return Static::sizemap()->ByteSizeForClass(cl);
  }

  const Span *span = Static::pageheap()->GetDescriptor(p);
  if (span == NULL) {  // means we do not own this memory
    return (*invalid_getsize_fn)(ptr);
  } else if (span->sizeclass != 0) {
    Static::pageheap()->CacheSizeClass(p, span->sizeclass);
    return Static::sizemap()->ByteSizeForClass(span->sizeclass);
  } else {
    return span->length << kPageShift;
  }
}

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void* do_realloc_with_callback(
    void* old_ptr, size_t new_size,
    void (*invalid_free_fn)(void*),
    size_t (*invalid_get_size_fn)(const void*)) {
  AddRoomForMark(&new_size);
  // Get the size of the old entry
  const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn);

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  // We do hysteresis to avoid resizing ping-pongs:
  //    . If we need to grow, grow to max(new_size, old_size * 1.X)
  //    . Don't shrink unless new_size < old_size * 0.Y
  // X and Y trade-off time for wasted space.  For now we do 1.25 and 0.5.
  const size_t min_growth = min(old_size / 4,
      (std::numeric_limits<size_t>::max)() - old_size);  // Avoid overflow.
  const size_t lower_bound_to_grow = old_size + min_growth;
  const size_t upper_bound_to_shrink = old_size / 2;
  if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
    // Need to reallocate.
    void* new_ptr = NULL;

    if (new_size > old_size && new_size < lower_bound_to_grow) {
      new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow);
    }
    ExcludeMarkFromSize(&new_size);  // do_malloc will add space if needed.
    if (new_ptr == NULL) {
      // Either new_size is not a tiny increment, or last do_malloc failed.
      new_ptr = do_malloc_or_cpp_alloc(new_size);
    }
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free_with_callback(old_ptr, invalid_free_fn);
    return new_ptr;
  } else {
    // We still need to call hooks to report the updated size:
    MallocHook::InvokeDeleteHook(old_ptr);
    ExcludeMarkFromSize(&new_size);
    MallocHook::InvokeNewHook(old_ptr, new_size);
    return old_ptr;
  }
}
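
// Worked example of the hysteresis above (illustrative, not original
// source): with old_size = 1000, min_growth = 250, so
// lower_bound_to_grow = 1250 and upper_bound_to_shrink = 500.  A realloc
// to 1001..1249 first tries a 1250-byte block (growth to ~1.25x), a
// realloc to 501..1000 keeps the existing block, and a realloc to 499
// allocates fresh and copies.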

inline void* do_realloc(void* old_ptr, size_t new_size) {
  return do_realloc_with_callback(old_ptr, new_size,
                                  &InvalidFree, &InvalidGetSizeForRealloc);
}

// For use by exported routines below that want specific alignments
//
// Note: this code can be slow for alignments > 16, and can
// significantly fragment memory.  The expectation is that
// memalign/posix_memalign/valloc/pvalloc will not be invoked very
// often.  This requirement simplifies our implementation and allows
// us to tune for expected allocation patterns.
void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  // Marked in CheckMallocResult(), which is also inside SpanToMallocResult().
  AddRoomForMark(&size);
  if (size + align < size) return NULL;         // Overflow

  // Fall back to malloc if we would already align this memory access properly.
  if (align <= AlignmentForSize(size)) {
    void* p = do_malloc(size);
    ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0);
    return p;
  }

  if (Static::pageheap() == NULL) ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    int cl = Static::sizemap()->SizeClass(size);
    while (cl < kNumClasses &&
           ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      ThreadCache* heap = ThreadCache::GetCache();
      size = Static::sizemap()->class_to_size(cl);
      return CheckMallocResult(heap->Allocate(size, cl));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(Static::pageheap_lock());

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = Static::pageheap()->New(tcmalloc::pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = tcmalloc::pages(size + align);
  Span* span = Static::pageheap()->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = Static::pageheap()->Split(span, skip);
    Static::pageheap()->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = tcmalloc::pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = Static::pageheap()->Split(span, needed);
    Static::pageheap()->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
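
// Worked example of the carve above (illustrative, assuming 8 KiB pages):
// do_memalign(64 << 10, 32 << 10) computes alloc = pages(96 KiB) = 12 pages,
// splits off up to 7 leading pages until span->start << kPageShift is
// 64 KiB-aligned, then splits off and frees the trailer beyond
// needed = pages(32 KiB) = 4 pages.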

// Helpers for use by exported routines below:

inline void do_malloc_stats() {
  PrintStats(1);
}

inline int do_mallopt(int cmd, int value) {
  return 1;     // Indicates error
}

#ifdef HAVE_STRUCT_MALLINFO
inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL, NULL, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" field, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.pageheap.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap.free_bytes +
                                    stats.pageheap.unmapped_bytes);
  info.uordblks  = static_cast<int>(stats.pageheap.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap.free_bytes
                                    - stats.pageheap.unmapped_bytes);

  return info;
}
#endif  // HAVE_STRUCT_MALLINFO

static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);

inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

void* cpp_memalign(size_t align, size_t size) {
  for (;;) {
    void* p = do_memalign(align, size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh)
        return 0;

      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

}  // end unnamed namespace

// As promised, the definition of this function, declared above.
size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) {
  // Chromium workaround for third-party code calling tc_malloc_size(NULL), see
  // http://code.google.com/p/chromium/issues/detail?id=118087
  // Note: this is consistent with GLIBC's implementation of
  // malloc_usable_size(NULL).
  if (ptr == NULL)
    return 0;
  ASSERT(TCMallocImplementation::GetOwnership(ptr)
         != TCMallocImplementation::kNotOwned);
  return ExcludeSpaceForMark(
      GetSizeWithCallback(ptr, &InvalidGetAllocatedSize));
}

void TCMallocImplementation::MarkThreadBusy() {
  // Allocate to force the creation of a thread cache, but avoid
  // invoking any hooks.
  do_free(do_malloc(0));
}

//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
    int* major, int* minor, const char** patch) __THROW {
  if (major) *major = TC_VERSION_MAJOR;
  if (minor) *minor = TC_VERSION_MINOR;
  if (patch) *patch = TC_VERSION_PATCH;
  return TC_VERSION_STRING;
}

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW {
  int old_mode = tc_new_mode;
  tc_new_mode = flag;
  return old_mode;
}
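
// Illustrative (hedged) usage sketch, not part of the original source:
//   tc_set_new_mode(1);           // malloc failures now run the new handler
//   void* p = malloc(1 << 20);    // on failure, behaves like operator new
//   tc_set_new_mode(0);           // restore default malloc semantics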

#ifndef TCMALLOC_USING_DEBUGALLOCATION  // debugallocation.cc defines its own

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.
1564 extern "C" PERFTOOLS_DLL_DECL
void* tc_malloc(size_t size
) __THROW
{
1565 void* result
= do_malloc_or_cpp_alloc(size
);
1566 MallocHook::InvokeNewHook(result
, size
);
1570 extern "C" PERFTOOLS_DLL_DECL
void tc_free(void* ptr
) __THROW
{
1571 MallocHook::InvokeDeleteHook(ptr
);
1575 extern "C" PERFTOOLS_DLL_DECL
void* tc_calloc(size_t n
,
1576 size_t elem_size
) __THROW
{
1577 void* result
= do_calloc(n
, elem_size
);
1578 MallocHook::InvokeNewHook(result
, n
* elem_size
);
1582 extern "C" PERFTOOLS_DLL_DECL
void tc_cfree(void* ptr
) __THROW
{
1583 MallocHook::InvokeDeleteHook(ptr
);
1587 extern "C" PERFTOOLS_DLL_DECL
void* tc_realloc(void* old_ptr
,
1588 size_t new_size
) __THROW
{
1589 if (old_ptr
== NULL
) {
1590 void* result
= do_malloc_or_cpp_alloc(new_size
);
1591 MallocHook::InvokeNewHook(result
, new_size
);
1594 if (new_size
== 0) {
1595 MallocHook::InvokeDeleteHook(old_ptr
);
1599 return do_realloc(old_ptr
, new_size
);
1602 extern "C" PERFTOOLS_DLL_DECL
void* tc_new(size_t size
) {
1603 void* p
= cpp_alloc(size
, false);
1604 // We keep this next instruction out of cpp_alloc for a reason: when
1605 // it's in, and new just calls cpp_alloc, the optimizer may fold the
1606 // new call into cpp_alloc, which messes up our whole section-based
1607 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
1608 // isn't the last thing this fn calls, and prevents the folding.
1609 MallocHook::InvokeNewHook(p
, size
);
1613 extern "C" PERFTOOLS_DLL_DECL
void* tc_new_nothrow(size_t size
, const std::nothrow_t
&) __THROW
{
1614 void* p
= cpp_alloc(size
, true);
1615 MallocHook::InvokeNewHook(p
, size
);
1619 extern "C" PERFTOOLS_DLL_DECL
void tc_delete(void* p
) __THROW
{
1620 MallocHook::InvokeDeleteHook(p
);
1624 // Standard C++ library implementations define and use this
1625 // (via ::operator delete(ptr, nothrow)).
1626 // But it's really the same as normal delete, so we just do the same thing.
1627 extern "C" PERFTOOLS_DLL_DECL
void tc_delete_nothrow(void* p
, const std::nothrow_t
&) __THROW
{
1628 MallocHook::InvokeDeleteHook(p
);
extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size,
                                                        const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p,
                                                          const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align,
                                                size_t size) __THROW {
  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
    void** result_ptr, size_t align, size_t size) __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
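
// Illustrative usage (a sketch, not part of this file): posix_memalign
// succeeds only for power-of-two alignments that are multiples of
// sizeof(void*).
//
//   void* p = NULL;
//   int rc = tc_posix_memalign(&p, 64, 1000);  // 64 is valid; rc == 0
//   // tc_posix_memalign(&p, 24, 1000) would return EINVAL, since 24 is
//   // not a power of two.
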
static size_t pagesize = 0;

extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  if (size == 0) {    // pvalloc(0) should allocate one page, according to
    size = pagesize;  // http://man.free4web.biz/man3/libmpatrol.3.html
  }
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
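
// Worked example of the rounding above (illustrative): with pagesize == 4096,
// a request of 1 byte becomes (1 + 4095) & ~4095 == 4096, and a request of
// 4097 becomes (4097 + 4095) & ~4095 == 8192. The bit trick relies on
// pagesize being a power of two.
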
extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
  do_malloc_stats();
}

extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  return do_mallinfo();
}
#endif

extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
  return MallocExtension::instance()->GetAllocatedSize(ptr);
}

#if defined(OS_LINUX)
extern "C" void* PERFTOOLS_DLL_DECL tc_malloc_skip_new_handler(size_t size) {
  void* result = do_malloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
#endif

#endif  // TCMALLOC_USING_DEBUGALLOCATION

#if defined(OS_LINUX)
// Alias the weak symbol in chromium to our implementation.
extern "C" __attribute__((visibility("default"), alias("tc_malloc_skip_new_handler")))
void* tc_malloc_skip_new_handler_weak(size_t size);
#endif

// --- Validation implementation with an extra mark ----------------------------
// We will put a mark at the extreme end of each allocation block. We make
// sure that we always allocate enough "extra memory" that we can fit in the
// mark, and still provide the requested usable region. If ever that mark is
// not as expected, then we know that the user is corrupting memory beyond their
// request size, or that they have called free a second time without having
// the memory allocated (again). This allows us to spot most double free()s,
// but some can "slip by" or confuse our logic if the caller reallocates memory
// (for a second use) before performing an evil double-free of a first
// allocation.

// This code can be optimized, but for now, it is written to be most easily
// understood, and flexible (since it is evolving a bit). Potential
// optimizations include using other calculated data, such as class size, or
// allocation size, which is known in the code above, but then is recalculated
// below. Another potential optimization would be careful manual inlining of
// code, but I *think* that the compiler will probably do this for me, and I've
// been careful to avoid aliasing issues that might make a compiler back off.

// Evolution includes experimenting with different marks, to minimize the chance
// that a mark would be misunderstood (missed corruption). The marks are meant
// to be hashed encodings of the location, so that they can't be copied over a
// different region (by accident) without being detected (most of the time).
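
// Illustrative layout (a sketch of the scheme described above): for a request
// of N usable bytes, the block carved out of the size class looks like
//
//   [ N usable bytes ............................ ][ MarkType mark ]
//   ^ptr returned to caller                         ^GetMarkLocation(ptr)
//
// AddRoomForMark() grows the internal request to make room for the trailing
// mark, while ExcludeSpaceForMark()/ExcludeMarkFromSize() hide it again when
// the caller asks how large the block is.
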
// Enable the following define to turn on all the TCMalloc checking.
// It will cost about 2% in performance, but it will catch double frees (most of
// the time), and will often catch allocated-buffer overrun errors. This
// validation is only active when TCMalloc is used as the allocator.
#ifndef NDEBUG
#define TCMALLOC_VALIDATION
#endif

#if !defined(TCMALLOC_VALIDATION)

static size_t ExcludeSpaceForMark(size_t size) { return size; }
static void AddRoomForMark(size_t* size) {}
static void ExcludeMarkFromSize(size_t* new_size) {}
static void MarkAllocatedRegion(void* ptr) {}
static void ValidateAllocatedRegion(void* ptr, size_t cl) {}

#else  // TCMALLOC_VALIDATION

static void DieFromDoubleFree() {
  Log(kCrash, __FILE__, __LINE__, "Attempt to double free");
}

static void DieFromMemoryCorruption() {
  Log(kCrash, __FILE__, __LINE__, "Memory corrupted");
}

// We can either do byte marking, or whole word marking based on the following
// define. char is as small as we can get, and word marking probably provides
// more than enough bits that we won't miss a corruption. Any sized integral
// type can be used, but we just define two examples.

// #define TCMALLOC_SMALL_VALIDATION
#if defined (TCMALLOC_SMALL_VALIDATION)

typedef char MarkType;  // char saves memory... int is more complete.
static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36);

#else

typedef int MarkType;  // char saves memory... int is more complete.
static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536);

#endif

// TODO(jar): See if use of a reference rather than a pointer gets better
// inlining, or if a macro is needed. My fear is that taking the address may
// preclude register allocation.
inline static void AddRoomForMark(size_t* size) {
  *size += sizeof(kAllocationMarkMask);
}

inline static void ExcludeMarkFromSize(size_t* new_size) {
  *new_size -= sizeof(kAllocationMarkMask);
}

inline static size_t ExcludeSpaceForMark(size_t size) {
  return size - sizeof(kAllocationMarkMask);  // Lie about size when asked.
}

inline static MarkType* GetMarkLocation(void* ptr) {
  size_t size = GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
  ASSERT(size % sizeof(kAllocationMarkMask) == 0);
  size_t last_index = (size / sizeof(kAllocationMarkMask)) - 1;
  return static_cast<MarkType*>(ptr) + last_index;
}
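
// Worked example (illustrative): with MarkType == int (4 bytes) and an
// allocated size of 32 bytes, last_index == (32 / 4) - 1 == 7, so the mark
// occupies bytes 28..31 -- the final word of the block, just past the space
// reported to the caller by ExcludeSpaceForMark().
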
// We hash in the mark location plus the pointer so that we effectively mix in
// the size of the block. This means that if a span is used for different sizes,
// the mark will be different. It would be good to hash in the size (which we
// effectively get by using both mark location and pointer), but even better
// would be to also include the class, as it concisely contains the entropy
// found in the size (when we don't have a large allocation), and there is less
// risk of losing those bits to truncation. It would probably be good to combine
// the high bits of size (capturing info about large blocks) with the class
// (which is a 6 bit number).
inline static MarkType GetMarkValue(void* ptr, MarkType* mark) {
  void* ptr2 = static_cast<void*>(mark);
  size_t offset1 = static_cast<char*>(ptr) - static_cast<char*>(NULL);
  size_t offset2 = static_cast<char*>(ptr2) - static_cast<char*>(NULL);
  static const int kInvariantBits = 2;
  ASSERT((offset1 >> kInvariantBits) << kInvariantBits == offset1);
  // Note: low bits of both offsets are invariants due to alignment. High bits
  // of both offsets are the same (unless we have a large allocation). Avoid
  // XORing high bits together, as they will cancel for most small allocations.

  MarkType ret = kAllocationMarkMask;
  // Using a little shift, we can safely XOR together both offsets.
  ret ^= static_cast<MarkType>(offset1 >> kInvariantBits) ^
         static_cast<MarkType>(offset2);
  if (sizeof(ret) == 1) {
    // Try to bring some high level bits into the mix.
    ret += static_cast<MarkType>(offset1 >> 8) ^
           static_cast<MarkType>(offset1 >> 16) ^
           static_cast<MarkType>(offset1 >> 24);
  }
  // Hash in high bits on a 64 bit architecture.
  if (sizeof(size_t) == 8 && sizeof(ret) == 4)
    ret += offset1 >> 16;
  if (ret == 0)
    ret = kAllocationMarkMask;  // Avoid common pattern of all zeros.
  return ret;
}

// TODO(jar): Use the passed in TCmalloc Class Index to calculate mark location
// faster. The current implementation calls general functions, which have to
// recalculate this in order to get the Class Size. This is a slow and wasteful
// recomputation... but it is much more readable this way (for now).
static void ValidateAllocatedRegion(void* ptr, size_t cl) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  MarkType allocated_mark = GetMarkValue(ptr, mark);
  MarkType current_mark = *mark;

  if (current_mark == ~allocated_mark)
    DieFromDoubleFree();
  if (current_mark != allocated_mark)
    DieFromMemoryCorruption();
#ifndef NDEBUG
  // In debug mode, copy the mark into all the free'd region.
  size_t class_size = static_cast<size_t>(reinterpret_cast<char*>(mark) -
                                          reinterpret_cast<char*>(ptr));
  memset(ptr, static_cast<char>(0x36), class_size);
#endif
  *mark = ~allocated_mark;  // Distinctively not allocated.
}
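
// Illustrative walk-through (a sketch of the protocol): MarkAllocatedRegion()
// below stores GetMarkValue(ptr, mark) at the end of a block when it is
// handed out. On free, ValidateAllocatedRegion() recomputes that value; a
// block whose stored mark equals the bitwise complement was already freed
// (double free), while any other mismatch means the caller wrote past the
// usable region (corruption).
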
static void MarkAllocatedRegion(void* ptr) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  *mark = GetMarkValue(ptr, mark);
}

#endif  // TCMALLOC_VALIDATION