// File: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
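//
// A minimal usage sketch (illustrative only; the public API is declared in
// base/low_level_alloc.h):
//
//   void *p = LowLevelAlloc::Alloc(128);   // block from the default arena
//   ...use p...
//   LowLevelAlloc::Free(p);                // return it to its arena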
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new>                   // for placement-new

// On systems (like FreeBSD) that don't define MAP_ANONYMOUS, use the older
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
  // This struct describes one allocated block, or one free block.
  struct AllocList {
    struct Header {
      intptr_t size;  // size of entire region, including this field. Must be
                      // first.  Valid in both allocated and unallocated blocks
      intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
      LowLevelAlloc::Arena *arena; // pointer to parent arena
      void *dummy_for_alignment;   // aligns regions to 0 mod 2*sizeof(void*)
    } header;

    // Next two fields: in unallocated blocks: freelist skiplist data
    //                  in allocated blocks: overlaps with client data
    int levels;                 // levels in skiplist used
    AllocList *next[kMaxLevel]; // actually has levels elements.
                                // The AllocList node may not have room for
                                // all kMaxLevel entries.  See max_fit in
                                // LLA_SkiplistLevels()
  };
}  // namespace low_level_alloc_internal
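// Illustrative layout note (not part of the original comments): every region
// starts with a Header (four pointer-sized fields).  In a free block the
// bytes after the Header hold the skiplist fields (levels, next[]); in an
// allocated block those same bytes are the storage handed back to the
// client, which is why the allocator returns &s->levels below.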
using low_level_alloc_internal::AllocList;

// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
    result++;
  }
  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}
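// Worked example (illustrative): IntLog2(100, 16) halves 100 -> 50 -> 25
// -> 12, stopping once i <= 16 after three iterations, so it returns 3;
// log2(100/16) is about 2.64, matching the stated approximation.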
// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
  static int32 r = 1;         // no locking---it's not critical
  ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
  int result = 1;
  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  return result;
}
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist:  See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size.  We can't return more than max_fit, no matter what the
  // random number generator says.
  int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
  int level = IntLog2(size, base) + (random? Random() : 1);
  if (level > max_fit) level = max_fit;
  if (level > kMaxLevel-1) level = kMaxLevel - 1;
  RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
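// Worked example (illustrative): with base == 64 and a 1024-byte node,
// IntLog2(1024, 64) == 4, so a deterministic caller (random == false) gets
// level 5, while a randomized caller gets 4 + Random(), capped by max_fit
// (how many next[] pointers actually fit in the node) and by kMaxLevel-1.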
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
                                     AllocList *e, AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? 0 : prev[0]->next[0];
}
// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) { // extend prev pointers
    prev[head->levels] = head;                       // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {  // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}
// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == 0) {
    head->levels--;   // reduce head->levels if level unused
  }
}
// ---------------------------------------------------------------------------
// Arena implementation

struct LowLevelAlloc::Arena {
  Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
  explicit Arena(int) : pagesize(0) {}  // set pagesize to zero explicitly
                                        // for non-static init

  SpinLock mu;            // protects freelist, allocation_count,
                          // pagesize, roundup, min_size
  AllocList freelist;     // head of free list; sorted by addr (under mu)
  int32 allocation_count; // count of allocated blocks (under mu)
  int32 flags;            // flags passed to NewArena (ro after init)
  size_t pagesize;        // ==getpagesize()  (init under mu, then ro)
  size_t roundup;         // lowest power of 2 >= max(16,sizeof (AllocList))
                          // (init under mu, then ro)
  size_t min_size;        // smallest allocation block size
                          // (init under mu, then ro)
};
// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;

// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;

// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
  class SCOPED_LOCKABLE ArenaLock {
   public:
    explicit ArenaLock(LowLevelAlloc::Arena *arena)
        EXCLUSIVE_LOCK_FUNCTION(arena->mu)
        : left_(false), mask_valid_(false), arena_(arena) {
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        // We've decided not to support async-signal-safe arena use until
        // there is a demonstrated need.  Here's how one could do it though
        // (would need to be made more portable).
#if 0
        sigset_t all;
        sigfillset(&all);
        this->mask_valid_ =
            (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
      }
      this->arena_->mu.Lock();
    }
    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
    void Leave() /*UNLOCK_FUNCTION()*/ {
      this->arena_->mu.Unlock();
#if 0
      if (this->mask_valid_) {
        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
      }
#endif
      this->left_ = true;
    }
   private:
    bool left_;       // whether left region
    bool mask_valid_;
#if 0
    sigset_t mask_;   // old mask of blocked signals
#endif
    LowLevelAlloc::Arena *arena_;
    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
  };
} // anonymous namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}
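// Illustrative note (not from the original comments): because the header's
// own address is XORed in, header bytes copied to a different address no
// longer match Magic(kMagicAllocated, &h), and a block that has already been
// freed carries the kMagicUnallocated form, so the RAW_CHECKs below catch
// stray writes and double frees cheaply.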
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
  if (arena->pagesize == 0) {
    arena->pagesize = getpagesize();
    // Round up block sizes to a power of two close to the header size.
    arena->roundup = 16;
    while (arena->roundup < sizeof (arena->freelist.header)) {
      arena->roundup += arena->roundup;
    }
    // Don't allocate blocks less than twice the roundup size to avoid tiny
    // free blocks.
    arena->min_size = 2 * arena->roundup;
    arena->freelist.header.size = 0;
    arena->freelist.header.magic =
        Magic(kMagicUnallocated, &arena->freelist.header);
    arena->freelist.header.arena = arena;
    arena->freelist.levels = 0;
    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
    arena->allocation_count = 0;
    if (arena == &default_arena) {
      // Default arena should be hooked, e.g. for heap-checker to trace
      // pointer chains through objects in the default arena.
      arena->flags = LowLevelAlloc::kCallMallocHook;
    } else if (arena == &unhooked_async_sig_safe_arena) {
      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
    } else {
      arena->flags = 0;   // other arenas' flags may be overridden by client,
                          // but unhooked_arena will have 0 in 'flags'.
    }
  }
}
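// Worked example (illustrative): on a typical 64-bit build the Header's four
// pointer-sized fields occupy 32 bytes, so the loop doubles roundup
// 16 -> 32 and stops, giving roundup == 32 and min_size == 64.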
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                              Arena *meta_data_arena) {
  RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
  if (meta_data_arena == &default_arena) {
    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      meta_data_arena = &unhooked_async_sig_safe_arena;
    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
      meta_data_arena = &unhooked_arena;
    }
  }
  // Arena(0) uses the constructor for non-static contexts
  Arena *result =
    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
  ArenaInit(result);
  result->flags = flags;
  return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
            "may not delete default arena");
  ArenaLock section(arena);
  bool empty = (arena->allocation_count == 0);
  section.Leave();
  if (empty) {
    while (arena->freelist.next[0] != 0) {
      AllocList *region = arena->freelist.next[0];
      size_t size = region->header.size;
      arena->freelist.next[0] = region->next[0];
      RAW_CHECK(region->header.magic ==
                Magic(kMagicUnallocated, &region->header),
                "bad magic number in DeleteArena()");
      RAW_CHECK(region->header.arena == arena,
                "bad arena pointer in DeleteArena()");
      RAW_CHECK(size % arena->pagesize == 0,
                "empty arena has non-page-aligned block size");
      RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                "empty arena has non-page-aligned block");
      int munmap_result;
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
        munmap_result = munmap(region, size);
      } else {
        munmap_result = MallocHook::UnhookedMUnmap(region, size);
      }
      RAW_CHECK(munmap_result == 0,
                "LowLevelAlloc::DeleteArena: munmap failed address");
    }
    Free(arena);
  }
  return empty;
}
// ---------------------------------------------------------------------------

// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}
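// Worked example (illustrative): RoundUp(37, 16) computes
// (37 + 15) & ~15 == 52 & ~15 == 48, the next multiple of 16.  The bit
// trick is why align must be a power of two.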
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != 0) {
    RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
              "bad magic number in Next()");
    RAW_CHECK(next->header.arena == arena,
              "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      RAW_CHECK(prev < next, "unordered freelist");
      RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                reinterpret_cast<char *>(next), "malformed freelist");
    }
  }
  return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
                    reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = 0;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}
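// Illustrative note (not from the original comments): the merged block is
// deleted and re-inserted rather than patched in place because its larger
// size usually earns it a different skiplist level via LLA_SkiplistLevels().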
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
  RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
            "bad magic number in AddToFreelist()");
  RAW_CHECK(f->header.arena == arena,
            "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);                  // maybe coalesce with successor
  Coalesce(prev[0]);            // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != 0) {
    AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
    RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
              "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    if ((arena->flags & kCallMallocHook) != 0) {
      MallocHook::InvokeDeleteHook(v);
    }
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = 0;
  if (request != 0) {
    AllocList *s;       // will point to region that satisfies request
    ArenaLock section(arena);
    ArenaInit(arena);
    // round up with header
    size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
    for (;;) {      // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
      if (i < arena->freelist.levels) {   // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
          before = s;
        }
        if (s != 0) {       // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(0, new_pages_size,
            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
      }
      RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
    // s points to the first free region that's big enough
    if (req_rnd + arena->min_size <= s->header.size) {  // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ANNOTATE_NEW_MEMORY(result, request);
  return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, &default_arena);
  if ((default_arena.flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  RAW_CHECK(arena != 0, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  if ((arena->flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  return &default_arena;
}
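// A minimal arena-lifecycle sketch (illustrative only; the flag constants are
// the ones defined in base/low_level_alloc.h):
//
//   LowLevelAlloc::Arena *a =
//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
//   void *p = LowLevelAlloc::AllocWithArena(512, a);
//   LowLevelAlloc::Free(p);            // blocks remember their arena
//   LowLevelAlloc::DeleteArena(a);     // succeeds only once a is empty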