1 /* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
2 /* vim:set softtabstop=8 shiftwidth=8: */
4 * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
26 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
28 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *******************************************************************************
33 * This allocator implementation is designed to provide scalable performance
34 * for multi-threaded programs on multi-processor systems. The following
35 * features are included for this purpose:
37 * + Multiple arenas are used if there are multiple CPUs, which reduces lock
38 * contention and cache sloshing.
40 * + Cache line sharing between arenas is avoided for internal data
43 * + Memory is managed in chunks and runs (chunks can be split into runs),
44 * rather than as individual pages. This provides a constant-time
45 * mechanism for associating allocations with particular arenas.
47 * Allocation requests are rounded up to the nearest size class, and no record
48 * of the original request size is maintained. Allocations are broken into
49 * categories according to size class. Assuming runtime defaults, 4 kB pages
50 * and a 16 byte quantum on a 32-bit system, the size classes in each category
53 * |=====================================|
54 * | Category | Subcategory | Size |
55 * |=====================================|
56 * | Small | Tiny | 2 |
59 * | |----------------+---------|
60 * | | Quantum-spaced | 16 |
67 * | |----------------+---------|
68 * | | Sub-page | 1 kB |
70 * |=====================================|
78 * |=====================================|
83 * |=====================================|
85 * A different mechanism is used for each category:
87 * Small : Each size class is segregated into its own set of runs. Each run
88 * maintains a bitmap of which regions are free/allocated.
90 * Large : Each allocation is backed by a dedicated run. Metadata are stored
91 * in the associated arena chunk header maps.
93 * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
94 * Metadata are stored in a separate red-black tree.
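 * A worked example (added for illustration, assuming the runtime defaults
 * described above): a malloc(100) request is Small and is rounded up to the
 * 112 byte quantum-spaced class; a malloc(5000) request is Large and is
 * backed by a dedicated two-page (8 kB) run; a malloc(2 MB) request is Huge
 * and is served by two contiguous 1 MB chunks tracked in the huge tree.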
96 *******************************************************************************
100 * MALLOC_PRODUCTION disables assertions and statistics gathering. It also
101 * defaults the A and J runtime options to off. These settings are appropriate
102 * for production systems.
104 #ifndef MOZ_MEMORY_DEBUG
105 # define MALLOC_PRODUCTION
109 * Use only one arena by default. Mozilla does not currently make extensive
110 * use of concurrent allocation, so the increased fragmentation associated with
111 * multiple arenas is not warranted.
113 #define MOZ_MEMORY_NARENAS_DEFAULT_ONE
116 * MALLOC_STATS enables statistics calculation, and is required for
121 #ifndef MALLOC_PRODUCTION
123 * MALLOC_DEBUG enables assertions and other sanity checks, and disables
126 # define MALLOC_DEBUG
128 /* Memory filling (junk/zero). */
131 /* Allocation tracing. */
132 # ifndef MOZ_MEMORY_WINDOWS
133 # define MALLOC_UTRACE
136 /* Support optional abort() on OOM. */
137 # define MALLOC_XMALLOC
139 /* Support SYSV semantics. */
144 * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
145 * validation. There are many possible errors that validation does not even
148 #define MALLOC_VALIDATE
150 /* Embed no-op macros that support memory allocation tracking via valgrind. */
152 # define MALLOC_VALGRIND
154 #ifdef MALLOC_VALGRIND
155 # include <valgrind/valgrind.h>
157 # define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
158 # define VALGRIND_FREELIKE_BLOCK(addr, rzB)
162 * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
163 * re-balances arena load if exponentially averaged contention exceeds a
166 /* #define MALLOC_BALANCE */
168 #if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
170 * MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
171 * files, so that if a chunk is mapped, it is guaranteed to be swappable.
172 * This avoids asynchronous OOM failures that are due to VM over-commit.
174 * XXX OS X over-commits, so we should probably use mmap() instead of
175 * vm_allocate(), so that MALLOC_PAGEFILE works.
177 # define MALLOC_PAGEFILE
180 #ifdef MALLOC_PAGEFILE
181 /* Write size when initializing a page file. */
182 # define MALLOC_PAGEFILE_WRITE_SIZE 512
185 #ifdef MOZ_MEMORY_LINUX
186 #define _GNU_SOURCE /* For mremap(2). */
187 #define issetugid() 0
188 #if 0 /* Enable in order to test decommit code on Linux. */
189 # define MALLOC_DECOMMIT
193 #include <sys/types.h>
202 #ifdef MOZ_MEMORY_WINDOWS
203 #include <cruntime.h>
204 #include <internal.h>
208 #pragma warning( disable: 4267 4996 4146 )
212 #define inline __inline
213 #define SIZE_T_MAX SIZE_MAX
214 #define STDERR_FILENO 2
215 #define PATH_MAX MAX_PATH
216 #define vsnprintf _vsnprintf
218 static unsigned long tlsIndex = 0xffffffff;
221 #define _pthread_self() __threadid()
222 #define issetugid() 0
224 /* use MSVC intrinsics */
225 #pragma intrinsic(_BitScanForward)
226 static __forceinline int
231 if (_BitScanForward(&i, x) != 0)
237 /* Implement getenv without using malloc */
238 static char mozillaMallocOptionsBuf[64];
240 #define getenv xgetenv
242 getenv(const char *name)
245 if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
246 sizeof(mozillaMallocOptionsBuf)) > 0)
247 return (mozillaMallocOptionsBuf);
252 typedef unsigned char uint8_t;
253 typedef unsigned uint32_t;
254 typedef unsigned long long uint64_t;
255 typedef unsigned long long uintmax_t;
256 typedef long ssize_t;
258 #define MALLOC_DECOMMIT
261 #ifndef MOZ_MEMORY_WINDOWS
262 #ifndef MOZ_MEMORY_SOLARIS
263 #include <sys/cdefs.h>
266 # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
269 __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
270 #include "libc_private.h"
274 #include "spinlock.h"
275 #include "namespace.h"
277 #include <sys/mman.h>
279 # define MADV_FREE MADV_DONTNEED
282 # define MAP_NOSYNC 0
284 #include <sys/param.h>
286 #include <sys/stddef.h>
288 #include <sys/time.h>
289 #include <sys/types.h>
290 #ifndef MOZ_MEMORY_SOLARIS
291 #include <sys/sysctl.h>
295 #include <sys/ktrace.h> /* Must come after several other sys/ includes. */
297 #include <machine/atomic.h>
298 #include <machine/cpufunc.h>
299 #include <machine/vmparam.h>
305 # define SIZE_T_MAX SIZE_MAX
308 #ifdef MOZ_MEMORY_DARWIN
309 #define _pthread_self pthread_self
310 #define _pthread_mutex_init pthread_mutex_init
311 #define _pthread_mutex_trylock pthread_mutex_trylock
312 #define _pthread_mutex_lock pthread_mutex_lock
313 #define _pthread_mutex_unlock pthread_mutex_unlock
322 #ifndef MOZ_MEMORY_DARWIN
327 #ifdef MOZ_MEMORY_DARWIN
328 #include <libkern/OSAtomic.h>
329 #include <mach/mach_error.h>
330 #include <mach/mach_init.h>
331 #include <mach/vm_map.h>
332 #include <malloc/malloc.h>
336 #include "un-namespace.h"
341 #include "jemalloc.h"
343 #ifdef MOZ_MEMORY_DARWIN
344 static const bool __isthreaded = true;
347 #define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
351 #ifdef MOZ_MEMORY_WINDOWS
352 /* MSVC++ does not support C99 variable-length arrays. */
353 # define RB_NO_C99_VARARRAYS
358 /* Disable inlining to make debugging easier. */
366 /* Size of stack-allocated buffer passed to strerror_r(). */
367 #define STRERROR_BUF 64
369 /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
370 # define QUANTUM_2POW_MIN 4
371 #ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
372 # define SIZEOF_PTR_2POW MOZ_MEMORY_SIZEOF_PTR_2POW
374 # define SIZEOF_PTR_2POW 2
377 #ifndef MOZ_MEMORY_DARWIN
378 static const bool __isthreaded = true;
384 # define QUANTUM_2POW_MIN 4
385 # define SIZEOF_PTR_2POW 2
386 # define CPU_SPINWAIT __asm__ volatile("pause")
389 # define QUANTUM_2POW_MIN 4
390 # define SIZEOF_PTR_2POW 3
393 # define QUANTUM_2POW_MIN 4
394 # define SIZEOF_PTR_2POW 3
398 # define QUANTUM_2POW_MIN 4
399 # define SIZEOF_PTR_2POW 3
403 # define QUANTUM_2POW_MIN 4
404 # define SIZEOF_PTR_2POW 3
405 # define CPU_SPINWAIT __asm__ volatile("pause")
408 # define QUANTUM_2POW_MIN 3
409 # define SIZEOF_PTR_2POW 2
413 # define QUANTUM_2POW_MIN 3
414 # define SIZEOF_PTR_2POW 2
418 # define QUANTUM_2POW_MIN 4
419 # define SIZEOF_PTR_2POW 2
423 #define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
425 /* sizeof(int) == (1U << SIZEOF_INT_2POW). */
426 #ifndef SIZEOF_INT_2POW
427 # define SIZEOF_INT_2POW 2
430 /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
431 #if (!defined(PIC) && !defined(NO_TLS))
436 /* MALLOC_BALANCE requires TLS. */
437 # ifdef MALLOC_BALANCE
438 # undef MALLOC_BALANCE
443 * Size and alignment of memory chunks that are allocated by the OS's virtual
446 #define CHUNK_2POW_DEFAULT 20
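/* Added note: 1U << 20 bytes, i.e. chunks are 1 MB by default. */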
448 /* Maximum number of dirty pages per arena. */
449 #define DIRTY_MAX_DEFAULT (1U << 10)
451 /* Default reserve chunks. */
452 #define RESERVE_MIN_2POW_DEFAULT 1
454 * Default range (in chunks) between reserve_min and reserve_max, in addition
455 * to the mandatory one chunk per arena.
457 #ifdef MALLOC_PAGEFILE
458 # define RESERVE_RANGE_2POW_DEFAULT 5
460 # define RESERVE_RANGE_2POW_DEFAULT 0
464 * Maximum size of L1 cache line. This is used to avoid cache line aliasing,
465 * so over-estimates are okay (up to a point), but under-estimates will
466 * negatively affect performance.
468 #define CACHELINE_2POW 6
469 #define CACHELINE ((size_t)(1U << CACHELINE_2POW))
471 /* Smallest size class to support. */
472 #define TINY_MIN_2POW 1
475 * Maximum size class that is a multiple of the quantum, but not (necessarily)
476 * a power of 2. Above this size, allocations are rounded up to the nearest
479 #define SMALL_MAX_2POW_DEFAULT 9
480 #define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
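/*
 * Added illustration (assumes the defaults above): with SMALL_MAX_DEFAULT
 * == 512, a 500 byte request uses the 512 byte quantum-spaced bin, while a
 * 600 byte request is rounded up to the next power of two (1 kB) and served
 * from a sub-page bin.
 */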
483 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
484 * as small as possible such that this setting is still honored, without
485 * violating other constraints. The goal is to make runs as small as possible
486 * without exceeding a per run external fragmentation threshold.
488 * We use binary fixed point math for overhead computations, where the binary
489 * point is implicitly RUN_BFP bits to the left.
491 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
492 * honored for some/all object sizes, since there is one bit of header overhead
493 * per object (plus a constant). This constraint is relaxed (ignored) for runs
494 * that are so small that the per-region overhead is greater than:
496 * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
499 /* \/ Implicit binary fixed point. */
500 #define RUN_MAX_OVRHD 0x0000003dU
501 #define RUN_MAX_OVRHD_RELAX 0x00001800U
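/*
 * Added worked example (assuming RUN_BFP == 12, i.e. the binary point sits
 * 12 bits to the left): RUN_MAX_OVRHD == 0x3d == 61 encodes a target header
 * overhead of 61/4096, roughly 1.5% of a run's size.
 */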
503 /* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */
504 #define RUN_MAX_SMALL_2POW 15
505 #define RUN_MAX_SMALL (1U << RUN_MAX_SMALL_2POW)
508 * Hyper-threaded CPUs may need a special instruction inside spin loops in
509 * order to yield to another virtual CPU. If no such instruction is defined
510 * above, make CPU_SPINWAIT a no-op.
513 # define CPU_SPINWAIT
517 * Adaptive spinning must eventually switch to blocking, in order to avoid the
518 * potential for priority inversion deadlock. Backing off past a certain point
519 * can actually waste time.
521 #define SPIN_LIMIT_2POW 11
524 * Conversion from spinning to blocking is expensive; we use (1U <<
525 * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
526 * worst-case spinning.
528 #define BLOCK_COST_2POW 4
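/*
 * Added note: malloc_spin_lock() below doubles its busy-wait on each retry,
 * up to 1U << SPIN_LIMIT_2POW iterations, before blocking; when it does
 * block, the accumulated spin count is scaled by 1U << BLOCK_COST_2POW,
 * i.e. blocking is treated as 16 times as costly as spinning.
 */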
530 #ifdef MALLOC_BALANCE
532 * We use an exponential moving average to track recent lock contention,
533 * where the size of the history window is N, and alpha=2/(N+1).
535 * Due to integer math rounding, very small values here can cause
536 * substantial degradation in accuracy, thus making the moving average decay
537 * faster than it would with precise calculation.
539 # define BALANCE_ALPHA_INV_2POW 9
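/*
 * Added note (assuming alpha == 2^-BALANCE_ALPHA_INV_2POW): alpha == 1/512,
 * which by alpha == 2/(N+1) corresponds to a history window of roughly
 * N == 1023 lock acquisitions.
 */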
542 * Threshold value for the exponential moving contention average at which to
543 * re-assign a thread.
545 # define BALANCE_THRESHOLD_DEFAULT (1U << (SPIN_LIMIT_2POW-4))
548 /******************************************************************************/
551 * Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
552 * places, because they require malloc()ed memory, which causes bootstrapping
553 * issues in some cases.
555 #if defined(MOZ_MEMORY_WINDOWS)
556 #define malloc_mutex_t CRITICAL_SECTION
557 #define malloc_spinlock_t CRITICAL_SECTION
558 #elif defined(MOZ_MEMORY_DARWIN)
565 #elif defined(MOZ_MEMORY)
566 typedef pthread_mutex_t malloc_mutex_t;
567 typedef pthread_mutex_t malloc_spinlock_t;
569 /* XXX these should be #ifdef'ed for freebsd (and linux?) only */
573 typedef malloc_spinlock_t malloc_mutex_t;
576 /* Set to true once the allocator has been initialized. */
577 static bool malloc_initialized = false;
579 #if defined(MOZ_MEMORY_WINDOWS)
580 /* No init lock for Windows. */
581 #elif defined(MOZ_MEMORY_DARWIN)
582 static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
583 #elif defined(MOZ_MEMORY_LINUX)
584 static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
585 #elif defined(MOZ_MEMORY)
586 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
588 static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
591 /******************************************************************************/
593 * Statistics data structures.
598 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
599 struct malloc_bin_stats_s {
601 * Number of allocation requests that corresponded to the size of this
606 /* Total number of runs created for this bin's size class. */
610 * Total number of runs reused by extracting them from the runs tree for
611 * this bin's size class.
615 /* High-water mark for this bin. */
616 unsigned long highruns;
618 /* Current number of runs in this bin. */
619 unsigned long curruns;
622 typedef struct arena_stats_s arena_stats_t;
623 struct arena_stats_s {
624 /* Number of bytes currently mapped. */
628 * Total number of purge sweeps, total number of madvise calls made,
629 * and total pages purged in order to keep dirty unused memory under
635 #ifdef MALLOC_DECOMMIT
637 * Total number of decommit/commit operations, and total number of
642 uint64_t decommitted;
645 /* Per-size-category statistics. */
646 size_t allocated_small;
647 uint64_t nmalloc_small;
648 uint64_t ndalloc_small;
650 size_t allocated_large;
651 uint64_t nmalloc_large;
652 uint64_t ndalloc_large;
654 #ifdef MALLOC_BALANCE
655 /* Number of times this arena reassigned a thread due to contention. */
660 typedef struct chunk_stats_s chunk_stats_t;
661 struct chunk_stats_s {
662 /* Number of chunks that were allocated. */
665 /* High-water mark for number of chunks allocated. */
666 unsigned long highchunks;
669 * Current number of chunks allocated. This value isn't maintained for
670 * any other purpose, so keep track of it in order to be able to set
673 unsigned long curchunks;
676 #endif /* #ifdef MALLOC_STATS */
678 /******************************************************************************/
680 * Extent data structures.
683 /* Tree of extents. */
684 typedef struct extent_node_s extent_node_t;
685 struct extent_node_s {
686 /* Linkage for the size/address-ordered tree. */
687 rb_node(extent_node_t) link_szad;
689 /* Linkage for the address-ordered tree. */
690 rb_node(extent_node_t) link_ad;
692 /* Pointer to the extent that this tree node is responsible for. */
695 /* Total region size. */
698 typedef rb_tree(extent_node_t) extent_tree_t;
700 /******************************************************************************/
702 * Radix tree data structures.
705 #ifdef MALLOC_VALIDATE
707 * Size of each radix tree node (must be a power of 2). This impacts tree
710 # if (SIZEOF_PTR == 4)
711 # define MALLOC_RTREE_NODESIZE (1U << 14)
713 # define MALLOC_RTREE_NODESIZE CACHELINE
716 typedef struct malloc_rtree_s malloc_rtree_t;
717 struct malloc_rtree_s {
718 malloc_spinlock_t lock;
721 unsigned level2bits[1]; /* Dynamically sized. */
725 /******************************************************************************/
727 * Reserve data structures.
730 /* Callback registration. */
731 typedef struct reserve_reg_s reserve_reg_t;
732 struct reserve_reg_s {
733 /* Linkage for list of all registered callbacks. */
734 ql_elm(reserve_reg_t) link;
736 /* Callback function pointer. */
739 /* Opaque application data pointer. */
743 * Sequence number of condition notification most recently sent to this
749 /******************************************************************************/
751 * Arena data structures.
754 typedef struct arena_s arena_t;
755 typedef struct arena_bin_s arena_bin_t;
757 /* Each element of the chunk map corresponds to one page within the chunk. */
758 typedef struct arena_chunk_map_s arena_chunk_map_t;
759 struct arena_chunk_map_s {
761 * Linkage for run trees. There are two disjoint uses:
763 * 1) arena_t's runs_avail tree.
764 * 2) arena_run_t conceptually uses this linkage for in-use non-full
765 * runs, rather than directly embedding linkage.
767 rb_node(arena_chunk_map_t) link;
770 * Run address (or size) and various flags are stored together. The bit
771 * layout looks like (assuming 32-bit system):
773 * ???????? ???????? ????---- --ckdzla
775 * ? : Unallocated: Run address for first/last pages, unset for internal
777 * Small: Run address.
778 * Large: Run size for first page, unset for trailing pages.
787 * Following are example bit patterns for the three types of runs.
796 * ssssssss ssssssss ssss---- --c-----
797 * xxxxxxxx xxxxxxxx xxxx---- ----d---
798 * ssssssss ssssssss ssss---- -----z--
801 * rrrrrrrr rrrrrrrr rrrr---- -------a
802 * rrrrrrrr rrrrrrrr rrrr---- -------a
803 * rrrrrrrr rrrrrrrr rrrr---- -------a
806 * ssssssss ssssssss ssss---- ------la
807 * -------- -------- -------- ------la
808 * -------- -------- -------- ------la
811 #ifdef MALLOC_DECOMMIT
812 #define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
814 #define CHUNK_MAP_KEY ((size_t)0x10U)
815 #define CHUNK_MAP_DIRTY ((size_t)0x08U)
816 #define CHUNK_MAP_ZEROED ((size_t)0x04U)
817 #define CHUNK_MAP_LARGE ((size_t)0x02U)
818 #define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
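/*
 * Illustrative helper (added; not part of the original allocator, shown only
 * to make the flag layout above concrete). It reports whether a raw map
 * entry marks the first page of an allocated large run.
 */
#if 0 /* Example only. */
static inline bool
chunk_map_is_large_allocated(size_t mapelm)
{

	return ((mapelm & (CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED));
}
#endif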
820 typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
821 typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
823 /* Arena chunk header. */
824 typedef struct arena_chunk_s arena_chunk_t;
825 struct arena_chunk_s {
826 /* Arena that owns the chunk. */
829 /* Linkage for the arena's chunks_dirty tree. */
830 rb_node(arena_chunk_t) link_dirty;
832 /* Number of dirty pages. */
835 /* Map of pages within chunk that keeps track of free/large/small. */
836 arena_chunk_map_t map[1]; /* Dynamically sized. */
838 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
840 typedef struct arena_run_s arena_run_t;
844 # define ARENA_RUN_MAGIC 0x384adf93
847 /* Bin this run is associated with. */
850 /* Index of first element that might have a free region. */
851 unsigned regs_minelm;
853 /* Number of free regions in run. */
856 /* Bitmask of in-use regions (0: in use, 1: free). */
857 unsigned regs_mask[1]; /* Dynamically sized. */
862 * Current run being used to service allocations of this bin's size
868 * Tree of non-full runs. This tree is used when looking for an
869 * existing run when runcur is no longer usable. We choose the
870 * non-full run that is lowest in memory; this policy tends to keep
871 * objects packed well, and it can also help reduce the number of
872 * almost-empty chunks.
874 arena_run_tree_t runs;
876 /* Size of regions in a run for this bin's size class. */
879 /* Total size of a run for this bin's size class. */
882 /* Total number of regions in a run for this bin's size class. */
885 /* Number of elements in a run's regs_mask for this bin's size class. */
886 uint32_t regs_mask_nelms;
888 /* Offset of first region in a run for this bin's size class. */
889 uint32_t reg0_offset;
892 /* Bin statistics. */
893 malloc_bin_stats_t stats;
900 # define ARENA_MAGIC 0x947d3d24
903 /* All operations on this arena require that lock be locked. */
905 malloc_spinlock_t lock;
907 pthread_mutex_t lock;
915 * Chunk allocation sequence number, used to detect races with other
916 * threads during chunk allocation, and then discard unnecessary chunks.
920 /* Tree of dirty-page-containing chunks this arena manages. */
921 arena_chunk_tree_t chunks_dirty;
924 * In order to avoid rapid chunk allocation/deallocation when an arena
925 * oscillates right on the cusp of needing a new chunk, cache the most
926 * recently freed chunk. The spare is left in the arena's chunk trees
927 * until it is deleted.
929 * There is one spare chunk per arena, rather than one spare total, in
930 * order to avoid interactions between multiple threads that could make
931 * a single spare inadequate.
933 arena_chunk_t *spare;
936 * Current count of pages within unused runs that are potentially
937 * dirty, and for which madvise(... MADV_FREE) has not been called. By
938 * tracking this, we can institute a limit on how much dirty unused
939 * memory is mapped for each arena.
944 * Size/address-ordered tree of this arena's available runs. This tree
945 * is used for first-best-fit run allocation.
947 arena_avail_tree_t runs_avail;
949 #ifdef MALLOC_BALANCE
951 * The arena load balancing machinery needs to keep track of how much
952 * lock contention there is. This value is exponentially averaged.
958 * bins is used to store rings of free regions of the following sizes,
959 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
980 arena_bin_t bins[1]; /* Dynamically sized. */
983 /******************************************************************************/
988 /* Number of CPUs. */
989 static unsigned ncpus;
992 static size_t pagesize;
993 static size_t pagesize_mask;
994 static size_t pagesize_2pow;
996 /* Various bin-related settings. */
997 static size_t bin_maxclass; /* Max size class for bins. */
998 static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */
999 static unsigned nqbins; /* Number of quantum-spaced bins. */
1000 static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */
1001 static size_t small_min;
1002 static size_t small_max;
1004 /* Various quantum-related settings. */
1005 static size_t quantum;
1006 static size_t quantum_mask; /* (quantum - 1). */
1008 /* Various chunk-related settings. */
1009 static size_t chunksize;
1010 static size_t chunksize_mask; /* (chunksize - 1). */
1011 static size_t chunk_npages;
1012 static size_t arena_chunk_header_npages;
1013 static size_t arena_maxclass; /* Max size class for arenas. */
1020 #ifdef MALLOC_VALIDATE
1021 static malloc_rtree_t *chunk_rtree;
1024 /* Protects chunk-related data structures. */
1025 static malloc_mutex_t huge_mtx;
1027 /* Tree of chunks that are stand-alone huge allocations. */
1028 static extent_tree_t huge;
1031 /* Huge allocation statistics. */
1032 static uint64_t huge_nmalloc;
1033 static uint64_t huge_ndalloc;
1034 static size_t huge_allocated;
1042 #ifdef MALLOC_PAGEFILE
1043 static char pagefile_templ[PATH_MAX];
1046 /* Protects reserve-related data structures. */
1047 static malloc_mutex_t reserve_mtx;
1050 * Bounds on acceptable reserve size, and current reserve size. Reserve
1051 * depletion may cause (reserve_cur < reserve_min).
1053 static size_t reserve_min;
1054 static size_t reserve_cur;
1055 static size_t reserve_max;
1057 /* List of registered callbacks. */
1058 static ql_head(reserve_reg_t) reserve_regs;
1061 * Condition notification sequence number, used to determine whether all
1062 * registered callbacks have been notified of the most current condition.
1064 static uint64_t reserve_seq;
1067 * Trees of chunks currently in the memory reserve. Depending on function,
1068 * different tree orderings are needed, which is why there are two trees with
1069 * the same contents.
1071 static extent_tree_t reserve_chunks_szad;
1072 static extent_tree_t reserve_chunks_ad;
1074 /****************************/
1076 * base (internal allocation).
1080 * Current pages that are being used for internal memory allocations. These
1081 * pages are carved up in cacheline-size quanta, so that there is no chance of
1082 * false cache line sharing.
1084 static void *base_pages;
1085 static void *base_next_addr;
1086 #ifdef MALLOC_DECOMMIT
1087 static void *base_next_decommitted;
1089 static void *base_past_addr; /* Addr immediately past base_pages. */
1090 static extent_node_t *base_nodes;
1091 static reserve_reg_t *base_reserve_regs;
1092 static malloc_mutex_t base_mtx;
1094 static size_t base_mapped;
1103 * Arenas that are used to service external requests. Not all elements of the
1104 * arenas array are necessarily used; arenas are created lazily as needed.
1106 static arena_t **arenas;
1107 static unsigned narenas;
1108 static unsigned narenas_2pow;
1110 # ifdef MALLOC_BALANCE
1111 static unsigned narenas_2pow;
1113 static unsigned next_arena;
1117 static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
1119 static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
1124 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
1127 #ifndef MOZ_MEMORY_WINDOWS
1128 static __thread arena_t *arenas_map;
1133 /* Chunk statistics. */
1134 static chunk_stats_t stats_chunks;
1137 /*******************************/
1139 * Runtime configuration options.
1141 const char *_malloc_options;
1143 #ifndef MALLOC_PRODUCTION
1144 static bool opt_abort = true;
1146 static bool opt_junk = true;
1149 static bool opt_abort = false;
1151 static bool opt_junk = false;
1154 static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
1155 #ifdef MALLOC_BALANCE
1156 static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
1158 static bool opt_print_stats = false;
1159 static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
1160 static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
1161 static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
1162 static int opt_reserve_min_lshift = 0;
1163 static int opt_reserve_range_lshift = 0;
1164 #ifdef MALLOC_PAGEFILE
1165 static bool opt_pagefile = true;
1167 #ifdef MALLOC_UTRACE
1168 static bool opt_utrace = false;
1171 static bool opt_sysv = false;
1173 #ifdef MALLOC_XMALLOC
1174 static bool opt_xmalloc = false;
1177 static bool opt_zero = false;
1179 static int opt_narenas_lshift = 0;
1181 #ifdef MALLOC_UTRACE
1188 #define UTRACE(a, b, c) \
1190 malloc_utrace_t ut; \
1194 utrace(&ut, sizeof(ut)); \
1197 #define UTRACE(a, b, c)
1200 /******************************************************************************/
1202 * Begin function prototypes for non-inline static functions.
1205 static char *umax2s(uintmax_t x, char *s);
1206 static bool malloc_mutex_init(malloc_mutex_t *mutex);
1207 static bool malloc_spin_init(malloc_spinlock_t *lock);
1208 static void wrtmessage(const char *p1, const char *p2, const char *p3,
1211 #ifdef MOZ_MEMORY_DARWIN
1212 /* Avoid namespace collision with OS X's malloc APIs. */
1213 #define malloc_printf moz_malloc_printf
1215 static void malloc_printf(const char *format, ...);
1217 static bool base_pages_alloc_mmap(size_t minsize);
1218 static bool base_pages_alloc(size_t minsize);
1219 static void *base_alloc(size_t size);
1220 static void *base_calloc(size_t number, size_t size);
1221 static extent_node_t *base_node_alloc(void);
1222 static void base_node_dealloc(extent_node_t *node);
1223 static reserve_reg_t *base_reserve_reg_alloc(void);
1224 static void base_reserve_reg_dealloc(reserve_reg_t *reg);
1226 static void stats_print(arena_t *arena);
1228 static void *pages_map(void *addr, size_t size, int pfd);
1229 static void pages_unmap(void *addr, size_t size);
1230 static void *chunk_alloc_mmap(size_t size, bool pagefile);
1231 #ifdef MALLOC_PAGEFILE
1232 static int pagefile_init(size_t size);
1233 static void pagefile_close(int pfd);
1235 static void *chunk_recycle_reserve(size_t size, bool zero);
1236 static void *chunk_alloc(size_t size, bool zero, bool pagefile);
1237 static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
1238 static void chunk_dealloc_mmap(void *chunk, size_t size);
1239 static void chunk_dealloc(void *chunk, size_t size);
1241 static arena_t *choose_arena_hard(void);
1243 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
1244 bool large, bool zero);
1245 static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
1246 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
1247 static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
1248 size_t size, bool large, bool zero);
1249 static void arena_purge(arena_t *arena);
1250 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
1251 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
1252 arena_run_t *run, size_t oldsize, size_t newsize);
1253 static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
1254 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
1255 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
1256 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
1257 static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
1258 #ifdef MALLOC_BALANCE
1259 static void arena_lock_balance_hard(arena_t *arena);
1261 static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
1262 static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
1264 static size_t arena_salloc(const void *ptr);
1265 static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
1267 static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
1268 void *ptr, size_t size, size_t oldsize);
1269 static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
1270 void *ptr, size_t size, size_t oldsize);
1271 static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
1272 static void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
1273 static bool arena_new(arena_t *arena);
1274 static arena_t *arenas_extend(unsigned ind);
1275 static void *huge_malloc(size_t size, bool zero);
1276 static void *huge_palloc(size_t alignment, size_t size);
1277 static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
1278 static void huge_dalloc(void *ptr);
1279 static void malloc_print_stats(void);
1280 #ifndef MOZ_MEMORY_WINDOWS
1283 bool malloc_init_hard(void);
1284 static void reserve_shrink(void);
1285 static uint64_t reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
1286 static uint64_t reserve_crit(size_t size, const char *fname, uint64_t seq);
1287 static void reserve_fail(size_t size, const char *fname);
1290 * End function prototypes.
1292 /******************************************************************************/
1295 * umax2s() provides minimal integer printing functionality, which is
1296 * especially useful for situations where allocation in vsnprintf() calls would
1297 * potentially cause deadlock.
1299 #define UMAX2S_BUFSIZE 21
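/*
 * Added usage note: the caller supplies a buffer of at least UMAX2S_BUFSIZE
 * bytes (20 digits for the largest 64-bit value plus a NUL), and umax2s()
 * returns a pointer into that buffer, e.g.:
 *
 *	char buf[UMAX2S_BUFSIZE];
 *	_malloc_message("value: ", umax2s(42, buf), "\n", "");
 */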
1301 umax2s(uintmax_t x, char *s)
1305 i = UMAX2S_BUFSIZE - 1;
1309 s[i] = "0123456789"[x % 10];
1317 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
1319 #if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
1320 #define _write write
1322 _write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
1323 _write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
1324 _write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
1325 _write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
1328 #define _malloc_message malloc_message
1330 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
1331 const char *p4) = wrtmessage;
1334 # define assert(e) do { \
1336 char line_buf[UMAX2S_BUFSIZE]; \
1337 _malloc_message(__FILE__, ":", umax2s(__LINE__, \
1338 line_buf), ": Failed assertion: "); \
1339 _malloc_message("\"", #e, "\"\n", ""); \
1347 /******************************************************************************/
1349 * Begin mutex. We can't use normal pthread mutexes in all places, because
1350 * they require malloc()ed memory, which causes bootstrapping issues in some
1355 malloc_mutex_init(malloc_mutex_t *mutex)
1357 #if defined(MOZ_MEMORY_WINDOWS)
1359 if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
1361 #elif defined(MOZ_MEMORY_DARWIN)
1362 mutex->lock = OS_SPINLOCK_INIT;
1363 #elif defined(MOZ_MEMORY_LINUX)
1364 pthread_mutexattr_t attr;
1365 if (pthread_mutexattr_init(&attr) != 0)
1367 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
1368 if (pthread_mutex_init(mutex, &attr) != 0) {
1369 pthread_mutexattr_destroy(&attr);
1372 pthread_mutexattr_destroy(&attr);
1373 #elif defined(MOZ_MEMORY)
1374 if (pthread_mutex_init(mutex, NULL) != 0)
1377 static const spinlock_t lock = _SPINLOCK_INITIALIZER;
1385 malloc_mutex_lock(malloc_mutex_t *mutex)
1388 #if defined(MOZ_MEMORY_WINDOWS)
1389 EnterCriticalSection(mutex);
1390 #elif defined(MOZ_MEMORY_DARWIN)
1391 OSSpinLockLock(&mutex->lock);
1392 #elif defined(MOZ_MEMORY)
1393 pthread_mutex_lock(mutex);
1396 _SPINLOCK(&mutex->lock);
1401 malloc_mutex_unlock(malloc_mutex_t *mutex)
1404 #if defined(MOZ_MEMORY_WINDOWS)
1405 LeaveCriticalSection(mutex);
1406 #elif defined(MOZ_MEMORY_DARWIN)
1407 OSSpinLockUnlock(&mutex->lock);
1408 #elif defined(MOZ_MEMORY)
1409 pthread_mutex_unlock(mutex);
1412 _SPINUNLOCK(&mutex->lock);
1417 malloc_spin_init(malloc_spinlock_t *lock)
1419 #if defined(MOZ_MEMORY_WINDOWS)
1421 if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
1423 #elif defined(MOZ_MEMORY_DARWIN)
1424 lock->lock = OS_SPINLOCK_INIT;
1425 #elif defined(MOZ_MEMORY_LINUX)
1426 pthread_mutexattr_t attr;
1427 if (pthread_mutexattr_init(&attr) != 0)
1429 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
1430 if (pthread_mutex_init(lock, &attr) != 0) {
1431 pthread_mutexattr_destroy(&attr);
1434 pthread_mutexattr_destroy(&attr);
1435 #elif defined(MOZ_MEMORY)
1436 if (pthread_mutex_init(lock, NULL) != 0)
1439 lock->lock = _SPINLOCK_INITIALIZER;
1445 malloc_spin_lock(malloc_spinlock_t *lock)
1448 #if defined(MOZ_MEMORY_WINDOWS)
1449 EnterCriticalSection(lock);
1450 #elif defined(MOZ_MEMORY_DARWIN)
1451 OSSpinLockLock(&lock->lock);
1452 #elif defined(MOZ_MEMORY)
1453 pthread_mutex_lock(lock);
1456 _SPINLOCK(&lock->lock);
1461 malloc_spin_unlock(malloc_spinlock_t *lock)
1463 #if defined(MOZ_MEMORY_WINDOWS)
1464 LeaveCriticalSection(lock);
1465 #elif defined(MOZ_MEMORY_DARWIN)
1466 OSSpinLockUnlock(&lock->lock);
1467 #elif defined(MOZ_MEMORY)
1468 pthread_mutex_unlock(lock);
1471 _SPINUNLOCK(&lock->lock);
1478 /******************************************************************************/
1480 * Begin spin lock. Spin locks here are actually adaptive mutexes that block
1481 * after a period of spinning, because unbounded spinning would allow for
1482 * priority inversion.
1485 #if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
1486 # define malloc_spin_init malloc_mutex_init
1487 # define malloc_spin_lock malloc_mutex_lock
1488 # define malloc_spin_unlock malloc_mutex_unlock
1493 * We use an unpublished interface to initialize pthread mutexes with an
1494 * allocation callback, in order to avoid infinite recursion.
1496 int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
1497 void *(calloc_cb)(size_t, size_t));
1499 __weak_reference(_pthread_mutex_init_calloc_cb_stub,
1500 _pthread_mutex_init_calloc_cb);
1503 _pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
1504 void *(calloc_cb)(size_t, size_t))
1511 malloc_spin_init(pthread_mutex_t *lock)
1514 if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
1520 static inline unsigned
1521 malloc_spin_lock(pthread_mutex_t *lock)
1526 if (_pthread_mutex_trylock(lock) != 0) {
1528 volatile unsigned j;
1530 /* Exponentially back off. */
1531 for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
1532 for (j = 0; j < (1U << i); j++)
1536 if (_pthread_mutex_trylock(lock) == 0)
1541 * Spinning failed. Block until the lock becomes
1542 * available, in order to avoid indefinite priority
1545 _pthread_mutex_lock(lock);
1546 assert((ret << BLOCK_COST_2POW) != 0);
1547 return (ret << BLOCK_COST_2POW);
1555 malloc_spin_unlock(pthread_mutex_t *lock)
1559 _pthread_mutex_unlock(lock);
1566 /******************************************************************************/
1568 * Begin Utility functions/macros.
1571 /* Return the chunk address for allocation address a. */
1572 #define CHUNK_ADDR2BASE(a) \
1573 ((void *)((uintptr_t)(a) & ~chunksize_mask))
1575 /* Return the chunk offset of address a. */
1576 #define CHUNK_ADDR2OFFSET(a) \
1577 ((size_t)((uintptr_t)(a) & chunksize_mask))
1579 /* Return the smallest chunk multiple that is >= s. */
1580 #define CHUNK_CEILING(s) \
1581 (((s) + chunksize_mask) & ~chunksize_mask)
1583 /* Return the smallest cacheline multiple that is >= s. */
1584 #define CACHELINE_CEILING(s) \
1585 (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
1587 /* Return the smallest quantum multiple that is >= a. */
1588 #define QUANTUM_CEILING(a) \
1589 (((a) + quantum_mask) & ~quantum_mask)
1591 /* Return the smallest pagesize multiple that is >= s. */
1592 #define PAGE_CEILING(s) \
1593 (((s) + pagesize_mask) & ~pagesize_mask)
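/*
 * Added examples (assuming the defaults: 1 MB chunks, 64 byte cache lines,
 * a 16 byte quantum, and 4 kB pages):
 *
 *	CHUNK_ADDR2BASE(p)  masks p down to the start of its 1 MB chunk.
 *	CACHELINE_CEILING(100) == 128
 *	QUANTUM_CEILING(100)   == 112
 *	PAGE_CEILING(5000)     == 8192
 */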
1595 /* Compute the smallest power of 2 that is >= x. */
1596 static inline size_t
1606 #if (SIZEOF_PTR == 8)
1613 #ifdef MALLOC_BALANCE
1615 * Use a simple linear congruential pseudo-random number generator:
1617 * prn(x) = (a*x + c) % m
1619 * where the following constants ensure maximal period:
1621 * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
1622 * c == Odd number (relatively prime to 2^n).
1625 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
1627 * This choice of m has the disadvantage that the quality of the bits is
1628 * proportional to bit position. For example, the lowest bit has a cycle of 2,
1629 * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
1632 # define PRN_DEFINE(suffix, var, a, c) \
1633 static inline void \
1634 sprn_##suffix(uint32_t seed) \
1639 static inline uint32_t \
1640 prn_##suffix(uint32_t lg_range) \
1644 assert(lg_range > 0); \
1645 assert(lg_range <= 32); \
1647 x = (var * (a)) + (c); \
1649 ret = x >> (32 - lg_range); \
1653 # define SPRN(suffix, seed) sprn_##suffix(seed)
1654 # define PRN(suffix, lg_range) prn_##suffix(lg_range)
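/*
 * Added usage note: SPRN(balance, seed) seeds the thread-local generator
 * defined below, and PRN(balance, lg_range) returns the upper lg_range bits
 * of the next LCG output, i.e. a value in [0, 2^lg_range), which is how an
 * arena index is drawn when rebalancing.
 */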
1657 #ifdef MALLOC_BALANCE
1658 /* Define the PRNG used for arena assignment. */
1659 static __thread uint32_t balance_x;
1660 PRN_DEFINE(balance, balance_x, 1297, 1301)
1663 #ifdef MALLOC_UTRACE
1665 utrace(const void *addr, size_t len)
1667 malloc_utrace_t *ut = (malloc_utrace_t *)addr;
1669 assert(len == sizeof(malloc_utrace_t));
1671 if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
1672 malloc_printf("%d x USER malloc_init()\n", getpid());
1673 else if (ut->p == NULL && ut->r != NULL) {
1674 malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r,
1676 } else if (ut->p != NULL && ut->r != NULL) {
1677 malloc_printf("%d x USER %p = realloc(%p, %zu)\n", getpid(),
1678 ut->r, ut->p, ut->s);
1680 malloc_printf("%d x USER free(%p)\n", getpid(), ut->p);
1686 static inline const char *
1690 return ("<jemalloc>");
1695 * Print to stderr in such a way as to (hopefully) avoid memory allocation.
1698 malloc_printf(const char *format, ...)
1703 va_start(ap, format);
1704 vsnprintf(buf, sizeof(buf), format, ap);
1706 _malloc_message(buf, "", "", "");
1710 /******************************************************************************/
1712 #ifdef MALLOC_DECOMMIT
1714 pages_decommit(void *addr, size_t size)
1717 #ifdef MOZ_MEMORY_WINDOWS
1718 VirtualFree(addr, size, MEM_DECOMMIT);
1720 if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
1727 pages_commit(void *addr, size_t size)
1730 # ifdef MOZ_MEMORY_WINDOWS
1731 VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
1733 if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
1734 MAP_ANON, -1, 0) == MAP_FAILED)
1741 base_pages_alloc_mmap(size_t minsize)
1745 #ifdef MALLOC_DECOMMIT
1750 assert(minsize != 0);
1751 csize = CHUNK_CEILING(minsize);
1752 #ifdef MALLOC_PAGEFILE
1754 pfd = pagefile_init(csize);
1760 base_pages = pages_map(NULL, csize, pfd);
1761 if (base_pages == NULL) {
1765 base_next_addr = base_pages;
1766 base_past_addr = (void *)((uintptr_t)base_pages + csize);
1767 #ifdef MALLOC_DECOMMIT
1769 * Leave enough pages for minsize committed, since otherwise they would
1770 * have to be immediately recommitted.
1772 pminsize = PAGE_CEILING(minsize);
1773 base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
1774 if (pminsize < csize)
1775 pages_decommit(base_next_decommitted, csize - pminsize);
1778 base_mapped += csize;
1783 #ifdef MALLOC_PAGEFILE
1785 pagefile_close(pfd);
1791 base_pages_alloc(size_t minsize)
1794 if (base_pages_alloc_mmap(minsize) == false)
1801 base_alloc(size_t size)
1806 /* Round size up to nearest multiple of the cacheline size. */
1807 csize = CACHELINE_CEILING(size);
1809 malloc_mutex_lock(&base_mtx);
1810 /* Make sure there's enough space for the allocation. */
1811 if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
1812 if (base_pages_alloc(csize)) {
1813 malloc_mutex_unlock(&base_mtx);
1818 ret = base_next_addr;
1819 base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
1820 #ifdef MALLOC_DECOMMIT
1821 /* Make sure enough pages are committed for the new allocation. */
1822 if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
1823 void *pbase_next_addr =
1824 (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
1826 pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
1827 (uintptr_t)base_next_decommitted);
1828 base_next_decommitted = pbase_next_addr;
1831 malloc_mutex_unlock(&base_mtx);
1832 VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
1838 base_calloc(size_t number, size_t size)
1842 ret = base_alloc(number * size);
1843 #ifdef MALLOC_VALGRIND
1845 VALGRIND_FREELIKE_BLOCK(ret, 0);
1846 VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, true);
1849 memset(ret, 0, number * size);
1854 static extent_node_t *
1855 base_node_alloc(void)
1859 malloc_mutex_lock(&base_mtx);
1860 if (base_nodes != NULL) {
1862 base_nodes = *(extent_node_t **)ret;
1863 VALGRIND_FREELIKE_BLOCK(ret, 0);
1864 VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(extent_node_t), 0, false);
1865 malloc_mutex_unlock(&base_mtx);
1867 malloc_mutex_unlock(&base_mtx);
1868 ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
1875 base_node_dealloc(extent_node_t *node)
1878 malloc_mutex_lock(&base_mtx);
1879 VALGRIND_FREELIKE_BLOCK(node, 0);
1880 VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
1881 *(extent_node_t **)node = base_nodes;
1883 malloc_mutex_unlock(&base_mtx);
1886 static reserve_reg_t *
1887 base_reserve_reg_alloc(void)
1891 malloc_mutex_lock(&base_mtx);
1892 if (base_reserve_regs != NULL) {
1893 ret = base_reserve_regs;
1894 base_reserve_regs = *(reserve_reg_t **)ret;
1895 VALGRIND_FREELIKE_BLOCK(ret, 0);
1896 VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
1897 malloc_mutex_unlock(&base_mtx);
1899 malloc_mutex_unlock(&base_mtx);
1900 ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
1907 base_reserve_reg_dealloc(reserve_reg_t *reg)
1910 malloc_mutex_lock(&base_mtx);
1911 VALGRIND_FREELIKE_BLOCK(reg, 0);
1912 VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
1913 *(reserve_reg_t **)reg = base_reserve_regs;
1914 base_reserve_regs = reg;
1915 malloc_mutex_unlock(&base_mtx);
1918 /******************************************************************************/
1922 stats_print(arena_t *arena)
1924 unsigned i, gap_start;
1926 #ifdef MOZ_MEMORY_WINDOWS
1927 malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
1928 " %I64u madvise%s, %I64u page%s purged\n",
1929 arena->ndirty, arena->ndirty == 1 ? "" : "s",
1930 arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
1931 arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
1932 arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
1933 # ifdef MALLOC_DECOMMIT
1934 malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
1935 " %I64u page%s decommitted\n",
1936 arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
1937 arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
1938 arena->stats.decommitted,
1939 (arena->stats.decommitted == 1) ? "" : "s");
1942 malloc_printf(" allocated nmalloc ndalloc\n");
1943 malloc_printf("small: %12Iu %12I64u %12I64u\n",
1944 arena->stats.allocated_small, arena->stats.nmalloc_small,
1945 arena->stats.ndalloc_small);
1946 malloc_printf("large: %12Iu %12I64u %12I64u\n",
1947 arena->stats.allocated_large, arena->stats.nmalloc_large,
1948 arena->stats.ndalloc_large);
1949 malloc_printf("total: %12Iu %12I64u %12I64u\n",
1950 arena->stats.allocated_small + arena->stats.allocated_large,
1951 arena->stats.nmalloc_small + arena->stats.nmalloc_large,
1952 arena->stats.ndalloc_small + arena->stats.ndalloc_large);
1953 malloc_printf("mapped: %12Iu\n", arena->stats.mapped);
1955 malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
1956 " %llu madvise%s, %llu page%s purged\n",
1957 arena->ndirty, arena->ndirty == 1 ? "" : "s",
1958 arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
1959 arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
1960 arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
1961 # ifdef MALLOC_DECOMMIT
1962 malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
1963 " %llu page%s decommitted\n",
1964 arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
1965 arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
1966 arena->stats.decommitted,
1967 (arena->stats.decommitted == 1) ? "" : "s");
1970 malloc_printf(" allocated nmalloc ndalloc\n");
1971 malloc_printf("small: %12zu %12llu %12llu\n",
1972 arena->stats.allocated_small, arena->stats.nmalloc_small,
1973 arena->stats.ndalloc_small);
1974 malloc_printf("large: %12zu %12llu %12llu\n",
1975 arena->stats.allocated_large, arena->stats.nmalloc_large,
1976 arena->stats.ndalloc_large);
1977 malloc_printf("total: %12zu %12llu %12llu\n",
1978 arena->stats.allocated_small + arena->stats.allocated_large,
1979 arena->stats.nmalloc_small + arena->stats.nmalloc_large,
1980 arena->stats.ndalloc_small + arena->stats.ndalloc_large);
1981 malloc_printf("mapped: %12zu\n", arena->stats.mapped);
1983 malloc_printf("bins: bin size regs pgs requests newruns"
1984 " reruns maxruns curruns\n");
1985 for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
1986 if (arena->bins[i].stats.nrequests == 0) {
1987 if (gap_start == UINT_MAX)
1990 if (gap_start != UINT_MAX) {
1991 if (i > gap_start + 1) {
1992 /* Gap of more than one size class. */
1993 malloc_printf("[%u..%u]\n",
1996 /* Gap of one size class. */
1997 malloc_printf("[%u]\n", gap_start);
1999 gap_start = UINT_MAX;
2002 #if defined(MOZ_MEMORY_WINDOWS)
2003 "%13u %1s %4u %4u %3u %9I64u %9I64u"
2004 " %9I64u %7u %7u\n",
2006 "%13u %1s %4u %4u %3u %9llu %9llu"
2007 " %9llu %7lu %7lu\n",
2010 i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
2011 arena->bins[i].reg_size,
2012 arena->bins[i].nregs,
2013 arena->bins[i].run_size >> pagesize_2pow,
2014 arena->bins[i].stats.nrequests,
2015 arena->bins[i].stats.nruns,
2016 arena->bins[i].stats.reruns,
2017 arena->bins[i].stats.highruns,
2018 arena->bins[i].stats.curruns);
2021 if (gap_start != UINT_MAX) {
2022 if (i > gap_start + 1) {
2023 /* Gap of more than one size class. */
2024 malloc_printf("[%u..%u]\n", gap_start, i - 1);
2026 /* Gap of one size class. */
2027 malloc_printf("[%u]\n", gap_start);
2034 * End Utility functions/macros.
2036 /******************************************************************************/
2038 * Begin extent tree code.
2042 extent_szad_comp(extent_node_t *a, extent_node_t *b)
2045 size_t a_size = a->size;
2046 size_t b_size = b->size;
2048 ret = (a_size > b_size) - (a_size < b_size);
2050 uintptr_t a_addr = (uintptr_t)a->addr;
2051 uintptr_t b_addr = (uintptr_t)b->addr;
2053 ret = (a_addr > b_addr) - (a_addr < b_addr);
2059 /* Wrap red-black tree macros in functions. */
2060 rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
2061 link_szad, extent_szad_comp)
2064 extent_ad_comp(extent_node_t *a, extent_node_t *b)
2066 uintptr_t a_addr = (uintptr_t)a->addr;
2067 uintptr_t b_addr = (uintptr_t)b->addr;
2069 return ((a_addr > b_addr) - (a_addr < b_addr));
2072 /* Wrap red-black tree macros in functions. */
2073 rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
2077 * End extent tree code.
2079 /******************************************************************************/
2081 * Begin chunk management functions.
2084 #ifdef MOZ_MEMORY_WINDOWS
2086 pages_map(void *addr, size_t size, int pfd)
2090 ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
2097 pages_unmap(void *addr, size_t size)
2100 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
2101 _malloc_message(_getprogname(),
2102 ": (malloc) Error in VirtualFree()\n", "", "");
2107 #elif (defined(MOZ_MEMORY_DARWIN))
2109 pages_map(void *addr, size_t size, int pfd)
2119 flags = VM_FLAGS_ANYWHERE;
2121 err = vm_allocate((vm_map_t)mach_task_self(), (vm_address_t *)&ret,
2122 (vm_size_t)size, flags);
2123 if (err != KERN_SUCCESS)
2126 assert(ret == NULL || (addr == NULL && ret != addr)
2127 || (addr != NULL && ret == addr));
2132 pages_unmap(void *addr, size_t size)
2136 err = vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)addr,
2138 if (err != KERN_SUCCESS) {
2139 malloc_message(_getprogname(),
2140 ": (malloc) Error in vm_deallocate(): ",
2141 mach_error_string(err), "\n");
2147 #define VM_COPY_MIN (pagesize << 5)
2149 pages_copy(void *dest, const void *src, size_t n)
2152 assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
2153 assert(n >= VM_COPY_MIN);
2154 assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
2156 vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
2157 (vm_address_t)dest);
2159 #else /* MOZ_MEMORY_DARWIN */
2161 pages_map(void *addr, size_t size, int pfd)
2166 * We don't use MAP_FIXED here, because it can cause the *replacement*
2167 * of existing mappings, and we only want to create new mappings.
2169 #ifdef MALLOC_PAGEFILE
2171 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
2172 MAP_NOSYNC, pfd, 0);
2176 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
2179 assert(ret != NULL);
2181 if (ret == MAP_FAILED)
2183 else if (addr != NULL && ret != addr) {
2185 * We succeeded in mapping memory, but not in the right place.
2187 if (munmap(ret, size) == -1) {
2188 char buf[STRERROR_BUF];
2190 strerror_r(errno, buf, sizeof(buf));
2191 _malloc_message(_getprogname(),
2192 ": (malloc) Error in munmap(): ", buf, "\n");
2199 assert(ret == NULL || (addr == NULL && ret != addr)
2200 || (addr != NULL && ret == addr));
2205 pages_unmap(void *addr, size_t size)
2208 if (munmap(addr, size) == -1) {
2209 char buf[STRERROR_BUF];
2211 strerror_r(errno, buf, sizeof(buf));
2212 _malloc_message(_getprogname(),
2213 ": (malloc) Error in munmap(): ", buf, "\n");
2220 #ifdef MALLOC_VALIDATE
2221 static inline malloc_rtree_t *
2222 malloc_rtree_new(unsigned bits)
2224 malloc_rtree_t *ret;
2225 unsigned bits_per_level, height, i;
2227 bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
2228 sizeof(void *)))) - 1;
2229 height = bits / bits_per_level;
2230 if (height * bits_per_level != bits)
2232 assert(height * bits_per_level >= bits);
2234 ret = base_calloc(1, sizeof(malloc_rtree_t) + (sizeof(unsigned) *
2239 malloc_spin_init(&ret->lock);
2240 ret->height = height;
2241 if (bits_per_level * height > bits)
2242 ret->level2bits[0] = bits % bits_per_level;
2244 ret->level2bits[0] = bits_per_level;
2245 for (i = 1; i < height; i++)
2246 ret->level2bits[i] = bits_per_level;
2248 ret->root = base_calloc(1, sizeof(void *) << ret->level2bits[0]);
2249 if (ret->root == NULL) {
2251 * We leak the rtree here, since there's no generic base
2260 /* The least significant bits of the key are ignored. */
2261 static inline void *
2262 malloc_rtree_get(malloc_rtree_t *rtree, uintptr_t key)
2266 unsigned i, lshift, height, bits;
2267 void **node, **child;
2269 malloc_spin_lock(&rtree->lock);
2270 for (i = lshift = 0, height = rtree->height, node = rtree->root;
2272 i++, lshift += bits, node = child) {
2273 bits = rtree->level2bits[i];
2274 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
2275 child = node[subkey];
2276 if (child == NULL) {
2277 malloc_spin_unlock(&rtree->lock);
2282 /* node is a leaf, so it contains values rather than node pointers. */
2283 bits = rtree->level2bits[i];
2284 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
2286 malloc_spin_unlock(&rtree->lock);
2292 malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
2295 unsigned i, lshift, height, bits;
2296 void **node, **child;
2298 malloc_spin_lock(&rtree->lock);
2299 for (i = lshift = 0, height = rtree->height, node = rtree->root;
2301 i++, lshift += bits, node = child) {
2302 bits = rtree->level2bits[i];
2303 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
2304 child = node[subkey];
2305 if (child == NULL) {
2306 child = base_calloc(1, sizeof(void *) <<
2307 rtree->level2bits[i+1]);
2308 if (child == NULL) {
2309 malloc_spin_unlock(&rtree->lock);
2312 node[subkey] = child;
2316 /* node is a leaf, so it contains values rather than node pointers. */
2317 bits = rtree->level2bits[i];
2318 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
2320 malloc_spin_unlock(&rtree->lock);
static void *
chunk_alloc_mmap(size_t size, bool pagefile)
{
	void *ret;
	size_t offset;
	int pfd;

#ifdef MALLOC_PAGEFILE
	if (opt_pagefile && pagefile) {
		pfd = pagefile_init(size);
		if (pfd == -1)
			return (NULL);
	} else
#endif
		pfd = -1;

	/*
	 * Windows requires that there be a 1:1 mapping between VM
	 * allocation/deallocation operations.  Therefore, take care here to
	 * acquire the final result via one mapping operation.  This means
	 * unmapping any preliminary result that is not correctly aligned.
	 *
	 * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
	 * since it reduces the number of page files.
	 */

	ret = pages_map(NULL, size, pfd);
	if (ret == NULL)
		goto RETURN;

	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Deallocate, then try to allocate at (ret + size - offset). */
		pages_unmap(ret, size);
		ret = pages_map((void *)((uintptr_t)ret + size - offset), size,
		    pfd);
		while (ret == NULL) {
			/*
			 * Over-allocate in order to map a memory region that
			 * is definitely large enough.
			 */
			ret = pages_map(NULL, size + chunksize, -1);
			if (ret == NULL)
				goto RETURN;
			/*
			 * Deallocate, then allocate the correct size, within
			 * the over-sized mapping.
			 */
			offset = CHUNK_ADDR2OFFSET(ret);
			pages_unmap(ret, size + chunksize);
			if (offset == 0)
				ret = pages_map(ret, size, pfd);
			else {
				ret = pages_map((void *)((uintptr_t)ret +
				    chunksize - offset), size, pfd);
			}
			/*
			 * Failure here indicates a race with another thread, so
			 * try again.
			 */
		}
	}
RETURN:
#ifdef MALLOC_PAGEFILE
	if (pfd != -1)
		pagefile_close(pfd);
#endif
#ifdef MALLOC_STATS
	if (ret != NULL)
		stats_chunks.nchunks += (size / chunksize);
#endif

	return (ret);
}
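/*
 * Alignment retry arithmetic, as a sketch (the values are only an example):
 * with a 1 MiB chunksize, if pages_map() returns 0x20040000 then offset is
 * 0x40000 and the retry targets 0x20040000 + size - 0x40000, which for a
 * 1 MiB request is 0x20100000 -- the chunk-aligned address just below the
 * end of the region that was just unmapped.
 */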
#ifdef MALLOC_PAGEFILE
static int
pagefile_init(size_t size)
{
	int ret;
	size_t i;
	char pagefile_path[PATH_MAX];
	char zbuf[MALLOC_PAGEFILE_WRITE_SIZE];

	/*
	 * Create a temporary file, then immediately unlink it so that it will
	 * not persist.
	 */
	strcpy(pagefile_path, pagefile_templ);
	ret = mkstemp(pagefile_path);
	if (ret == -1)
		return (ret);
	if (unlink(pagefile_path)) {
		char buf[STRERROR_BUF];

		strerror_r(errno, buf, sizeof(buf));
		_malloc_message(_getprogname(), ": (malloc) Error in unlink(\"",
		    pagefile_path, "\"):");
		_malloc_message(buf, "\n", "", "");
	}

	/*
	 * Write sequential zeroes to the file in order to assure that disk
	 * space is committed, with minimal fragmentation.  It would be
	 * sufficient to write one zero per disk block, but that potentially
	 * results in more system calls, for no real gain.
	 */
	memset(zbuf, 0, sizeof(zbuf));
	for (i = 0; i < size; i += sizeof(zbuf)) {
		if (write(ret, zbuf, sizeof(zbuf)) != sizeof(zbuf)) {
			if (errno != ENOSPC) {
				char buf[STRERROR_BUF];

				strerror_r(errno, buf, sizeof(buf));
				_malloc_message(_getprogname(),
				    ": (malloc) Error in write(): ", buf, "\n");
			}
			pagefile_close(ret);
			return (-1);
		}
	}

	return (ret);
}
static void
pagefile_close(int pfd)
{

	if (close(pfd)) {
		char buf[STRERROR_BUF];

		strerror_r(errno, buf, sizeof(buf));
		_malloc_message(_getprogname(),
		    ": (malloc) Error in close(): ", buf, "\n");
	}
}
#endif
2472 chunk_recycle_reserve(size_t size
, bool zero
)
2474 extent_node_t
*node
, key
;
2476 #ifdef MALLOC_DECOMMIT
2477 if (size
!= chunksize
)
2483 malloc_mutex_lock(&reserve_mtx
);
2484 node
= extent_tree_szad_nsearch(&reserve_chunks_szad
, &key
);
2486 void *ret
= node
->addr
;
2488 /* Remove node from the tree. */
2489 extent_tree_szad_remove(&reserve_chunks_szad
, node
);
2490 #ifndef MALLOC_DECOMMIT
2491 if (node
->size
== size
) {
2493 assert(node
->size
== size
);
2495 extent_tree_ad_remove(&reserve_chunks_ad
, node
);
2496 base_node_dealloc(node
);
2497 #ifndef MALLOC_DECOMMIT
2500 * Insert the remainder of node's address range as a
2501 * smaller chunk. Its position within reserve_chunks_ad
2504 assert(node
->size
> size
);
2505 node
->addr
= (void *)((uintptr_t)node
->addr
+ size
);
2507 extent_tree_szad_insert(&reserve_chunks_szad
, node
);
2510 reserve_cur
-= size
;
2512 * Try to replenish the reserve if this allocation depleted it.
2514 #ifndef MALLOC_DECOMMIT
2515 if (reserve_cur
< reserve_min
) {
2516 size_t diff
= reserve_min
- reserve_cur
;
2518 while (reserve_cur
< reserve_min
) {
2519 # define diff chunksize
2523 malloc_mutex_unlock(&reserve_mtx
);
2524 chunk
= chunk_alloc_mmap(diff
, true);
2525 malloc_mutex_lock(&reserve_mtx
);
2526 if (chunk
== NULL
) {
2530 seq
= reserve_notify(RESERVE_CND_LOW
,
2532 } while (reserve_cur
< reserve_min
&& seq
!= 0);
2534 extent_node_t
*node
;
2536 node
= chunk_dealloc_reserve(chunk
, diff
);
2540 pages_unmap(chunk
, diff
);
2542 seq
= reserve_notify(
2543 RESERVE_CND_LOW
, size
, seq
);
2544 } while (reserve_cur
< reserve_min
&&
2549 malloc_mutex_unlock(&reserve_mtx
);
2551 #ifdef MALLOC_DECOMMIT
2552 pages_commit(ret
, size
);
2556 memset(ret
, 0, size
);
2560 malloc_mutex_unlock(&reserve_mtx
);
static void *
chunk_alloc(size_t size, bool zero, bool pagefile)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	ret = chunk_recycle_reserve(size, zero);
	if (ret != NULL)
		goto RETURN;

	ret = chunk_alloc_mmap(size, pagefile);
	if (ret != NULL)
		goto RETURN;

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
#ifdef MALLOC_STATS
	if (ret != NULL) {
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks)
			stats_chunks.highchunks = stats_chunks.curchunks;
	}
#endif

#ifdef MALLOC_VALIDATE
	if (ret != NULL) {
		if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size);
			return (NULL);
		}
	}
#endif

	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}
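/*
 * Note: under MALLOC_VALIDATE, every chunk that chunk_alloc() hands out is
 * recorded in chunk_rtree above, which is what later allows
 * isalloc_validate() to reject pointers that do not lie within any live
 * chunk.
 */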
2605 static extent_node_t
*
2606 chunk_dealloc_reserve(void *chunk
, size_t size
)
2608 extent_node_t
*node
;
2610 #ifdef MALLOC_DECOMMIT
2611 if (size
!= chunksize
)
2614 extent_node_t
*prev
, key
;
2616 key
.addr
= (void *)((uintptr_t)chunk
+ size
);
2617 node
= extent_tree_ad_nsearch(&reserve_chunks_ad
, &key
);
2618 /* Try to coalesce forward. */
2619 if (node
!= NULL
&& node
->addr
== key
.addr
) {
2621 * Coalesce chunk with the following address range. This does
2622 * not change the position within reserve_chunks_ad, so only
2623 * remove/insert from/into reserve_chunks_szad.
2625 extent_tree_szad_remove(&reserve_chunks_szad
, node
);
2628 extent_tree_szad_insert(&reserve_chunks_szad
, node
);
2631 /* Coalescing forward failed, so insert a new node. */
2632 node
= base_node_alloc();
2637 extent_tree_ad_insert(&reserve_chunks_ad
, node
);
2638 extent_tree_szad_insert(&reserve_chunks_szad
, node
);
2639 #ifndef MALLOC_DECOMMIT
2642 /* Try to coalesce backward. */
2643 prev
= extent_tree_ad_prev(&reserve_chunks_ad
, node
);
2644 if (prev
!= NULL
&& (void *)((uintptr_t)prev
->addr
+ prev
->size
) ==
2647 * Coalesce chunk with the previous address range. This does
2648 * not change the position within reserve_chunks_ad, so only
2649 * remove/insert node from/into reserve_chunks_szad.
2651 extent_tree_szad_remove(&reserve_chunks_szad
, prev
);
2652 extent_tree_ad_remove(&reserve_chunks_ad
, prev
);
2654 extent_tree_szad_remove(&reserve_chunks_szad
, node
);
2655 node
->addr
= prev
->addr
;
2656 node
->size
+= prev
->size
;
2657 extent_tree_szad_insert(&reserve_chunks_szad
, node
);
2659 base_node_dealloc(prev
);
2663 #ifdef MALLOC_DECOMMIT
2664 pages_decommit(chunk
, size
);
2666 madvise(chunk
, size
, MADV_FREE
);
2669 reserve_cur
+= size
;
2670 if (reserve_cur
> reserve_max
)
static void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}

static void
chunk_dealloc(void *chunk, size_t size)
{
	extent_node_t *node;

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#ifdef MALLOC_STATS
	stats_chunks.curchunks -= (size / chunksize);
#endif
#ifdef MALLOC_VALIDATE
	malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
#endif

	/* Try to merge chunk into the reserve. */
	malloc_mutex_lock(&reserve_mtx);
	node = chunk_dealloc_reserve(chunk, size);
	malloc_mutex_unlock(&reserve_mtx);
	if (node == NULL)
		chunk_dealloc_mmap(chunk, size);
}
/*
 * End chunk management functions.
 */
/******************************************************************************/
/*
 * Begin arena.
 */

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
static inline arena_t *
choose_arena(void)
{
	arena_t *ret;

	/*
	 * We can only use TLS if this is a PIC library, since for the static
	 * library version, libc's malloc is used by TLS allocation, which
	 * introduces a bootstrapping issue.
	 */
#ifndef NO_TLS
	if (__isthreaded == false) {
		/* Avoid the overhead of TLS for single-threaded operation. */
		return (arenas[0]);
	}

#  ifdef MOZ_MEMORY_WINDOWS
	ret = TlsGetValue(tlsIndex);
#  else
	ret = arenas_map;
#  endif

	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}
#else
	if (__isthreaded && narenas > 1) {
		unsigned long ind;

		/*
		 * Hash _pthread_self() to one of the arenas.  There is a prime
		 * number of arenas, so this has a reasonable chance of
		 * working.  Even so, the hashing can be easily thwarted by
		 * inconvenient _pthread_self() values.  Without specific
		 * knowledge of how _pthread_self() calculates values, we can't
		 * easily do much better than this.
		 */
		ind = (unsigned long) _pthread_self() % narenas;

		/*
		 * Optimistically assume that arenas[ind] has been initialized.
		 * At worst, we find out that some other thread has already
		 * done so, after acquiring the lock in preparation.  Note that
		 * this lazy locking also has the effect of lazily forcing
		 * cache coherency; without the lock acquisition, there's no
		 * guarantee that modification of arenas[ind] by another thread
		 * would be seen on this CPU for an arbitrary amount of time.
		 *
		 * In general, this approach to modifying a synchronized value
		 * isn't a good idea, but in this case we only ever modify the
		 * value once, so things work out well.
		 */
		ret = arenas[ind];
		if (ret == NULL) {
			/*
			 * Avoid races with another thread that may have
			 * already initialized arenas[ind].
			 */
			malloc_spin_lock(&arenas_lock);
			if (arenas[ind] == NULL)
				ret = arenas_extend((unsigned)ind);
			else
				ret = arenas[ind];
			malloc_spin_unlock(&arenas_lock);
		}
	} else
		ret = arenas[0];
#endif

	assert(ret != NULL);
	return (ret);
}
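/*
 * Illustrative note on the hashing above: because narenas is prime (say 17),
 * _pthread_self() values that differ by a fixed stride such as 0x1000 are
 * spread across all arenas, since the stride and the modulus are coprime; a
 * power-of-two arena count would map such values onto a small subset.
 */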
/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
static arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	assert(__isthreaded);

#ifdef MALLOC_BALANCE
	/* Seed the PRNG used for arena load balancing. */
	SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
#endif

	if (narenas > 1) {
#ifdef MALLOC_BALANCE
		unsigned ind;

		ind = PRN(balance, narenas_2pow);
		if ((ret = arenas[ind]) == NULL) {
			malloc_spin_lock(&arenas_lock);
			if ((ret = arenas[ind]) == NULL)
				ret = arenas_extend(ind);
			malloc_spin_unlock(&arenas_lock);
		}
#else
		malloc_spin_lock(&arenas_lock);
		if ((ret = arenas[next_arena]) == NULL)
			ret = arenas_extend(next_arena);
		next_arena = (next_arena + 1) % narenas;
		malloc_spin_unlock(&arenas_lock);
#endif
	} else
		ret = arenas[0];

#ifdef MOZ_MEMORY_WINDOWS
	TlsSetValue(tlsIndex, ret);
#else
	arenas_map = ret;
#endif

	return (ret);
}
static inline int
arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
{
	uintptr_t a_chunk = (uintptr_t)a;
	uintptr_t b_chunk = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
}

/* Wrap red-black tree macros in functions. */
rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
    arena_chunk_t, link_dirty, arena_chunk_comp)
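/*
 * The (a > b) - (a < b) expression above is a branchless three-way
 * comparison: it evaluates to -1, 0, or 1, which is exactly the contract the
 * red-black tree comparators must satisfy.
 */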
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Wrap red-black tree macros in functions. */
rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
    arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~pagesize_mask;
	size_t b_size = b->bits & ~pagesize_mask;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) == 0)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Wrap red-black tree macros in functions. */
rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
    arena_avail_comp)
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
{
	void *ret;
	unsigned i, mask, bit, regind;

	assert(run->magic == ARENA_RUN_MAGIC);
	assert(run->regs_minelm < bin->regs_mask_nelms);

	/*
	 * Move the first check outside the loop, so that run->regs_minelm can
	 * be updated unconditionally, without the possibility of updating it
	 * multiple times.
	 */
	i = run->regs_minelm;
	mask = run->regs_mask[i];
	if (mask != 0) {
		/* Usable allocation found. */
		bit = ffs((int)mask) - 1;

		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
		assert(regind < bin->nregs);
		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
		    + (bin->reg_size * regind));

		/* Clear bit. */
		mask ^= (1U << bit);
		run->regs_mask[i] = mask;

		return (ret);
	}

	for (i++; i < bin->regs_mask_nelms; i++) {
		mask = run->regs_mask[i];
		if (mask != 0) {
			/* Usable allocation found. */
			bit = ffs((int)mask) - 1;

			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
			assert(regind < bin->nregs);
			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
			    + (bin->reg_size * regind));

			/* Clear bit. */
			mask ^= (1U << bit);
			run->regs_mask[i] = mask;

			/*
			 * Make a note that nothing before this element
			 * contains a free region.
			 */
			run->regs_minelm = i; /* Low payoff: + (mask == 0); */

			return (ret);
		}
	}
	/* Not reached. */
	assert(0);
	return (NULL);
}
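/*
 * Worked example for the bitmap math above (assuming 32-bit ints, so
 * SIZEOF_INT_2POW == 2 and each regs_mask element tracks 32 regions): if the
 * first set bit is found at element i == 2, bit == 5, then
 * regind == 2*32 + 5 == 69 and the returned region starts at
 * run + reg0_offset + 69 * reg_size.
 */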
static inline void
arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
{
	/*
	 * To divide by a number D that is not a power of two we multiply
	 * by (2^21 / D) and then right shift by 21 positions.
	 *
	 *   X / D
	 *
	 * becomes
	 *
	 *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
	 */
#define SIZE_INV_SHIFT 21
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
	static const unsigned size_invs[] = {
	    SIZE_INV(3),
	    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
	    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
	    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
	    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
	    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
	    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
	    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
#if (QUANTUM_2POW_MIN < 4)
	    ,
	    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
	    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
	    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
	    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
#endif
	};
	unsigned diff, regind, elm, bit;

	assert(run->magic == ARENA_RUN_MAGIC);
	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
	if ((size & (size - 1)) == 0) {
		/*
		 * log2_table allows fast division of a power of two in the
		 * [1..128] domain.
		 *
		 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
		 */
		static const unsigned char log2_table[] = {
		    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
		};

		if (size <= 128)
			regind = (diff >> log2_table[size - 1]);
		else if (size <= 32768)
			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
		else {
			/*
			 * The run size is too large for us to use the lookup
			 * table.  Use real division.
			 */
			regind = diff / size;
		}
	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
	    << QUANTUM_2POW_MIN) + 2) {
		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
		regind >>= SIZE_INV_SHIFT;
	} else {
		/*
		 * size_invs isn't large enough to handle this size class, so
		 * calculate regind using actual division.  This only happens
		 * if the user increases small_max via the 'S' runtime
		 * configuration option.
		 */
		regind = diff / size;
	}
	assert(diff == regind * size);
	assert(regind < bin->nregs);

	elm = regind >> (SIZEOF_INT_2POW + 3);
	if (elm < run->regs_minelm)
		run->regs_minelm = elm;
	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
	assert((run->regs_mask[elm] & (1U << bit)) == 0);
	run->regs_mask[elm] |= (1U << bit);
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
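/*
 * Worked example of the reciprocal division above (assuming
 * QUANTUM_2POW_MIN == 4, i.e. a 16 byte quantum): for the 48 byte size
 * class, size_invs[(48 >> 4) - 3] == SIZE_INV(3) == (1 << 21) / 48 + 1 ==
 * 43691.  Freeing the third region (diff == 96) computes
 * regind == (96 * 43691) >> 21 == 2, the same result as 96 / 48 but without
 * a variable-divisor division.
 */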
3070 arena_run_split(arena_t
*arena
, arena_run_t
*run
, size_t size
, bool large
,
3073 arena_chunk_t
*chunk
;
3074 size_t old_ndirty
, run_ind
, total_pages
, need_pages
, rem_pages
, i
;
3076 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(run
);
3077 old_ndirty
= chunk
->ndirty
;
3078 run_ind
= (unsigned)(((uintptr_t)run
- (uintptr_t)chunk
)
3080 total_pages
= (chunk
->map
[run_ind
].bits
& ~pagesize_mask
) >>
3082 need_pages
= (size
>> pagesize_2pow
);
3083 assert(need_pages
> 0);
3084 assert(need_pages
<= total_pages
);
3085 rem_pages
= total_pages
- need_pages
;
3087 arena_avail_tree_remove(&arena
->runs_avail
, &chunk
->map
[run_ind
]);
3089 /* Keep track of trailing unused pages for later use. */
3090 if (rem_pages
> 0) {
3091 chunk
->map
[run_ind
+need_pages
].bits
= (rem_pages
<<
3092 pagesize_2pow
) | (chunk
->map
[run_ind
+need_pages
].bits
&
3094 chunk
->map
[run_ind
+total_pages
-1].bits
= (rem_pages
<<
3095 pagesize_2pow
) | (chunk
->map
[run_ind
+total_pages
-1].bits
&
3097 arena_avail_tree_insert(&arena
->runs_avail
,
3098 &chunk
->map
[run_ind
+need_pages
]);
3101 for (i
= 0; i
< need_pages
; i
++) {
3102 #ifdef MALLOC_DECOMMIT
3104 * Commit decommitted pages if necessary. If a decommitted
3105 * page is encountered, commit all needed adjacent decommitted
3106 * pages in one operation, in order to reduce system call
3109 if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DECOMMITTED
) {
3113 * Advance i+j to just past the index of the last page
3114 * to commit. Clear CHUNK_MAP_DECOMMITTED along the
3117 for (j
= 0; i
+ j
< need_pages
&& (chunk
->map
[run_ind
+
3118 i
+ j
].bits
& CHUNK_MAP_DECOMMITTED
); j
++) {
3119 chunk
->map
[run_ind
+ i
+ j
].bits
^=
3120 CHUNK_MAP_DECOMMITTED
;
3123 pages_commit((void *)((uintptr_t)chunk
+ ((run_ind
+ i
)
3124 << pagesize_2pow
)), (j
<< pagesize_2pow
));
3125 # ifdef MALLOC_STATS
3126 arena
->stats
.ncommit
++;
3128 } else /* No need to zero since commit zeros. */
3131 /* Zero if necessary. */
3133 if ((chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_ZEROED
)
3135 VALGRIND_MALLOCLIKE_BLOCK((void *)((uintptr_t)
3136 chunk
+ ((run_ind
+ i
) << pagesize_2pow
)),
3137 pagesize
, 0, false);
3138 memset((void *)((uintptr_t)chunk
+ ((run_ind
3139 + i
) << pagesize_2pow
)), 0, pagesize
);
3140 VALGRIND_FREELIKE_BLOCK((void *)((uintptr_t)
3141 chunk
+ ((run_ind
+ i
) << pagesize_2pow
)),
3143 /* CHUNK_MAP_ZEROED is cleared below. */
3147 /* Update dirty page accounting. */
3148 if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DIRTY
) {
3151 /* CHUNK_MAP_DIRTY is cleared below. */
3154 /* Initialize the chunk map. */
3156 chunk
->map
[run_ind
+ i
].bits
= CHUNK_MAP_LARGE
3157 | CHUNK_MAP_ALLOCATED
;
3159 chunk
->map
[run_ind
+ i
].bits
= (size_t)run
3160 | CHUNK_MAP_ALLOCATED
;
3165 * Set the run size only in the first element for large runs. This is
3166 * primarily a debugging aid, since the lack of size info for trailing
3167 * pages only matters if the application tries to operate on an
3171 chunk
->map
[run_ind
].bits
|= size
;
3173 if (chunk
->ndirty
== 0 && old_ndirty
> 0)
3174 arena_chunk_tree_dirty_remove(&arena
->chunks_dirty
, chunk
);
3178 arena_chunk_init(arena_t
*arena
, arena_chunk_t
*chunk
)
3183 VALGRIND_MALLOCLIKE_BLOCK(chunk
, (arena_chunk_header_npages
<<
3184 pagesize_2pow
), 0, false);
3186 arena
->stats
.mapped
+= chunksize
;
3189 chunk
->arena
= arena
;
3192 * Claim that no pages are in use, since the header is merely overhead.
3196 /* Initialize the map to contain one maximal free untouched run. */
3197 run
= (arena_run_t
*)((uintptr_t)chunk
+ (arena_chunk_header_npages
<<
3199 for (i
= 0; i
< arena_chunk_header_npages
; i
++)
3200 chunk
->map
[i
].bits
= 0;
3201 chunk
->map
[i
].bits
= arena_maxclass
3202 #ifdef MALLOC_DECOMMIT
3203 | CHUNK_MAP_DECOMMITTED
3206 for (i
++; i
< chunk_npages
-1; i
++) {
3207 chunk
->map
[i
].bits
=
3208 #ifdef MALLOC_DECOMMIT
3209 CHUNK_MAP_DECOMMITTED
|
3213 chunk
->map
[chunk_npages
-1].bits
= arena_maxclass
3214 #ifdef MALLOC_DECOMMIT
3215 | CHUNK_MAP_DECOMMITTED
3219 #ifdef MALLOC_DECOMMIT
3221 * Start out decommitted, in order to force a closer correspondence
3222 * between dirty pages and committed untouched pages.
3224 pages_decommit(run
, arena_maxclass
);
3225 # ifdef MALLOC_STATS
3226 arena
->stats
.ndecommit
++;
3227 arena
->stats
.decommitted
+= (chunk_npages
- arena_chunk_header_npages
);
3231 /* Insert the run into the runs_avail tree. */
3232 arena_avail_tree_insert(&arena
->runs_avail
,
3233 &chunk
->map
[arena_chunk_header_npages
]);
3237 arena_chunk_dealloc(arena_t
*arena
, arena_chunk_t
*chunk
)
3240 if (arena
->spare
!= NULL
) {
3241 if (arena
->spare
->ndirty
> 0) {
3242 arena_chunk_tree_dirty_remove(
3243 &chunk
->arena
->chunks_dirty
, arena
->spare
);
3244 arena
->ndirty
-= arena
->spare
->ndirty
;
3246 VALGRIND_FREELIKE_BLOCK(arena
->spare
, 0);
3247 chunk_dealloc((void *)arena
->spare
, chunksize
);
3249 arena
->stats
.mapped
-= chunksize
;
3254 * Remove run from runs_avail, regardless of whether this chunk
3255 * will be cached, so that the arena does not use it. Dirty page
3256 * flushing only uses the chunks_dirty tree, so leaving this chunk in
3257 * the chunks_* trees is sufficient for that purpose.
3259 arena_avail_tree_remove(&arena
->runs_avail
,
3260 &chunk
->map
[arena_chunk_header_npages
]);
3262 arena
->spare
= chunk
;
3265 static arena_run_t
*
3266 arena_run_alloc(arena_t
*arena
, arena_bin_t
*bin
, size_t size
, bool large
,
3269 arena_chunk_t
*chunk
;
3271 arena_chunk_map_t
*mapelm
, key
;
3273 assert(size
<= arena_maxclass
);
3274 assert((size
& pagesize_mask
) == 0);
3278 /* Search the arena's chunks for the lowest best fit. */
3279 key
.bits
= size
| CHUNK_MAP_KEY
;
3280 mapelm
= arena_avail_tree_nsearch(&arena
->runs_avail
, &key
);
3281 if (mapelm
!= NULL
) {
3282 arena_chunk_t
*run_chunk
= CHUNK_ADDR2BASE(mapelm
);
3283 size_t pageind
= ((uintptr_t)mapelm
-
3284 (uintptr_t)run_chunk
->map
) /
3285 sizeof(arena_chunk_map_t
);
3288 chunk_dealloc(chunk
, chunksize
);
3289 run
= (arena_run_t
*)((uintptr_t)run_chunk
+ (pageind
3291 arena_run_split(arena
, run
, size
, large
, zero
);
3295 if (arena
->spare
!= NULL
) {
3296 /* Use the spare. */
3297 chunk
= arena
->spare
;
3298 arena
->spare
= NULL
;
3299 run
= (arena_run_t
*)((uintptr_t)chunk
+
3300 (arena_chunk_header_npages
<< pagesize_2pow
));
3301 /* Insert the run into the runs_avail tree. */
3302 arena_avail_tree_insert(&arena
->runs_avail
,
3303 &chunk
->map
[arena_chunk_header_npages
]);
3304 arena_run_split(arena
, run
, size
, large
, zero
);
3309 * No usable runs. Create a new chunk from which to allocate
3312 if (chunk
== NULL
) {
3316 * Record the chunk allocation sequence number in order
3320 chunk_seq
= arena
->chunk_seq
;
3323 * Drop the arena lock while allocating a chunk, since
3324 * reserve notifications may cause recursive
3325 * allocation. Dropping the lock here opens an
3326 * allocataion race, but we recover.
3328 malloc_mutex_unlock(&arena
->lock
);
3329 chunk
= (arena_chunk_t
*)chunk_alloc(chunksize
, true,
3331 malloc_mutex_lock(&arena
->lock
);
3334 * Check whether a race allowed a usable run to appear.
3336 if (bin
!= NULL
&& (run
= bin
->runcur
) != NULL
&&
3339 chunk_dealloc(chunk
, chunksize
);
3344 * If this thread raced with another such that multiple
3345 * chunks were allocated, make sure that there is still
3346 * inadequate space before using this chunk.
3348 if (chunk_seq
!= arena
->chunk_seq
)
3352 * Check for an error *after* checking for a race,
3353 * since a race could also cause a transient OOM
3360 arena_chunk_init(arena
, chunk
);
3361 run
= (arena_run_t
*)((uintptr_t)chunk
+
3362 (arena_chunk_header_npages
<< pagesize_2pow
));
3363 /* Update page map. */
3364 arena_run_split(arena
, run
, size
, large
, zero
);
3370 arena_purge(arena_t
*arena
)
3372 arena_chunk_t
*chunk
;
3376 rb_foreach_begin(arena_chunk_t
, link_dirty
, &arena
->chunks_dirty
,
3378 ndirty
+= chunk
->ndirty
;
3379 } rb_foreach_end(arena_chunk_t
, link_dirty
, &arena
->chunks_dirty
, chunk
)
3380 assert(ndirty
== arena
->ndirty
);
3382 assert(arena
->ndirty
> opt_dirty_max
);
3385 arena
->stats
.npurge
++;
3389 * Iterate downward through chunks until enough dirty memory has been
3390 * purged. Terminate as soon as possible in order to minimize the
3391 * number of system calls, even if a chunk has only been partially
3394 while (arena
->ndirty
> (opt_dirty_max
>> 1)) {
3395 chunk
= arena_chunk_tree_dirty_last(&arena
->chunks_dirty
);
3396 assert(chunk
!= NULL
);
3398 for (i
= chunk_npages
- 1; chunk
->ndirty
> 0; i
--) {
3399 assert(i
>= arena_chunk_header_npages
);
3401 if (chunk
->map
[i
].bits
& CHUNK_MAP_DIRTY
) {
3402 #ifdef MALLOC_DECOMMIT
3403 assert((chunk
->map
[i
].bits
&
3404 CHUNK_MAP_DECOMMITTED
) == 0);
3406 chunk
->map
[i
].bits
^=
3407 #ifdef MALLOC_DECOMMIT
3408 CHUNK_MAP_DECOMMITTED
|
3411 /* Find adjacent dirty run(s). */
3412 for (npages
= 1; i
> arena_chunk_header_npages
3413 && (chunk
->map
[i
- 1].bits
&
3414 CHUNK_MAP_DIRTY
); npages
++) {
3416 #ifdef MALLOC_DECOMMIT
3417 assert((chunk
->map
[i
].bits
&
3418 CHUNK_MAP_DECOMMITTED
) == 0);
3420 chunk
->map
[i
].bits
^=
3421 #ifdef MALLOC_DECOMMIT
3422 CHUNK_MAP_DECOMMITTED
|
3426 chunk
->ndirty
-= npages
;
3427 arena
->ndirty
-= npages
;
3429 #ifdef MALLOC_DECOMMIT
3430 pages_decommit((void *)((uintptr_t)
3431 chunk
+ (i
<< pagesize_2pow
)),
3432 (npages
<< pagesize_2pow
));
3433 # ifdef MALLOC_STATS
3434 arena
->stats
.ndecommit
++;
3435 arena
->stats
.decommitted
+= npages
;
3438 madvise((void *)((uintptr_t)chunk
+ (i
<<
3439 pagesize_2pow
)), (npages
<< pagesize_2pow
),
3443 arena
->stats
.nmadvise
++;
3444 arena
->stats
.purged
+= npages
;
3446 if (arena
->ndirty
<= (opt_dirty_max
>> 1))
3451 if (chunk
->ndirty
== 0) {
3452 arena_chunk_tree_dirty_remove(&arena
->chunks_dirty
,
3459 arena_run_dalloc(arena_t
*arena
, arena_run_t
*run
, bool dirty
)
3461 arena_chunk_t
*chunk
;
3462 size_t size
, run_ind
, run_pages
;
3464 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(run
);
3465 run_ind
= (size_t)(((uintptr_t)run
- (uintptr_t)chunk
)
3467 assert(run_ind
>= arena_chunk_header_npages
);
3468 assert(run_ind
< chunk_npages
);
3469 if ((chunk
->map
[run_ind
].bits
& CHUNK_MAP_LARGE
) != 0)
3470 size
= chunk
->map
[run_ind
].bits
& ~pagesize_mask
;
3472 size
= run
->bin
->run_size
;
3473 run_pages
= (size
>> pagesize_2pow
);
3475 /* Mark pages as unallocated in the chunk map. */
3479 for (i
= 0; i
< run_pages
; i
++) {
3480 assert((chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DIRTY
)
3482 chunk
->map
[run_ind
+ i
].bits
= CHUNK_MAP_DIRTY
;
3485 if (chunk
->ndirty
== 0) {
3486 arena_chunk_tree_dirty_insert(&arena
->chunks_dirty
,
3489 chunk
->ndirty
+= run_pages
;
3490 arena
->ndirty
+= run_pages
;
3494 for (i
= 0; i
< run_pages
; i
++) {
3495 chunk
->map
[run_ind
+ i
].bits
&= ~(CHUNK_MAP_LARGE
|
3496 CHUNK_MAP_ALLOCATED
);
3499 chunk
->map
[run_ind
].bits
= size
| (chunk
->map
[run_ind
].bits
&
3501 chunk
->map
[run_ind
+run_pages
-1].bits
= size
|
3502 (chunk
->map
[run_ind
+run_pages
-1].bits
& pagesize_mask
);
3504 /* Try to coalesce forward. */
3505 if (run_ind
+ run_pages
< chunk_npages
&&
3506 (chunk
->map
[run_ind
+run_pages
].bits
& CHUNK_MAP_ALLOCATED
) == 0) {
3507 size_t nrun_size
= chunk
->map
[run_ind
+run_pages
].bits
&
3511 * Remove successor from runs_avail; the coalesced run is
3514 arena_avail_tree_remove(&arena
->runs_avail
,
3515 &chunk
->map
[run_ind
+run_pages
]);
3518 run_pages
= size
>> pagesize_2pow
;
3520 assert((chunk
->map
[run_ind
+run_pages
-1].bits
& ~pagesize_mask
)
3522 chunk
->map
[run_ind
].bits
= size
| (chunk
->map
[run_ind
].bits
&
3524 chunk
->map
[run_ind
+run_pages
-1].bits
= size
|
3525 (chunk
->map
[run_ind
+run_pages
-1].bits
& pagesize_mask
);
3528 /* Try to coalesce backward. */
3529 if (run_ind
> arena_chunk_header_npages
&& (chunk
->map
[run_ind
-1].bits
&
3530 CHUNK_MAP_ALLOCATED
) == 0) {
3531 size_t prun_size
= chunk
->map
[run_ind
-1].bits
& ~pagesize_mask
;
3533 run_ind
-= prun_size
>> pagesize_2pow
;
3536 * Remove predecessor from runs_avail; the coalesced run is
3539 arena_avail_tree_remove(&arena
->runs_avail
,
3540 &chunk
->map
[run_ind
]);
3543 run_pages
= size
>> pagesize_2pow
;
3545 assert((chunk
->map
[run_ind
].bits
& ~pagesize_mask
) ==
3547 chunk
->map
[run_ind
].bits
= size
| (chunk
->map
[run_ind
].bits
&
3549 chunk
->map
[run_ind
+run_pages
-1].bits
= size
|
3550 (chunk
->map
[run_ind
+run_pages
-1].bits
& pagesize_mask
);
3553 /* Insert into runs_avail, now that coalescing is complete. */
3554 arena_avail_tree_insert(&arena
->runs_avail
, &chunk
->map
[run_ind
]);
3556 /* Deallocate chunk if it is now completely unused. */
3557 if ((chunk
->map
[arena_chunk_header_npages
].bits
& (~pagesize_mask
|
3558 CHUNK_MAP_ALLOCATED
)) == arena_maxclass
)
3559 arena_chunk_dealloc(arena
, chunk
);
3561 /* Enforce opt_dirty_max. */
3562 if (arena
->ndirty
> opt_dirty_max
)
3567 arena_run_trim_head(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
3568 size_t oldsize
, size_t newsize
)
3570 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> pagesize_2pow
;
3571 size_t head_npages
= (oldsize
- newsize
) >> pagesize_2pow
;
3573 assert(oldsize
> newsize
);
3576 * Update the chunk map so that arena_run_dalloc() can treat the
3577 * leading run as separately allocated.
3579 chunk
->map
[pageind
].bits
= (oldsize
- newsize
) | CHUNK_MAP_LARGE
|
3580 CHUNK_MAP_ALLOCATED
;
3581 chunk
->map
[pageind
+head_npages
].bits
= newsize
| CHUNK_MAP_LARGE
|
3582 CHUNK_MAP_ALLOCATED
;
3584 arena_run_dalloc(arena
, run
, false);
3588 arena_run_trim_tail(arena_t
*arena
, arena_chunk_t
*chunk
, arena_run_t
*run
,
3589 size_t oldsize
, size_t newsize
, bool dirty
)
3591 size_t pageind
= ((uintptr_t)run
- (uintptr_t)chunk
) >> pagesize_2pow
;
3592 size_t npages
= newsize
>> pagesize_2pow
;
3594 assert(oldsize
> newsize
);
3597 * Update the chunk map so that arena_run_dalloc() can treat the
3598 * trailing run as separately allocated.
3600 chunk
->map
[pageind
].bits
= newsize
| CHUNK_MAP_LARGE
|
3601 CHUNK_MAP_ALLOCATED
;
3602 chunk
->map
[pageind
+npages
].bits
= (oldsize
- newsize
) | CHUNK_MAP_LARGE
3603 | CHUNK_MAP_ALLOCATED
;
3605 arena_run_dalloc(arena
, (arena_run_t
*)((uintptr_t)run
+ newsize
),
3609 static arena_run_t
*
3610 arena_bin_nonfull_run_get(arena_t
*arena
, arena_bin_t
*bin
)
3612 arena_chunk_map_t
*mapelm
;
3614 unsigned i
, remainder
;
3616 /* Look for a usable run. */
3617 mapelm
= arena_run_tree_first(&bin
->runs
);
3618 if (mapelm
!= NULL
) {
3619 /* run is guaranteed to have available space. */
3620 arena_run_tree_remove(&bin
->runs
, mapelm
);
3621 run
= (arena_run_t
*)(mapelm
->bits
& ~pagesize_mask
);
3623 bin
->stats
.reruns
++;
3627 /* No existing runs have any space available. */
3629 /* Allocate a new run. */
3630 run
= arena_run_alloc(arena
, bin
, bin
->run_size
, false, false);
3634 * Don't initialize if a race in arena_run_alloc() allowed an existing
3635 * run to become usable.
3637 if (run
== bin
->runcur
)
3640 VALGRIND_MALLOCLIKE_BLOCK(run
, sizeof(arena_run_t
) + (sizeof(unsigned) *
3641 (bin
->regs_mask_nelms
- 1)), 0, false);
3643 /* Initialize run internals. */
3646 for (i
= 0; i
< bin
->regs_mask_nelms
- 1; i
++)
3647 run
->regs_mask
[i
] = UINT_MAX
;
3648 remainder
= bin
->nregs
& ((1U << (SIZEOF_INT_2POW
+ 3)) - 1);
3650 run
->regs_mask
[i
] = UINT_MAX
;
3652 /* The last element has spare bits that need to be unset. */
3653 run
->regs_mask
[i
] = (UINT_MAX
>> ((1U << (SIZEOF_INT_2POW
+ 3))
3657 run
->regs_minelm
= 0;
3659 run
->nfree
= bin
->nregs
;
3661 run
->magic
= ARENA_RUN_MAGIC
;
3666 bin
->stats
.curruns
++;
3667 if (bin
->stats
.curruns
> bin
->stats
.highruns
)
3668 bin
->stats
.highruns
= bin
->stats
.curruns
;
/* bin->runcur must have space available before this function is called. */
static inline void *
arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
{
	void *ret;

	assert(run->magic == ARENA_RUN_MAGIC);
	assert(run->nfree > 0);

	ret = arena_run_reg_alloc(run, bin);
	assert(ret != NULL);
	run->nfree--;

	return (ret);
}

/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{

	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur == NULL)
		return (NULL);
	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
	assert(bin->runcur->nfree > 0);

	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
}
3704 * Calculate bin->run_size such that it meets the following constraints:
3706 * *) bin->run_size >= min_run_size
3707 * *) bin->run_size <= arena_maxclass
3708 * *) bin->run_size <= RUN_MAX_SMALL
3709 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
3711 * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
3712 * also calculated here, since these settings are all interdependent.
3715 arena_bin_run_size_calc(arena_bin_t
*bin
, size_t min_run_size
)
3717 size_t try_run_size
, good_run_size
;
3718 unsigned good_nregs
, good_mask_nelms
, good_reg0_offset
;
3719 unsigned try_nregs
, try_mask_nelms
, try_reg0_offset
;
3721 assert(min_run_size
>= pagesize
);
3722 assert(min_run_size
<= arena_maxclass
);
3723 assert(min_run_size
<= RUN_MAX_SMALL
);
3726 * Calculate known-valid settings before entering the run_size
3727 * expansion loop, so that the first part of the loop always copies
3730 * The do..while loop iteratively reduces the number of regions until
3731 * the run header and the regions no longer overlap. A closed formula
3732 * would be quite messy, since there is an interdependency between the
3733 * header's mask length and the number of regions.
3735 try_run_size
= min_run_size
;
3736 try_nregs
= ((try_run_size
- sizeof(arena_run_t
)) / bin
->reg_size
)
3737 + 1; /* Counter-act try_nregs-- in loop. */
3740 try_mask_nelms
= (try_nregs
>> (SIZEOF_INT_2POW
+ 3)) +
3741 ((try_nregs
& ((1U << (SIZEOF_INT_2POW
+ 3)) - 1)) ? 1 : 0);
3742 try_reg0_offset
= try_run_size
- (try_nregs
* bin
->reg_size
);
3743 } while (sizeof(arena_run_t
) + (sizeof(unsigned) * (try_mask_nelms
- 1))
3746 /* run_size expansion loop. */
3749 * Copy valid settings before trying more aggressive settings.
3751 good_run_size
= try_run_size
;
3752 good_nregs
= try_nregs
;
3753 good_mask_nelms
= try_mask_nelms
;
3754 good_reg0_offset
= try_reg0_offset
;
3756 /* Try more aggressive settings. */
3757 try_run_size
+= pagesize
;
3758 try_nregs
= ((try_run_size
- sizeof(arena_run_t
)) /
3759 bin
->reg_size
) + 1; /* Counter-act try_nregs-- in loop. */
3762 try_mask_nelms
= (try_nregs
>> (SIZEOF_INT_2POW
+ 3)) +
3763 ((try_nregs
& ((1U << (SIZEOF_INT_2POW
+ 3)) - 1)) ?
3765 try_reg0_offset
= try_run_size
- (try_nregs
*
3767 } while (sizeof(arena_run_t
) + (sizeof(unsigned) *
3768 (try_mask_nelms
- 1)) > try_reg0_offset
);
3769 } while (try_run_size
<= arena_maxclass
&& try_run_size
<= RUN_MAX_SMALL
3770 && RUN_MAX_OVRHD
* (bin
->reg_size
<< 3) > RUN_MAX_OVRHD_RELAX
3771 && (try_reg0_offset
<< RUN_BFP
) > RUN_MAX_OVRHD
* try_run_size
);
3773 assert(sizeof(arena_run_t
) + (sizeof(unsigned) * (good_mask_nelms
- 1))
3774 <= good_reg0_offset
);
3775 assert((good_mask_nelms
<< (SIZEOF_INT_2POW
+ 3)) >= good_nregs
);
3777 /* Copy final settings. */
3778 bin
->run_size
= good_run_size
;
3779 bin
->nregs
= good_nregs
;
3780 bin
->regs_mask_nelms
= good_mask_nelms
;
3781 bin
->reg0_offset
= good_reg0_offset
;
3783 return (good_run_size
);
3786 #ifdef MALLOC_BALANCE
3788 arena_lock_balance(arena_t
*arena
)
3790 unsigned contention
;
3792 contention
= malloc_spin_lock(&arena
->lock
);
3795 * Calculate the exponentially averaged contention for this
3796 * arena. Due to integer math always rounding down, this value
3797 * decays somewhat faster then normal.
3799 arena
->contention
= (((uint64_t)arena
->contention
3800 * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW
)-1))
3801 + (uint64_t)contention
) >> BALANCE_ALPHA_INV_2POW
;
3802 if (arena
->contention
>= opt_balance_threshold
)
3803 arena_lock_balance_hard(arena
);
3808 arena_lock_balance_hard(arena_t
*arena
)
3812 arena
->contention
= 0;
3814 arena
->stats
.nbalance
++;
3816 ind
= PRN(balance
, narenas_2pow
);
3817 if (arenas
[ind
] != NULL
) {
3818 #ifdef MOZ_MEMORY_WINDOWS
3819 TlsSetValue(tlsIndex
, arenas
[ind
]);
3821 arenas_map
= arenas
[ind
];
3824 malloc_spin_lock(&arenas_lock
);
3825 if (arenas
[ind
] != NULL
) {
3826 #ifdef MOZ_MEMORY_WINDOWS
3827 TlsSetValue(tlsIndex
, arenas
[ind
]);
3829 arenas_map
= arenas
[ind
];
3832 #ifdef MOZ_MEMORY_WINDOWS
3833 TlsSetValue(tlsIndex
, arenas_extend(ind
));
3835 arenas_map
= arenas_extend(ind
);
3838 malloc_spin_unlock(&arenas_lock
);
3843 static inline void *
3844 arena_malloc_small(arena_t
*arena
, size_t size
, bool zero
)
3850 if (size
< small_min
) {
3852 size
= pow2_ceil(size
);
3853 bin
= &arena
->bins
[ffs((int)(size
>> (TINY_MIN_2POW
+
3855 #if (!defined(NDEBUG) || defined(MALLOC_STATS))
3857 * Bin calculation is always correct, but we may need
3858 * to fix size for the purposes of assertions and/or
3861 if (size
< (1U << TINY_MIN_2POW
))
3862 size
= (1U << TINY_MIN_2POW
);
3864 } else if (size
<= small_max
) {
3865 /* Quantum-spaced. */
3866 size
= QUANTUM_CEILING(size
);
3867 bin
= &arena
->bins
[ntbins
+ (size
>> opt_quantum_2pow
)
3871 size
= pow2_ceil(size
);
3872 bin
= &arena
->bins
[ntbins
+ nqbins
3873 + (ffs((int)(size
>> opt_small_max_2pow
)) - 2)];
3875 assert(size
== bin
->reg_size
);
3877 #ifdef MALLOC_BALANCE
3878 arena_lock_balance(arena
);
3880 malloc_spin_lock(&arena
->lock
);
3882 if ((run
= bin
->runcur
) != NULL
&& run
->nfree
> 0)
3883 ret
= arena_bin_malloc_easy(arena
, bin
, run
);
3885 ret
= arena_bin_malloc_hard(arena
, bin
);
3888 malloc_spin_unlock(&arena
->lock
);
3893 bin
->stats
.nrequests
++;
3894 arena
->stats
.nmalloc_small
++;
3895 arena
->stats
.allocated_small
+= size
;
3897 malloc_spin_unlock(&arena
->lock
);
3899 VALGRIND_MALLOCLIKE_BLOCK(ret
, size
, 0, zero
);
3900 if (zero
== false) {
3903 memset(ret
, 0xa5, size
);
3905 memset(ret
, 0, size
);
3908 memset(ret
, 0, size
);
3914 arena_malloc_large(arena_t
*arena
, size_t size
, bool zero
)
3918 /* Large allocation. */
3919 size
= PAGE_CEILING(size
);
3920 #ifdef MALLOC_BALANCE
3921 arena_lock_balance(arena
);
3923 malloc_spin_lock(&arena
->lock
);
3925 ret
= (void *)arena_run_alloc(arena
, NULL
, size
, true, zero
);
3927 malloc_spin_unlock(&arena
->lock
);
3931 arena
->stats
.nmalloc_large
++;
3932 arena
->stats
.allocated_large
+= size
;
3934 malloc_spin_unlock(&arena
->lock
);
3936 VALGRIND_MALLOCLIKE_BLOCK(ret
, size
, 0, zero
);
3937 if (zero
== false) {
3940 memset(ret
, 0xa5, size
);
3942 memset(ret
, 0, size
);
static inline void *
arena_malloc(arena_t *arena, size_t size, bool zero)
{

	assert(arena != NULL);
	assert(arena->magic == ARENA_MAGIC);
	assert(size != 0);
	assert(QUANTUM_CEILING(size) <= arena_maxclass);

	if (size <= bin_maxclass) {
		return (arena_malloc_small(arena, size, zero));
	} else
		return (arena_malloc_large(arena, size, zero));
}

static inline void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(choose_arena(), size, false));
	else
		return (huge_malloc(size, false));
}

static inline void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(choose_arena(), size, true));
	else
		return (huge_malloc(size, true));
}
3986 /* Only handles large allocations that require more than page alignment. */
3988 arena_palloc(arena_t
*arena
, size_t alignment
, size_t size
, size_t alloc_size
)
3992 arena_chunk_t
*chunk
;
3994 assert((size
& pagesize_mask
) == 0);
3995 assert((alignment
& pagesize_mask
) == 0);
3997 #ifdef MALLOC_BALANCE
3998 arena_lock_balance(arena
);
4000 malloc_spin_lock(&arena
->lock
);
4002 ret
= (void *)arena_run_alloc(arena
, NULL
, alloc_size
, true, false);
4004 malloc_spin_unlock(&arena
->lock
);
4008 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ret
);
4010 offset
= (uintptr_t)ret
& (alignment
- 1);
4011 assert((offset
& pagesize_mask
) == 0);
4012 assert(offset
< alloc_size
);
4014 arena_run_trim_tail(arena
, chunk
, ret
, alloc_size
, size
, false);
4016 size_t leadsize
, trailsize
;
4018 leadsize
= alignment
- offset
;
4020 arena_run_trim_head(arena
, chunk
, ret
, alloc_size
,
4021 alloc_size
- leadsize
);
4022 ret
= (void *)((uintptr_t)ret
+ leadsize
);
4025 trailsize
= alloc_size
- leadsize
- size
;
4026 if (trailsize
!= 0) {
4027 /* Trim trailing space. */
4028 assert(trailsize
< alloc_size
);
4029 arena_run_trim_tail(arena
, chunk
, ret
, size
+ trailsize
,
4035 arena
->stats
.nmalloc_large
++;
4036 arena
->stats
.allocated_large
+= size
;
4038 malloc_spin_unlock(&arena
->lock
);
4040 VALGRIND_MALLOCLIKE_BLOCK(ret
, size
, 0, false);
4043 memset(ret
, 0xa5, size
);
4045 memset(ret
, 0, size
);
4050 static inline void *
4051 ipalloc(size_t alignment
, size_t size
)
4057 * Round size up to the nearest multiple of alignment.
4059 * This done, we can take advantage of the fact that for each small
4060 * size class, every object is aligned at the smallest power of two
4061 * that is non-zero in the base two representation of the size. For
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
4070 * Depending on runtime settings, it is possible that arena_malloc()
4071 * will further round up to a power of two, but that never causes
4072 * correctness issues.
4074 ceil_size
= (size
+ (alignment
- 1)) & (-alignment
);
4076 * (ceil_size < size) protects against the combination of maximal
4077 * alignment and size greater than maximal alignment.
4079 if (ceil_size
< size
) {
4080 /* size_t overflow. */
4084 if (ceil_size
<= pagesize
|| (alignment
<= pagesize
4085 && ceil_size
<= arena_maxclass
))
4086 ret
= arena_malloc(choose_arena(), ceil_size
, false);
4091 * We can't achieve sub-page alignment, so round up alignment
4092 * permanently; it makes later calculations simpler.
4094 alignment
= PAGE_CEILING(alignment
);
4095 ceil_size
= PAGE_CEILING(size
);
4097 * (ceil_size < size) protects against very large sizes within
4098 * pagesize of SIZE_T_MAX.
4100 * (ceil_size + alignment < ceil_size) protects against the
4101 * combination of maximal alignment and ceil_size large enough
4102 * to cause overflow. This is similar to the first overflow
4103 * check above, but it needs to be repeated due to the new
4104 * ceil_size value, which may now be *equal* to maximal
4105 * alignment, whereas before we only detected overflow if the
4106 * original size was *greater* than maximal alignment.
4108 if (ceil_size
< size
|| ceil_size
+ alignment
< ceil_size
) {
4109 /* size_t overflow. */
4114 * Calculate the size of the over-size run that arena_palloc()
4115 * would need to allocate in order to guarantee the alignment.
4117 if (ceil_size
>= alignment
)
4118 run_size
= ceil_size
+ alignment
- pagesize
;
4121 * It is possible that (alignment << 1) will cause
4122 * overflow, but it doesn't matter because we also
4123 * subtract pagesize, which in the case of overflow
4124 * leaves us with a very large run_size. That causes
4125 * the first conditional below to fail, which means
4126 * that the bogus run_size value never gets used for
4127 * anything important.
4129 run_size
= (alignment
<< 1) - pagesize
;
4132 if (run_size
<= arena_maxclass
) {
4133 ret
= arena_palloc(choose_arena(), alignment
, ceil_size
,
4135 } else if (alignment
<= chunksize
)
4136 ret
= huge_malloc(ceil_size
, false);
4138 ret
= huge_palloc(alignment
, ceil_size
);
4141 assert(((uintptr_t)ret
& (alignment
- 1)) == 0);
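/*
 * Worked example for the alignment math above (assuming 4 kB pages): a
 * request for size 100 with alignment 64 rounds to ceil_size == 128 and is
 * served as an ordinary small allocation, since every 128 byte region is
 * already 64-byte aligned.  A request for size 10000 with alignment 8192
 * becomes ceil_size == 12288 and run_size == 12288 + 8192 - 4096 == 16384;
 * because runs start page-aligned, any 16384 byte run is guaranteed to
 * contain an 8192-aligned span of 12288 bytes for arena_palloc() to trim
 * down to.
 */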
/* Return the size of the allocation pointed to by ptr. */
static inline size_t
arena_salloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
	mapbits = chunk->map[pageind].bits;
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
		assert(run->magic == ARENA_RUN_MAGIC);
		ret = run->bin->reg_size;
	} else {
		ret = mapbits & ~pagesize_mask;
		assert(ret != 0);
	}

	return (ret);
}
#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
/*
 * Validate ptr before assuming that it points to an allocation.  Currently,
 * the following validation is performed:
 *
 * + Check that ptr is not NULL.
 *
 * + Check that ptr lies within a mapped chunk.
 */
static inline size_t
isalloc_validate(const void *ptr)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk == NULL)
		return (0);

	if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
		return (0);

	if (chunk != ptr) {
		assert(chunk->arena->magic == ARENA_MAGIC);
		return (arena_salloc(ptr));
	} else {
		size_t ret;
		extent_node_t *node;
		extent_node_t key;

		/* Chunk (huge allocation). */
		key.addr = (void *)chunk;
		malloc_mutex_lock(&huge_mtx);
		node = extent_tree_ad_search(&huge, &key);
		if (node != NULL)
			ret = node->size;
		else
			ret = 0;
		malloc_mutex_unlock(&huge_mtx);
		return (ret);
	}
}
#endif
static inline size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		assert(chunk->arena->magic == ARENA_MAGIC);

		ret = arena_salloc(ptr);
	} else {
		extent_node_t *node, key;

		/* Chunk (huge allocation). */

		malloc_mutex_lock(&huge_mtx);

		/* Extract from tree of huge allocations. */
		key.addr = __DECONST(void *, ptr);
		node = extent_tree_ad_search(&huge, &key);
		assert(node != NULL);

		ret = node->size;

		malloc_mutex_unlock(&huge_mtx);
	}

	return (ret);
}
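/*
 * Note on the dispatch above: CHUNK_ADDR2BASE(ptr) == ptr only when ptr is
 * chunk-aligned, which for a valid allocation means it is the start of a
 * huge allocation; pointers into an arena chunk always sit past the chunk
 * header.  That pointer-equality test is what routes isalloc() to either
 * arena_salloc() or the tree of huge allocations.
 */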
4250 arena_dalloc_small(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
,
4251 arena_chunk_map_t
*mapelm
)
4257 run
= (arena_run_t
*)(mapelm
->bits
& ~pagesize_mask
);
4258 assert(run
->magic
== ARENA_RUN_MAGIC
);
4260 size
= bin
->reg_size
;
4264 memset(ptr
, 0x5a, size
);
4267 arena_run_reg_dalloc(run
, bin
, ptr
, size
);
4270 if (run
->nfree
== bin
->nregs
) {
4271 /* Deallocate run. */
4272 if (run
== bin
->runcur
)
4274 else if (bin
->nregs
!= 1) {
4275 size_t run_pageind
= (((uintptr_t)run
-
4276 (uintptr_t)chunk
)) >> pagesize_2pow
;
4277 arena_chunk_map_t
*run_mapelm
=
4278 &chunk
->map
[run_pageind
];
4280 * This block's conditional is necessary because if the
4281 * run only contains one region, then it never gets
4282 * inserted into the non-full runs tree.
4284 assert(arena_run_tree_search(&bin
->runs
, run_mapelm
) ==
4286 arena_run_tree_remove(&bin
->runs
, run_mapelm
);
4291 VALGRIND_FREELIKE_BLOCK(run
, 0);
4292 arena_run_dalloc(arena
, run
, true);
4294 bin
->stats
.curruns
--;
4296 } else if (run
->nfree
== 1 && run
!= bin
->runcur
) {
4298 * Make sure that bin->runcur always refers to the lowest
4299 * non-full run, if one exists.
4301 if (bin
->runcur
== NULL
)
4303 else if ((uintptr_t)run
< (uintptr_t)bin
->runcur
) {
4304 /* Switch runcur. */
4305 if (bin
->runcur
->nfree
> 0) {
4306 arena_chunk_t
*runcur_chunk
=
4307 CHUNK_ADDR2BASE(bin
->runcur
);
4308 size_t runcur_pageind
=
4309 (((uintptr_t)bin
->runcur
-
4310 (uintptr_t)runcur_chunk
)) >> pagesize_2pow
;
4311 arena_chunk_map_t
*runcur_mapelm
=
4312 &runcur_chunk
->map
[runcur_pageind
];
4314 /* Insert runcur. */
4315 assert(arena_run_tree_search(&bin
->runs
,
4316 runcur_mapelm
) == NULL
);
4317 arena_run_tree_insert(&bin
->runs
,
4322 size_t run_pageind
= (((uintptr_t)run
-
4323 (uintptr_t)chunk
)) >> pagesize_2pow
;
4324 arena_chunk_map_t
*run_mapelm
=
4325 &chunk
->map
[run_pageind
];
4327 assert(arena_run_tree_search(&bin
->runs
, run_mapelm
) ==
4329 arena_run_tree_insert(&bin
->runs
, run_mapelm
);
4333 arena
->stats
.allocated_small
-= size
;
4334 arena
->stats
.ndalloc_small
++;
4339 arena_dalloc_large(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
)
4341 /* Large allocation. */
4342 malloc_spin_lock(&arena
->lock
);
4345 #ifndef MALLOC_STATS
4350 size_t pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >>
4352 size_t size
= chunk
->map
[pageind
].bits
& ~pagesize_mask
;
4358 memset(ptr
, 0x5a, size
);
4361 arena
->stats
.allocated_large
-= size
;
4365 arena
->stats
.ndalloc_large
++;
4368 arena_run_dalloc(arena
, (arena_run_t
*)ptr
, true);
4369 malloc_spin_unlock(&arena
->lock
);
4373 arena_dalloc(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
)
4376 arena_chunk_map_t
*mapelm
;
4378 assert(arena
!= NULL
);
4379 assert(arena
->magic
== ARENA_MAGIC
);
4380 assert(chunk
->arena
== arena
);
4381 assert(ptr
!= NULL
);
4382 assert(CHUNK_ADDR2BASE(ptr
) != ptr
);
4384 pageind
= (((uintptr_t)ptr
- (uintptr_t)chunk
) >> pagesize_2pow
);
4385 mapelm
= &chunk
->map
[pageind
];
4386 assert((mapelm
->bits
& CHUNK_MAP_ALLOCATED
) != 0);
4387 if ((mapelm
->bits
& CHUNK_MAP_LARGE
) == 0) {
4388 /* Small allocation. */
4389 malloc_spin_lock(&arena
->lock
);
4390 arena_dalloc_small(arena
, chunk
, ptr
, mapelm
);
4391 malloc_spin_unlock(&arena
->lock
);
4393 arena_dalloc_large(arena
, chunk
, ptr
);
4394 VALGRIND_FREELIKE_BLOCK(ptr
, 0);
4400 arena_chunk_t
*chunk
;
4402 assert(ptr
!= NULL
);
4404 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ptr
);
4406 arena_dalloc(chunk
->arena
, chunk
, ptr
);
4412 arena_ralloc_large_shrink(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
,
4413 size_t size
, size_t oldsize
)
4416 assert(size
< oldsize
);
4419 * Shrink the run, and make trailing pages available for other
4422 #ifdef MALLOC_BALANCE
4423 arena_lock_balance(arena
);
4425 malloc_spin_lock(&arena
->lock
);
4427 arena_run_trim_tail(arena
, chunk
, (arena_run_t
*)ptr
, oldsize
, size
,
4430 arena
->stats
.allocated_large
-= oldsize
- size
;
4432 malloc_spin_unlock(&arena
->lock
);
4436 arena_ralloc_large_grow(arena_t
*arena
, arena_chunk_t
*chunk
, void *ptr
,
4437 size_t size
, size_t oldsize
)
4439 size_t pageind
= ((uintptr_t)ptr
- (uintptr_t)chunk
) >> pagesize_2pow
;
4440 size_t npages
= oldsize
>> pagesize_2pow
;
4442 assert(oldsize
== (chunk
->map
[pageind
].bits
& ~pagesize_mask
));
4444 /* Try to extend the run. */
4445 assert(size
> oldsize
);
4446 #ifdef MALLOC_BALANCE
4447 arena_lock_balance(arena
);
4449 malloc_spin_lock(&arena
->lock
);
4451 if (pageind
+ npages
< chunk_npages
&& (chunk
->map
[pageind
+npages
].bits
4452 & CHUNK_MAP_ALLOCATED
) == 0 && (chunk
->map
[pageind
+npages
].bits
&
4453 ~pagesize_mask
) >= size
- oldsize
) {
4455 * The next run is available and sufficiently large. Split the
4456 * following run, then merge the first part with the existing
4459 arena_run_split(arena
, (arena_run_t
*)((uintptr_t)chunk
+
4460 ((pageind
+npages
) << pagesize_2pow
)), size
- oldsize
, true,
4463 chunk
->map
[pageind
].bits
= size
| CHUNK_MAP_LARGE
|
4464 CHUNK_MAP_ALLOCATED
;
4465 chunk
->map
[pageind
+npages
].bits
= CHUNK_MAP_LARGE
|
4466 CHUNK_MAP_ALLOCATED
;
4469 arena
->stats
.allocated_large
+= size
- oldsize
;
4471 malloc_spin_unlock(&arena
->lock
);
4474 malloc_spin_unlock(&arena
->lock
);
4480 * Try to resize a large allocation, in order to avoid copying. This will
4481 * always fail if growing an object, and the following run is already in use.
4484 arena_ralloc_large(void *ptr
, size_t size
, size_t oldsize
)
4488 psize
= PAGE_CEILING(size
);
4489 if (psize
== oldsize
) {
4490 /* Same size class. */
4492 if (opt_junk
&& size
< oldsize
) {
4493 memset((void *)((uintptr_t)ptr
+ size
), 0x5a, oldsize
-
4499 arena_chunk_t
*chunk
;
4502 chunk
= (arena_chunk_t
*)CHUNK_ADDR2BASE(ptr
);
4503 arena
= chunk
->arena
;
4504 assert(arena
->magic
== ARENA_MAGIC
);
4506 if (psize
< oldsize
) {
4508 /* Fill before shrinking in order avoid a race. */
4510 memset((void *)((uintptr_t)ptr
+ size
), 0x5a,
4514 arena_ralloc_large_shrink(arena
, chunk
, ptr
, psize
,
4518 bool ret
= arena_ralloc_large_grow(arena
, chunk
, ptr
,
4521 if (ret
== false && opt_zero
) {
4522 memset((void *)((uintptr_t)ptr
+ oldsize
), 0,
4532 arena_ralloc(void *ptr
, size_t size
, size_t oldsize
)
4537 /* Try to avoid moving the allocation. */
4538 if (size
< small_min
) {
4539 if (oldsize
< small_min
&&
4540 ffs((int)(pow2_ceil(size
) >> (TINY_MIN_2POW
+ 1)))
4541 == ffs((int)(pow2_ceil(oldsize
) >> (TINY_MIN_2POW
+ 1))))
4542 goto IN_PLACE
; /* Same size class. */
4543 } else if (size
<= small_max
) {
4544 if (oldsize
>= small_min
&& oldsize
<= small_max
&&
4545 (QUANTUM_CEILING(size
) >> opt_quantum_2pow
)
4546 == (QUANTUM_CEILING(oldsize
) >> opt_quantum_2pow
))
4547 goto IN_PLACE
; /* Same size class. */
4548 } else if (size
<= bin_maxclass
) {
4549 if (oldsize
> small_max
&& oldsize
<= bin_maxclass
&&
4550 pow2_ceil(size
) == pow2_ceil(oldsize
))
4551 goto IN_PLACE
; /* Same size class. */
4552 } else if (oldsize
> bin_maxclass
&& oldsize
<= arena_maxclass
) {
4553 assert(size
> bin_maxclass
);
4554 if (arena_ralloc_large(ptr
, size
, oldsize
) == false)
4559 * If we get here, then size and oldsize are different enough that we
4560 * need to move the object. In that case, fall back to allocating new
4561 * space and copying.
4563 ret
= arena_malloc(choose_arena(), size
, false);
4567 /* Junk/zero-filling were already done by arena_malloc(). */
4568 copysize
= (size
< oldsize
) ? size
: oldsize
;
4570 if (copysize
>= VM_COPY_MIN
)
4571 pages_copy(ret
, ptr
, copysize
);
4574 memcpy(ret
, ptr
, copysize
);
4579 if (opt_junk
&& size
< oldsize
)
4580 memset((void *)((uintptr_t)ptr
+ size
), 0x5a, oldsize
- size
);
4581 else if (opt_zero
&& size
> oldsize
)
4582 memset((void *)((uintptr_t)ptr
+ oldsize
), 0, size
- oldsize
);
static inline void *
iralloc(void *ptr, size_t size)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

#ifndef MALLOC_VALGRIND
	if (size <= arena_maxclass)
		return (arena_ralloc(ptr, size, oldsize));
	else
		return (huge_ralloc(ptr, size, oldsize));
#else
	/*
	 * Valgrind does not provide a public interface for modifying an
	 * existing allocation, so use malloc/memcpy/free instead.
	 */
	{
		void *ret = imalloc(size);
		if (ret != NULL) {
			if (oldsize <= size)
				memcpy(ret, ptr, oldsize);
			else
				memcpy(ret, ptr, size);
			idalloc(ptr);
		}
		return (ret);
	}
#endif
}
4622 arena_new(arena_t
*arena
)
4626 size_t pow2_size
, prev_run_size
;
4628 if (malloc_spin_init(&arena
->lock
))
4632 memset(&arena
->stats
, 0, sizeof(arena_stats_t
));
4635 arena
->chunk_seq
= 0;
4637 /* Initialize chunks. */
4638 arena_chunk_tree_dirty_new(&arena
->chunks_dirty
);
4639 arena
->spare
= NULL
;
4643 arena_avail_tree_new(&arena
->runs_avail
);
4645 #ifdef MALLOC_BALANCE
4646 arena
->contention
= 0;
4649 /* Initialize bins. */
4650 prev_run_size
= pagesize
;
4652 /* (2^n)-spaced tiny bins. */
4653 for (i
= 0; i
< ntbins
; i
++) {
4654 bin
= &arena
->bins
[i
];
4656 arena_run_tree_new(&bin
->runs
);
4658 bin
->reg_size
= (1U << (TINY_MIN_2POW
+ i
));
4660 prev_run_size
= arena_bin_run_size_calc(bin
, prev_run_size
);
4663 memset(&bin
->stats
, 0, sizeof(malloc_bin_stats_t
));
4667 /* Quantum-spaced bins. */
4668 for (; i
< ntbins
+ nqbins
; i
++) {
4669 bin
= &arena
->bins
[i
];
4671 arena_run_tree_new(&bin
->runs
);
4673 bin
->reg_size
= quantum
* (i
- ntbins
+ 1);
4675 pow2_size
= pow2_ceil(quantum
* (i
- ntbins
+ 1));
4676 prev_run_size
= arena_bin_run_size_calc(bin
, prev_run_size
);
4679 memset(&bin
->stats
, 0, sizeof(malloc_bin_stats_t
));
4683 /* (2^n)-spaced sub-page bins. */
4684 for (; i
< ntbins
+ nqbins
+ nsbins
; i
++) {
4685 bin
= &arena
->bins
[i
];
4687 arena_run_tree_new(&bin
->runs
);
4689 bin
->reg_size
= (small_max
<< (i
- (ntbins
+ nqbins
) + 1));
4691 prev_run_size
= arena_bin_run_size_calc(bin
, prev_run_size
);
4694 memset(&bin
->stats
, 0, sizeof(malloc_bin_stats_t
));
4699 arena
->magic
= ARENA_MAGIC
;
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(sizeof(arena_t)
	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
	if (ret != NULL && arena_new(ret) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	_malloc_message(_getprogname(),
	    ": (malloc) Error initializing arena\n", "", "");

	return (arenas[0]);
}
4739 * Begin general internal functions.
static void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
#ifdef MALLOC_DECOMMIT
	size_t psize;
#endif
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, zero, true);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
#ifdef MALLOC_DECOMMIT
	psize = PAGE_CEILING(size);
	node->size = psize;
#else
	node->size = csize;
#endif

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef MALLOC_STATS
	huge_nmalloc++;
# ifdef MALLOC_DECOMMIT
	huge_allocated += psize;
# else
	huge_allocated += csize;
# endif
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef MALLOC_DECOMMIT
	if (csize - psize > 0)
		pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
#endif

#ifdef MALLOC_DECOMMIT
	VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, zero);
#else
	VALGRIND_MALLOCLIKE_BLOCK(ret, csize, 0, zero);
#endif

#ifdef MALLOC_FILL
	if (zero == false) {
		if (opt_junk)
# ifdef MALLOC_DECOMMIT
			memset(ret, 0xa5, psize);
# else
			memset(ret, 0xa5, csize);
# endif
		else if (opt_zero)
# ifdef MALLOC_DECOMMIT
			memset(ret, 0, psize);
# else
			memset(ret, 0, csize);
# endif
	}
#endif

	return (ret);
}
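/*
 * A rough worked example of the rounding above (assuming, for illustration,
 * 1 MB chunks and 4 kB pages): huge_malloc(1300000, false) computes
 *
 *	csize = CHUNK_CEILING(1300000) = 2097152   (whole chunks mapped)
 *	psize = PAGE_CEILING(1300000)  = 1302528   (pages actually needed)
 *
 * and, when MALLOC_DECOMMIT is enabled, the trailing csize - psize bytes are
 * immediately decommitted by the pages_decommit() call above.
 */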
/* Only handles large allocations that require more than chunk alignment. */
static void *
huge_palloc(size_t alignment, size_t size)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
#ifdef MALLOC_DECOMMIT
	size_t psize;
#endif
#ifdef MALLOC_PAGEFILE
	int pfd;
#endif
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment >= chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Windows requires that there be a 1:1 mapping between VM
	 * allocation/deallocation operations.  Therefore, take care here to
	 * acquire the final result via one mapping operation.
	 *
	 * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
	 * since it reduces the number of page files.
	 */
#ifdef MALLOC_PAGEFILE
	if (opt_pagefile) {
		pfd = pagefile_init(size);
		if (pfd == -1)
			return (NULL);
	} else
		pfd = -1;
#endif
	do {
		void *over;

		over = chunk_alloc(alloc_size, false, false);
		if (over == NULL) {
			base_node_dealloc(node);
			ret = NULL;
			goto RETURN;
		}

		offset = (uintptr_t)over & (alignment - 1);
		assert((offset & chunksize_mask) == 0);
		assert(offset < alloc_size);
		ret = (void *)((uintptr_t)over + offset);
		chunk_dealloc(over, alloc_size);
		ret = pages_map(ret, chunk_size, pfd);
		/*
		 * Failure here indicates a race with another thread, so try
		 * again.
		 */
	} while (ret == NULL);

	/* Insert node into huge. */
	node->addr = ret;
#ifdef MALLOC_DECOMMIT
	psize = PAGE_CEILING(size);
	node->size = psize;
#else
	node->size = chunk_size;
#endif

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef MALLOC_STATS
	huge_nmalloc++;
# ifdef MALLOC_DECOMMIT
	huge_allocated += psize;
# else
	huge_allocated += chunk_size;
# endif
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef MALLOC_DECOMMIT
	if (chunk_size - psize > 0) {
		pages_decommit((void *)((uintptr_t)ret + psize),
		    chunk_size - psize);
	}
#endif

#ifdef MALLOC_DECOMMIT
	VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, false);
#else
	VALGRIND_MALLOCLIKE_BLOCK(ret, chunk_size, 0, false);
#endif

#ifdef MALLOC_FILL
	if (opt_junk)
# ifdef MALLOC_DECOMMIT
		memset(ret, 0xa5, psize);
# else
		memset(ret, 0xa5, chunk_size);
# endif
	else if (opt_zero)
# ifdef MALLOC_DECOMMIT
		memset(ret, 0, psize);
# else
		memset(ret, 0, chunk_size);
# endif
#endif

RETURN:
#ifdef MALLOC_PAGEFILE
	if (pfd != -1)
		pagefile_close(pfd);
#endif
	return (ret);
}
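/*
 * A rough worked example of the over-allocation above (assuming, for
 * illustration, 1 MB chunks): huge_palloc(4 MB alignment, 1 MB size) computes
 *
 *	chunk_size = CHUNK_CEILING(1 MB)          = 1 MB
 *	alloc_size = (alignment << 1) - chunksize = 7 MB
 *
 * The 7 MB trial mapping is guaranteed to contain a suitably aligned region
 * with at least chunk_size bytes following it; the trial mapping is released
 * and only the aligned chunk_size region is re-acquired via pages_map().
 */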
static void *
huge_ralloc(void *ptr, size_t size, size_t oldsize)
{
	void *ret;
	size_t copysize;

	/* Avoid moving the allocation if the size class would not change. */

	if (oldsize > arena_maxclass &&
	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
#ifdef MALLOC_DECOMMIT
		size_t psize = PAGE_CEILING(size);
#endif
#ifdef MALLOC_FILL
		if (opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
			    - size);
		}
#endif
#ifdef MALLOC_DECOMMIT
		if (psize < oldsize) {
			extent_node_t *node, key;

			pages_decommit((void *)((uintptr_t)ptr + psize),
			    oldsize - psize);

			/* Update recorded size. */
			malloc_mutex_lock(&huge_mtx);
			key.addr = __DECONST(void *, ptr);
			node = extent_tree_ad_search(&huge, &key);
			assert(node != NULL);
			assert(node->size == oldsize);
# ifdef MALLOC_STATS
			huge_allocated -= oldsize - psize;
# endif
			node->size = psize;
			malloc_mutex_unlock(&huge_mtx);
		} else if (psize > oldsize) {
			extent_node_t *node, key;

			pages_commit((void *)((uintptr_t)ptr + oldsize),
			    psize - oldsize);

			/* Update recorded size. */
			malloc_mutex_lock(&huge_mtx);
			key.addr = __DECONST(void *, ptr);
			node = extent_tree_ad_search(&huge, &key);
			assert(node != NULL);
			assert(node->size == oldsize);
# ifdef MALLOC_STATS
			huge_allocated += psize - oldsize;
# endif
			node->size = psize;
			malloc_mutex_unlock(&huge_mtx);
		}
#endif
#ifdef MALLOC_FILL
		if (opt_zero && size > oldsize) {
			memset((void *)((uintptr_t)ptr + oldsize), 0, size
			    - oldsize);
		}
#endif
		return (ptr);
	}

	/*
	 * If we get here, then size and oldsize are different enough that we
	 * need to use a different size class.  In that case, fall back to
	 * allocating new space and copying.
	 */
	ret = huge_malloc(size, false);
	if (ret == NULL)
		return (NULL);

	copysize = (size < oldsize) ? size : oldsize;
#ifdef VM_COPY_MIN
	if (copysize >= VM_COPY_MIN)
		pages_copy(ret, ptr, copysize);
	else
#endif
		memcpy(ret, ptr, copysize);

	idalloc(ptr);
	return (ret);
}
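/*
 * A rough worked example of the in-place check above (assuming, for
 * illustration, 1 MB chunks): shrinking a huge allocation from oldsize =
 * 2.5 MB to size = 2.1 MB satisfies CHUNK_CEILING(size) ==
 * CHUNK_CEILING(oldsize) == 3 MB, so the allocation is resized in place
 * (optionally junk-filling and decommitting the tail) and no data is copied.
 * Shrinking to 1.9 MB instead would change the chunk count, so the
 * huge_malloc()/copy/idalloc() fallback is used.
 */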
static void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef MALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	/* Unmap chunk. */
#ifdef MALLOC_FILL
	if (opt_junk)
		memset(node->addr, 0x5a, node->size);
#endif
#ifdef MALLOC_DECOMMIT
	chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
#else
	chunk_dealloc(node->addr, node->size);
#endif
	VALGRIND_FREELIKE_BLOCK(node->addr, 0);

	base_node_dealloc(node);
}
#ifdef MOZ_MEMORY_BSD
static inline unsigned
malloc_ncpus(void)
{
	unsigned ret;
	int mib[2];
	size_t len;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;
	len = sizeof(ret);
	if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
		/* Error. */
		return (1);
	}

	return (ret);
}
#elif (defined(MOZ_MEMORY_LINUX))

static inline unsigned
malloc_ncpus(void)
{
	unsigned ret;
	int fd, nread, column;
	char buf[1];
	static const char matchstr[] = "processor\t:";

	/*
	 * sysconf(3) would be the preferred method for determining the number
	 * of CPUs, but it uses malloc internally, which causes untenable
	 * recursion during malloc initialization.
	 */
	fd = open("/proc/cpuinfo", O_RDONLY);
	if (fd == -1)
		return (1); /* Error. */
	/*
	 * Count the number of occurrences of matchstr at the beginnings of
	 * lines.  This treats hyperthreaded CPUs as multiple processors.
	 */
	column = 0;
	ret = 0;
	while (true) {
		nread = read(fd, &buf, sizeof(buf));
		if (nread <= 0)
			break; /* EOF or error. */

		if (buf[0] == '\n')
			column = 0;
		else if (column != -1) {
			if (buf[0] == matchstr[column]) {
				column++;
				if (column == sizeof(matchstr) - 1) {
					column = -1;
					ret++;
				}
			} else
				column = -1;
		}
	}
	if (ret == 0)
		ret = 1; /* Something went wrong in the parser. */
	close(fd);

	return (ret);
}
#elif (defined(MOZ_MEMORY_DARWIN))
#include <mach/mach_init.h>
#include <mach/mach_host.h>

static inline unsigned
malloc_ncpus(void)
{
	kern_return_t error;
	natural_t n;
	processor_info_array_t pinfo;
	mach_msg_type_number_t pinfocnt;

	error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
	    &n, &pinfo, &pinfocnt);
	if (error != KERN_SUCCESS)
		return (1); /* Error. */
	else
		return (n);
}
#elif (defined(MOZ_MEMORY_SOLARIS))

static inline unsigned
malloc_ncpus(void)
{
	return sysconf(_SC_NPROCESSORS_ONLN);
}
#else
static inline unsigned
malloc_ncpus(void)
{
	/*
	 * We lack a way to determine the number of CPUs on this platform, so
	 * assume 1 CPU.
	 */
	return (1);
}
#endif
static void
malloc_print_stats(void)
{

	if (opt_print_stats) {
		char s[UMAX2S_BUFSIZE];
		_malloc_message("___ Begin malloc statistics ___\n", "", "",
		    "");
		_malloc_message("Assertions ",
#ifdef NDEBUG
		    "disabled",
#else
		    "enabled",
#endif
		    "\n", "");
		_malloc_message("Boolean MALLOC_OPTIONS: ",
		    opt_abort ? "A" : "a", "", "");
#ifdef MALLOC_FILL
		_malloc_message(opt_junk ? "J" : "j", "", "", "");
#endif
#ifdef MALLOC_PAGEFILE
		_malloc_message(opt_pagefile ? "o" : "O", "", "", "");
#endif
		_malloc_message("P", "", "", "");
#ifdef MALLOC_UTRACE
		_malloc_message(opt_utrace ? "U" : "u", "", "", "");
#endif
#ifdef MALLOC_SYSV
		_malloc_message(opt_sysv ? "V" : "v", "", "", "");
#endif
#ifdef MALLOC_XMALLOC
		_malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
#endif
#ifdef MALLOC_FILL
		_malloc_message(opt_zero ? "Z" : "z", "", "", "");
#endif
		_malloc_message("\n", "", "", "");

		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
#ifdef MALLOC_BALANCE
		_malloc_message("Arena balance threshold: ",
		    umax2s(opt_balance_threshold, s), "\n", "");
#endif
		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
		    "\n", "");
		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
		    "");
		_malloc_message("Max dirty pages per arena: ",
		    umax2s(opt_dirty_max, s), "\n", "");

		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");

#ifdef MALLOC_STATS
		{
			size_t allocated, mapped;
#ifdef MALLOC_BALANCE
			uint64_t nbalance = 0;
#endif
			unsigned i;
			arena_t *arena;

			/* Calculate and print allocated/mapped stats. */

			/* arenas. */
			for (i = 0, allocated = 0; i < narenas; i++) {
				if (arenas[i] != NULL) {
					malloc_spin_lock(&arenas[i]->lock);
					allocated +=
					    arenas[i]->stats.allocated_small;
					allocated +=
					    arenas[i]->stats.allocated_large;
#ifdef MALLOC_BALANCE
					nbalance += arenas[i]->stats.nbalance;
#endif
					malloc_spin_unlock(&arenas[i]->lock);
				}
			}

			/* huge/base. */
			malloc_mutex_lock(&huge_mtx);
			allocated += huge_allocated;
			mapped = stats_chunks.curchunks * chunksize;
			malloc_mutex_unlock(&huge_mtx);

			malloc_mutex_lock(&base_mtx);
			mapped += base_mapped;
			malloc_mutex_unlock(&base_mtx);

#ifdef MOZ_MEMORY_WINDOWS
			malloc_printf("Allocated: %lu, mapped: %lu\n",
			    allocated, mapped);
#else
			malloc_printf("Allocated: %zu, mapped: %zu\n",
			    allocated, mapped);
#endif

			malloc_mutex_lock(&reserve_mtx);
			malloc_printf("Reserve:    min          "
			    "cur          max\n");
#ifdef MOZ_MEMORY_WINDOWS
			malloc_printf("   %12lu %12lu %12lu\n",
			    CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
			    reserve_cur >> opt_chunk_2pow,
			    reserve_max >> opt_chunk_2pow);
#else
			malloc_printf("   %12zu %12zu %12zu\n",
			    CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
			    reserve_cur >> opt_chunk_2pow,
			    reserve_max >> opt_chunk_2pow);
#endif
			malloc_mutex_unlock(&reserve_mtx);

#ifdef MALLOC_BALANCE
			malloc_printf("Arena balance reassignments: %llu\n",
			    nbalance);
#endif

			/* Print chunk stats. */
			{
				chunk_stats_t chunks_stats;

				malloc_mutex_lock(&huge_mtx);
				chunks_stats = stats_chunks;
				malloc_mutex_unlock(&huge_mtx);

				malloc_printf("chunks: nchunks   "
				    "highchunks    curchunks\n");
				malloc_printf("  %13llu%13lu%13lu\n",
				    chunks_stats.nchunks,
				    chunks_stats.highchunks,
				    chunks_stats.curchunks);
			}

			/* Print huge stats. */
			malloc_printf(
			    "huge: nmalloc      ndalloc    allocated\n");
#ifdef MOZ_MEMORY_WINDOWS
			malloc_printf(" %12llu %12llu %12lu\n",
			    huge_nmalloc, huge_ndalloc, huge_allocated);
#else
			malloc_printf(" %12llu %12llu %12zu\n",
			    huge_nmalloc, huge_ndalloc, huge_allocated);
#endif
			/* Print stats for each arena. */
			for (i = 0; i < narenas; i++) {
				arena = arenas[i];
				if (arena != NULL) {
					malloc_printf(
					    "\narenas[%u]:\n", i);
					malloc_spin_lock(&arena->lock);
					stats_print(arena);
					malloc_spin_unlock(&arena->lock);
				}
			}
		}
#endif /* #ifdef MALLOC_STATS */
		_malloc_message("--- End malloc statistics ---\n", "", "", "");
	}
}
/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN))
#define	malloc_init() false
#else
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
#endif
#ifndef MOZ_MEMORY_WINDOWS
static
#endif
bool
malloc_init_hard(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts;
	long result;
#ifndef MOZ_MEMORY_WINDOWS
	int linklen;
#endif

#ifndef MOZ_MEMORY_WINDOWS
	malloc_mutex_lock(&init_lock);
#endif

	if (malloc_initialized) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock.
		 */
#ifndef MOZ_MEMORY_WINDOWS
		malloc_mutex_unlock(&init_lock);
#endif
		return (false);
	}

#ifdef MOZ_MEMORY_WINDOWS
	/* get a thread local storage index */
	tlsIndex = TlsAlloc();
#endif

	/* Get page size and number of CPUs */
#ifdef MOZ_MEMORY_WINDOWS
	{
		SYSTEM_INFO info;

		GetSystemInfo(&info);
		result = info.dwPageSize;

		pagesize = (unsigned) result;

		ncpus = info.dwNumberOfProcessors;
	}
#else
	ncpus = malloc_ncpus();

	result = sysconf(_SC_PAGESIZE);
	assert(result != -1);

	pagesize = (unsigned) result;
#endif

	/*
	 * We assume that pagesize is a power of 2 when calculating
	 * pagesize_mask and pagesize_2pow.
	 */
	assert(((result - 1) & result) == 0);
	pagesize_mask = result - 1;
	pagesize_2pow = ffs((int)result) - 1;
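	/*
	 * A rough worked example: with a reported page size of 4096,
	 * pagesize_mask becomes 0xfff and pagesize_2pow becomes
	 * ffs(4096) - 1 = 12, so "addr & ~pagesize_mask" and
	 * "size >> pagesize_2pow" can be used throughout the allocator in
	 * place of modulo and division.
	 */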
#ifdef MALLOC_PAGEFILE
	/*
	 * Determine where to create page files.  It is insufficient to
	 * unconditionally use P_tmpdir (typically "/tmp"), since for some
	 * operating systems /tmp is a separate filesystem that is rather
	 * small.  Therefore prefer, in order, the following locations:
	 *
	 * 1) MALLOC_TMPDIR
	 * 2) TMPDIR
	 * 3) P_tmpdir
	 */
	{
		char *s;
		size_t slen;
		static const char suffix[] = "/jemalloc.XXXXXX";

		if ((s = getenv("MALLOC_TMPDIR")) == NULL && (s =
		    getenv("TMPDIR")) == NULL)
			s = P_tmpdir;
		slen = strlen(s);
		if (slen + sizeof(suffix) > sizeof(pagefile_templ)) {
			_malloc_message(_getprogname(),
			    ": (malloc) Page file path too long\n",
			    "", "");
			abort();
		}
		memcpy(pagefile_templ, s, slen);
		memcpy(&pagefile_templ[slen], suffix, sizeof(suffix));
	}
#endif

	for (i = 0; i < 3; i++) {
		unsigned j;

		/* Get runtime configuration. */
		switch (i) {
		case 0:
#ifndef MOZ_MEMORY_WINDOWS
			if ((linklen = readlink("/etc/malloc.conf", buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
#endif
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
#ifndef MOZ_MEMORY_WINDOWS
			}
#endif
			break;
		case 1:
			if (issetugid() == 0 && (opts =
			    getenv("MALLOC_OPTIONS")) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_OPTIONS environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2:
			if (_malloc_options != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = _malloc_options;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		default:
			/* NOTREACHED */
			buf[0] = '\0';
			opts = buf;
			assert(false);
		}

		for (j = 0; opts[j] != '\0'; j++) {
			unsigned k, nreps;
			bool nseen;

			/* Parse repetition count, if any. */
			for (nreps = 0, nseen = false;; j++, nseen = true) {
				switch (opts[j]) {
					case '0': case '1': case '2': case '3':
					case '4': case '5': case '6': case '7':
					case '8': case '9':
						nreps *= 10;
						nreps += opts[j] - '0';
						break;
					default:
						goto MALLOC_OUT;
				}
			}
MALLOC_OUT:
			if (nseen == false)
				nreps = 1;

			for (k = 0; k < nreps; k++) {
				switch (opts[j]) {
				case 'a':
					opt_abort = false;
					break;
				case 'A':
					opt_abort = true;
					break;
				case 'b':
#ifdef MALLOC_BALANCE
					opt_balance_threshold >>= 1;
#endif
					break;
				case 'B':
#ifdef MALLOC_BALANCE
					if (opt_balance_threshold == 0)
						opt_balance_threshold = 1;
					else if ((opt_balance_threshold << 1)
					    > opt_balance_threshold)
						opt_balance_threshold <<= 1;
#endif
					break;
				case 'f':
					opt_dirty_max >>= 1;
					break;
				case 'F':
					if (opt_dirty_max == 0)
						opt_dirty_max = 1;
					else if ((opt_dirty_max << 1) != 0)
						opt_dirty_max <<= 1;
					break;
				case 'g':
					opt_reserve_range_lshift--;
					break;
				case 'G':
					opt_reserve_range_lshift++;
					break;
#ifdef MALLOC_FILL
				case 'j':
					opt_junk = false;
					break;
				case 'J':
					opt_junk = true;
					break;
#endif
				case 'k':
					/*
					 * Chunks always require at least one
					 * header page, so chunks can never be
					 * smaller than two pages.
					 */
					if (opt_chunk_2pow > pagesize_2pow + 1)
						opt_chunk_2pow--;
					break;
				case 'K':
					if (opt_chunk_2pow + 1 <
					    (sizeof(size_t) << 3))
						opt_chunk_2pow++;
					break;
				case 'n':
					opt_narenas_lshift--;
					break;
				case 'N':
					opt_narenas_lshift++;
					break;
#ifdef MALLOC_PAGEFILE
				case 'o':
					/* Do not over-commit. */
					opt_pagefile = true;
					break;
				case 'O':
					/* Allow over-commit. */
					opt_pagefile = false;
					break;
#endif
				case 'p':
					opt_print_stats = false;
					break;
				case 'P':
					opt_print_stats = true;
					break;
				case 'q':
					if (opt_quantum_2pow > QUANTUM_2POW_MIN)
						opt_quantum_2pow--;
					break;
				case 'Q':
					if (opt_quantum_2pow < pagesize_2pow -
					    1)
						opt_quantum_2pow++;
					break;
				case 'r':
					opt_reserve_min_lshift--;
					break;
				case 'R':
					opt_reserve_min_lshift++;
					break;
				case 's':
					if (opt_small_max_2pow >
					    QUANTUM_2POW_MIN)
						opt_small_max_2pow--;
					break;
				case 'S':
					if (opt_small_max_2pow < pagesize_2pow
					    - 1)
						opt_small_max_2pow++;
					break;
#ifdef MALLOC_UTRACE
				case 'u':
					opt_utrace = false;
					break;
				case 'U':
					opt_utrace = true;
					break;
#endif
#ifdef MALLOC_SYSV
				case 'v':
					opt_sysv = false;
					break;
				case 'V':
					opt_sysv = true;
					break;
#endif
#ifdef MALLOC_XMALLOC
				case 'x':
					opt_xmalloc = false;
					break;
				case 'X':
					opt_xmalloc = true;
					break;
#endif
#ifdef MALLOC_FILL
				case 'z':
					opt_zero = false;
					break;
				case 'Z':
					opt_zero = true;
					break;
#endif
				default: {
					char cbuf[2];

					cbuf[0] = opts[j];
					cbuf[1] = '\0';
					_malloc_message(_getprogname(),
					    ": (malloc) Unsupported character "
					    "in malloc options: '", cbuf,
					    "'\n");
				}
				}
			}
		}
	}

	/* Take care to call atexit() only once. */
	if (opt_print_stats) {
#ifndef MOZ_MEMORY_WINDOWS
		/* Print statistics at exit. */
		atexit(malloc_print_stats);
#endif
	}
	/* Set variables according to the value of opt_small_max_2pow. */
	if (opt_small_max_2pow < opt_quantum_2pow)
		opt_small_max_2pow = opt_quantum_2pow;
	small_max = (1U << opt_small_max_2pow);

	/* Set bin-related variables. */
	bin_maxclass = (pagesize >> 1);
	assert(opt_quantum_2pow >= TINY_MIN_2POW);
	ntbins = opt_quantum_2pow - TINY_MIN_2POW;
	assert(ntbins <= opt_quantum_2pow);
	nqbins = (small_max >> opt_quantum_2pow);
	nsbins = pagesize_2pow - opt_small_max_2pow - 1;

	/* Set variables according to the value of opt_quantum_2pow. */
	quantum = (1U << opt_quantum_2pow);
	quantum_mask = quantum - 1;
	if (ntbins > 0)
		small_min = (quantum >> 1) + 1;
	else
		small_min = 1;
	assert(small_min <= quantum);
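	/*
	 * A rough worked example (assuming, for illustration, the defaults
	 * opt_quantum_2pow == 4, opt_small_max_2pow == 9, pagesize_2pow == 12
	 * and TINY_MIN_2POW == 1): the expressions above yield small_max =
	 * 512, ntbins = 3, nqbins = 32, nsbins = 2, quantum = 16 and
	 * small_min = 9.  These counts are exactly the number of bins that
	 * arena_new() initializes per arena.
	 */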
	/* Set variables according to the value of opt_chunk_2pow. */
	chunksize = (1LU << opt_chunk_2pow);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> pagesize_2pow);
	{
		size_t header_size;

		/*
		 * Compute the header size such that it is large enough to
		 * contain the page map and enough nodes for the worst case:
		 * one node per non-header page plus one extra for situations
		 * where we briefly have one more node allocated than we will
		 * need.
		 */
		header_size = sizeof(arena_chunk_t) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1));
		arena_chunk_header_npages = (header_size >> pagesize_2pow) +
		    ((header_size & pagesize_mask) != 0);
	}
	arena_maxclass = chunksize - (arena_chunk_header_npages <<
	    pagesize_2pow);
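	/*
	 * A rough worked example (assuming 1 MB chunks and 4 kB pages):
	 * chunk_npages is 256, so header_size is sizeof(arena_chunk_t) plus
	 * 255 map entries.  If that came to, say, a few kilobytes, it would
	 * round up to one or two header pages, and arena_maxclass would be
	 * chunksize minus that header, i.e. slightly under 1 MB; anything
	 * larger is handled by the huge_* functions rather than arena runs.
	 */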
#ifdef MALLOC_STATS
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif

	/* Various sanity checks that regard configuration. */
	assert(quantum >= sizeof(void *));
	assert(quantum <= pagesize);
	assert(chunksize >= pagesize);
	assert(quantum * 4 <= chunksize);

	/* Initialize chunks data. */
	malloc_mutex_init(&huge_mtx);
	extent_tree_ad_new(&huge);
#ifdef MALLOC_STATS
	huge_nmalloc = 0;
	huge_ndalloc = 0;
	huge_allocated = 0;
#endif

	/* Initialize base allocation data structures. */
#ifdef MALLOC_STATS
	base_mapped = 0;
#endif
	base_nodes = NULL;
	base_reserve_regs = NULL;
	malloc_mutex_init(&base_mtx);

#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
	narenas = 1;
#else
	if (ncpus > 1) {
		/*
		 * For SMP systems, create four times as many arenas as there
		 * are CPUs by default.
		 */
		opt_narenas_lshift += 2;
	}

	/* Determine how many arenas to use. */
	narenas = ncpus;
#endif
	if (opt_narenas_lshift > 0) {
		if ((narenas << opt_narenas_lshift) > narenas)
			narenas <<= opt_narenas_lshift;
		/*
		 * Make sure not to exceed the limits of what base_alloc() can
		 * handle.
		 */
		if (narenas * sizeof(arena_t *) > chunksize)
			narenas = chunksize / sizeof(arena_t *);
	} else if (opt_narenas_lshift < 0) {
		if ((narenas >> -opt_narenas_lshift) < narenas)
			narenas >>= -opt_narenas_lshift;
		/* Make sure there is at least one arena. */
		if (narenas == 0)
			narenas = 1;
	}
#ifdef MALLOC_BALANCE
	assert(narenas != 0);
	for (narenas_2pow = 0;
	     (narenas >> (narenas_2pow + 1)) != 0;
	     narenas_2pow++);
#endif
#ifdef NO_TLS
	if (narenas > 1) {
		static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
		    23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
		    89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
		    151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
		    223, 227, 229, 233, 239, 241, 251, 257, 263};
		unsigned nprimes, parenas;

		/*
		 * Pick a prime number of hash arenas that is more than narenas
		 * so that direct hashing of pthread_self() pointers tends to
		 * spread allocations evenly among the arenas.
		 */
		assert((narenas & 1) == 0);	/* narenas must be even. */
		nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
		parenas = primes[nprimes - 1]; /* In case not enough primes. */
		for (i = 1; i < nprimes; i++) {
			if (primes[i] > narenas) {
				parenas = primes[i];
				break;
			}
		}
		narenas = parenas;
	}
#endif

#ifndef NO_TLS
#  ifndef MALLOC_BALANCE
	next_arena = 0;
#  endif
#endif
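	/*
	 * A rough worked example of the prime selection above: under NO_TLS
	 * with, say, 4 CPUs, narenas starts at 16 after the SMP left shift,
	 * and the loop picks parenas = 17, the first prime greater than 16,
	 * so that direct hashing of pthread_self() values spreads threads
	 * more evenly across arenas than an even arena count would.
	 */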
	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
#ifndef MOZ_MEMORY_WINDOWS
		malloc_mutex_unlock(&init_lock);
#endif
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
#ifndef MOZ_MEMORY_WINDOWS
		malloc_mutex_unlock(&init_lock);
#endif
		return (true);
	}
#ifndef NO_TLS
	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
#ifdef MOZ_MEMORY_WINDOWS
	TlsSetValue(tlsIndex, arenas[0]);
#else
	arenas_map = arenas[0];
#endif
#endif

	/*
	 * Seed here for the initial thread, since choose_arena_hard() is only
	 * called for other threads.  The seed value doesn't really matter.
	 */
#ifdef MALLOC_BALANCE
	SPRN(balance, 42);
#endif

	malloc_spin_init(&arenas_lock);

#ifdef MALLOC_VALIDATE
	chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
	if (chunk_rtree == NULL)
		return (true);
#endif

	/*
	 * Configure and initialize the memory reserve.  This needs to happen
	 * late during initialization, since chunks are allocated.
	 */
	malloc_mutex_init(&reserve_mtx);
	reserve_min = 0;
	reserve_cur = 0;
	reserve_max = 0;
	if (RESERVE_RANGE_2POW_DEFAULT + opt_reserve_range_lshift >= 0) {
		reserve_max += chunksize << (RESERVE_RANGE_2POW_DEFAULT +
		    opt_reserve_range_lshift);
	}
	ql_new(&reserve_regs);
	reserve_seq = 0;
	extent_tree_szad_new(&reserve_chunks_szad);
	extent_tree_ad_new(&reserve_chunks_ad);
	if (RESERVE_MIN_2POW_DEFAULT + opt_reserve_min_lshift >= 0) {
		reserve_min_set(chunksize << (RESERVE_MIN_2POW_DEFAULT +
		    opt_reserve_min_lshift));
	}

	malloc_initialized = true;
#ifndef MOZ_MEMORY_WINDOWS
	malloc_mutex_unlock(&init_lock);
#endif
	return (false);
}

/* XXX Why not just expose malloc_print_stats()? */
#ifdef MOZ_MEMORY_WINDOWS
void
malloc_shutdown()
{

	malloc_print_stats();
}
#endif
/*
 * End general internal functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

/*
 * Inline the standard malloc functions if they are being subsumed by Darwin's
 * zone infrastructure.
 */
#ifdef MOZ_MEMORY_DARWIN
#  define ZONE_INLINE	inline
#else
#  define ZONE_INLINE
#endif

/* Mangle standard interfaces on Darwin, in order to avoid linking problems. */
#ifdef MOZ_MEMORY_DARWIN
#define	malloc(a)	moz_malloc(a)
#define	valloc(a)	moz_valloc(a)
#define	calloc(a, b)	moz_calloc(a, b)
#define	realloc(a, b)	moz_realloc(a, b)
#define	free(a)		moz_free(a)
#endif
ZONE_INLINE
void *
malloc(size_t size)
{
	void *ret;

	if (malloc_init()) {
		ret = NULL;
		goto RETURN;
	}

	if (size == 0) {
#ifdef MALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef MALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	ret = imalloc(size);

RETURN:
	if (ret == NULL) {
#ifdef MALLOC_XMALLOC
		if (opt_xmalloc) {
			_malloc_message(_getprogname(),
			    ": (malloc) Error in malloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

	UTRACE(0, size, ret);
	return (ret);
}
#ifdef MOZ_MEMORY_SOLARIS
#  ifdef __SUNPRO_C
void *
memalign(size_t alignment, size_t size);
#pragma no_inline(memalign)
#  elif (defined(__GNU_C__))
__attribute__((noinline))
#  endif
#else
inline
#endif
void *
memalign(size_t alignment, size_t size)
{
	void *ret;

	assert(((alignment - 1) & alignment) == 0 && alignment >=
	    sizeof(void *));

	if (malloc_init()) {
		ret = NULL;
		goto RETURN;
	}

	ret = ipalloc(alignment, size);

RETURN:
#ifdef MALLOC_XMALLOC
	if (opt_xmalloc && ret == NULL) {
		_malloc_message(_getprogname(),
		    ": (malloc) Error in memalign(): out of memory\n", "", "");
		abort();
	}
#endif

	UTRACE(0, size, ret);
	return (ret);
}
ZONE_INLINE
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *result;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
#ifdef MALLOC_XMALLOC
		if (opt_xmalloc) {
			_malloc_message(_getprogname(),
			    ": (malloc) Error in posix_memalign(): "
			    "invalid alignment\n", "", "");
			abort();
		}
#endif
		return (EINVAL);
	}

#ifdef MOZ_MEMORY_DARWIN
	result = moz_memalign(alignment, size);
#else
	result = memalign(alignment, size);
#endif
	if (result == NULL)
		return (ENOMEM);

	*memptr = result;
	return (0);
}
ZONE_INLINE
void *
valloc(size_t size)
{
#ifdef MOZ_MEMORY_DARWIN
	return (moz_memalign(pagesize, size));
#else
	return (memalign(pagesize, size));
#endif
}
ZONE_INLINE
void *
calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef MALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef MALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	ret = icalloc(num_size);

RETURN:
	if (ret == NULL) {
#ifdef MALLOC_XMALLOC
		if (opt_xmalloc) {
			_malloc_message(_getprogname(),
			    ": (malloc) Error in calloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

	UTRACE(0, num_size, ret);
	return (ret);
}
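/*
 * A rough worked example of the overflow check above: on a 32-bit system,
 * SIZE_T_MAX << 16 masks the upper half of a size_t.  For calloc(70000,
 * 70000) both operands have bits in that upper half and num_size =
 * 4900000000 wraps to 605032704, but 605032704 / 70000 != 70000, so the
 * overflow is caught.  When both operands fit in 16 bits, their product
 * cannot exceed 32 bits, so the division is skipped entirely.
 */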
ZONE_INLINE
void *
realloc(void *ptr, size_t size)
{
	void *ret;

	if (size == 0) {
#ifdef MALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef MALLOC_SYSV
		else {
			if (ptr != NULL)
				idalloc(ptr);
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized);

		ret = iralloc(ptr, size);

		if (ret == NULL) {
#ifdef MALLOC_XMALLOC
			if (opt_xmalloc) {
				_malloc_message(_getprogname(),
				    ": (malloc) Error in realloc(): out of "
				    "memory\n", "", "");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
		if (malloc_init())
			ret = NULL;
		else
			ret = imalloc(size);

		if (ret == NULL) {
#ifdef MALLOC_XMALLOC
			if (opt_xmalloc) {
				_malloc_message(_getprogname(),
				    ": (malloc) Error in realloc(): out of "
				    "memory\n", "", "");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef MALLOC_SYSV
RETURN:
#endif
	UTRACE(ptr, size, ret);
	return (ret);
}

ZONE_INLINE
void
free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		assert(malloc_initialized);

		idalloc(ptr);
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
size_t
malloc_usable_size(const void *ptr)
{

#ifdef MALLOC_VALIDATE
	return (isalloc_validate(ptr));
#else
	assert(ptr != NULL);

	return (isalloc(ptr));
#endif
}
void
jemalloc_stats(jemalloc_stats_t *stats)
{
	size_t i;

	assert(stats != NULL);

	/*
	 * Gather runtime settings.
	 */
	stats->opt_abort = opt_abort;
	stats->opt_junk =
#ifdef MALLOC_FILL
	    opt_junk ? true :
#endif
	    false;
	stats->opt_utrace =
#ifdef MALLOC_UTRACE
	    opt_utrace ? true :
#endif
	    false;
	stats->opt_sysv =
#ifdef MALLOC_SYSV
	    opt_sysv ? true :
#endif
	    false;
	stats->opt_xmalloc =
#ifdef MALLOC_XMALLOC
	    opt_xmalloc ? true :
#endif
	    false;
	stats->opt_zero =
#ifdef MALLOC_FILL
	    opt_zero ? true :
#endif
	    false;
	stats->narenas = narenas;
	stats->balance_threshold =
#ifdef MALLOC_BALANCE
	    opt_balance_threshold
#else
	    SIZE_T_MAX
#endif
	    ;
	stats->quantum = quantum;
	stats->small_max = small_max;
	stats->large_max = arena_maxclass;
	stats->chunksize = chunksize;
	stats->dirty_max = opt_dirty_max;

	malloc_mutex_lock(&reserve_mtx);
	stats->reserve_min = reserve_min;
	stats->reserve_max = reserve_max;
	stats->reserve_cur = reserve_cur;
	malloc_mutex_unlock(&reserve_mtx);

	/*
	 * Gather current memory usage statistics.
	 */
	stats->mapped = 0;
	stats->committed = 0;
	stats->allocated = 0;
	stats->dirty = 0;

	/* Get huge mapped/allocated. */
	malloc_mutex_lock(&huge_mtx);
	stats->mapped += stats_chunks.curchunks * chunksize;
#ifdef MALLOC_DECOMMIT
	stats->committed += huge_allocated;
#endif
	stats->allocated += huge_allocated;
	malloc_mutex_unlock(&huge_mtx);

	/* Get base mapped. */
	malloc_mutex_lock(&base_mtx);
	stats->mapped += base_mapped;
#ifdef MALLOC_DECOMMIT
	stats->committed += base_mapped;
#endif
	malloc_mutex_unlock(&base_mtx);

	/* Iterate over arenas and their chunks. */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			arena_chunk_t *chunk;

			malloc_spin_lock(&arena->lock);
			stats->allocated += arena->stats.allocated_small;
			stats->allocated += arena->stats.allocated_large;
#ifdef MALLOC_DECOMMIT
			rb_foreach_begin(arena_chunk_t, link_dirty,
			    &arena->chunks_dirty, chunk) {
				size_t j;

				for (j = 0; j < chunk_npages; j++) {
					if ((chunk->map[j].bits &
					    CHUNK_MAP_DECOMMITTED) == 0)
						stats->committed += pagesize;
				}
			} rb_foreach_end(arena_chunk_t, link_dirty,
			    &arena->chunks_dirty, chunk)
#endif
			stats->dirty += (arena->ndirty << pagesize_2pow);
			malloc_spin_unlock(&arena->lock);
		}
	}

#ifndef MALLOC_DECOMMIT
	stats->committed = stats->mapped;
#endif
}
void *
xmalloc(size_t size)
{
	void *ret;

	if (malloc_init())
		reserve_fail(size, "xmalloc");

	if (size == 0) {
#ifdef MALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef MALLOC_SYSV
		else {
			_malloc_message(_getprogname(),
			    ": (malloc) Error in xmalloc(): ",
			    "invalid size 0", "\n");
			abort();
		}
#endif
	}

	ret = imalloc(size);
	if (ret == NULL) {
		uint64_t seq = 0;

		do {
			seq = reserve_crit(size, "xmalloc", seq);
			ret = imalloc(size);
		} while (ret == NULL);
	}

	UTRACE(0, size, ret);
	return (ret);
}
void *
xcalloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;

	num_size = num * size;
	if (malloc_init())
		reserve_fail(num_size, "xcalloc");

	if (num_size == 0) {
#ifdef MALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef MALLOC_SYSV
		else {
			_malloc_message(_getprogname(),
			    ": (malloc) Error in xcalloc(): ",
			    "invalid size 0", "\n");
			abort();
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		_malloc_message(_getprogname(),
		    ": (malloc) Error in xcalloc(): ",
		    "size overflow", "\n");
		abort();
	}

	ret = icalloc(num_size);
	if (ret == NULL) {
		uint64_t seq = 0;

		do {
			seq = reserve_crit(num_size, "xcalloc", seq);
			ret = icalloc(num_size);
		} while (ret == NULL);
	}

	UTRACE(0, num_size, ret);
	return (ret);
}
void *
xrealloc(void *ptr, size_t size)
{
	void *ret;

	if (size == 0) {
#ifdef MALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef MALLOC_SYSV
		else {
			if (ptr != NULL)
				idalloc(ptr);
			_malloc_message(_getprogname(),
			    ": (malloc) Error in xrealloc(): ",
			    "invalid size 0", "\n");
			abort();
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized);

		ret = iralloc(ptr, size);
		if (ret == NULL) {
			uint64_t seq = 0;

			do {
				seq = reserve_crit(size, "xrealloc", seq);
				ret = iralloc(ptr, size);
			} while (ret == NULL);
		}
	} else {
		if (malloc_init())
			reserve_fail(size, "xrealloc");

		ret = imalloc(size);
		if (ret == NULL) {
			uint64_t seq = 0;

			do {
				seq = reserve_crit(size, "xrealloc", seq);
				ret = imalloc(size);
			} while (ret == NULL);
		}
	}

	UTRACE(ptr, size, ret);
	return (ret);
}
void *
xmemalign(size_t alignment, size_t size)
{
	void *ret;

	assert(((alignment - 1) & alignment) == 0 && alignment >=
	    sizeof(void *));

	if (malloc_init())
		reserve_fail(size, "xmemalign");

	ret = ipalloc(alignment, size);
	if (ret == NULL) {
		uint64_t seq = 0;

		do {
			seq = reserve_crit(size, "xmemalign", seq);
			ret = ipalloc(alignment, size);
		} while (ret == NULL);
	}

	UTRACE(0, size, ret);
	return (ret);
}
static void
reserve_shrink(void)
{
	extent_node_t *node;

	assert(reserve_cur > reserve_max);
#ifdef MALLOC_DEBUG
	{
		extent_node_t *node;
		size_t reserve_size;

		reserve_size = 0;
		rb_foreach_begin(extent_node_t, link_szad,
		    &reserve_chunks_szad, node) {
			reserve_size += node->size;
		} rb_foreach_end(extent_node_t, link_szad,
		    &reserve_chunks_szad, node)
		assert(reserve_size == reserve_cur);

		reserve_size = 0;
		rb_foreach_begin(extent_node_t, link_ad, &reserve_chunks_ad,
		    node) {
			reserve_size += node->size;
		} rb_foreach_end(extent_node_t, link_ad, &reserve_chunks_ad,
		    node)
		assert(reserve_size == reserve_cur);
	}
#endif

	/* Discard chunks until the reserve is below the size limit. */
	rb_foreach_reverse_begin(extent_node_t, link_ad, &reserve_chunks_ad,
	    node) {
#ifndef MALLOC_DECOMMIT
		if (node->size <= reserve_cur - reserve_max) {
#endif
			extent_node_t *tnode = extent_tree_ad_prev(
			    &reserve_chunks_ad, node);

#ifdef MALLOC_DECOMMIT
			assert(node->size <= reserve_cur - reserve_max);
#endif

			/* Discard the entire [multi-]chunk. */
			extent_tree_szad_remove(&reserve_chunks_szad, node);
			extent_tree_ad_remove(&reserve_chunks_ad, node);
			reserve_cur -= node->size;
			pages_unmap(node->addr, node->size);
#ifdef MALLOC_STATS
			stats_chunks.curchunks -= (node->size / chunksize);
#endif
			base_node_dealloc(node);
			if (reserve_cur == reserve_max)
				break;

			rb_foreach_reverse_prev(extent_node_t, link_ad,
			    extent_ad_comp, &reserve_chunks_ad, tnode);
#ifndef MALLOC_DECOMMIT
		} else {
			/* Discard the end of the multi-chunk. */
			extent_tree_szad_remove(&reserve_chunks_szad, node);
			node->size -= reserve_cur - reserve_max;
			extent_tree_szad_insert(&reserve_chunks_szad, node);
			pages_unmap((void *)((uintptr_t)node->addr +
			    node->size), reserve_cur - reserve_max);
#ifdef MALLOC_STATS
			stats_chunks.curchunks -= ((reserve_cur - reserve_max) /
			    chunksize);
#endif
			reserve_cur = reserve_max;
			break;
		}
#endif
		assert(reserve_cur > reserve_max);
	} rb_foreach_reverse_end(extent_node_t, link_ad, &reserve_chunks_ad,
	    node)
}
/* Send a condition notification. */
static uint64_t
reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq)
{
	reserve_reg_t *reg;

	/* seq is used to keep track of distinct condition-causing events. */
	if (seq == 0) {
		/* Allocate new sequence number. */
		reserve_seq++;
		seq = reserve_seq;
	}

	/*
	 * Advance to the next callback registration and send a notification,
	 * unless one has already been sent for this condition-causing event.
	 */
	reg = ql_first(&reserve_regs);
	if (reg == NULL)
		return (0);
	ql_first(&reserve_regs) = ql_next(&reserve_regs, reg, link);
	if (reg->seq == seq)
		return (0);
	reg->seq = seq;
	malloc_mutex_unlock(&reserve_mtx);
	reg->cb(reg->ctx, cnd, size);
	malloc_mutex_lock(&reserve_mtx);

	return (seq);
}
/* Allocation failure due to OOM.  Try to free some memory via callbacks. */
static uint64_t
reserve_crit(size_t size, const char *fname, uint64_t seq)
{

	/*
	 * Send one condition notification.  Iteration is handled by the
	 * caller of this function.
	 */
	malloc_mutex_lock(&reserve_mtx);
	seq = reserve_notify(RESERVE_CND_CRIT, size, seq);
	malloc_mutex_unlock(&reserve_mtx);

	/* If no notification could be sent, then no further recourse exists. */
	if (seq == 0)
		reserve_fail(size, fname);

	return (seq);
}
/* Permanent allocation failure due to OOM. */
static void
reserve_fail(size_t size, const char *fname)
{
	uint64_t seq = 0;

	/* Send fail notifications. */
	malloc_mutex_lock(&reserve_mtx);
	do {
		seq = reserve_notify(RESERVE_CND_FAIL, size, seq);
	} while (seq != 0);
	malloc_mutex_unlock(&reserve_mtx);

	/* Terminate the application. */
	_malloc_message(_getprogname(),
	    ": (malloc) Error in ", fname, "(): out of memory\n");
	abort();
}
int
reserve_cb_register(reserve_cb_t *cb, void *ctx)
{
	reserve_reg_t *reg = base_reserve_reg_alloc();
	if (reg == NULL)
		return (-1);

	ql_elm_new(reg, link);
	reg->cb = cb;
	reg->ctx = ctx;
	reg->seq = 0;

	malloc_mutex_lock(&reserve_mtx);
	ql_head_insert(&reserve_regs, reg, link);
	malloc_mutex_unlock(&reserve_mtx);

	return (0);
}
int
reserve_cb_unregister(reserve_cb_t *cb, void *ctx)
{
	reserve_reg_t *reg = NULL;

	malloc_mutex_lock(&reserve_mtx);
	ql_foreach(reg, &reserve_regs, link) {
		if (reg->cb == cb && reg->ctx == ctx) {
			ql_remove(&reserve_regs, reg, link);
			break;
		}
	}
	malloc_mutex_unlock(&reserve_mtx);

	if (reg != NULL) {
		base_reserve_reg_dealloc(reg);
		return (0);
	}
	return (-1);
}
size_t
reserve_cur_get(void)
{
	size_t ret;

	malloc_mutex_lock(&reserve_mtx);
	ret = reserve_cur;
	malloc_mutex_unlock(&reserve_mtx);

	return (ret);
}
size_t
reserve_min_get(void)
{
	size_t ret;

	malloc_mutex_lock(&reserve_mtx);
	ret = reserve_min;
	malloc_mutex_unlock(&reserve_mtx);

	return (ret);
}
int
reserve_min_set(size_t min)
{

	min = CHUNK_CEILING(min);

	malloc_mutex_lock(&reserve_mtx);
	/* Keep |reserve_max - reserve_min| the same. */
	if (min < reserve_min) {
		reserve_max -= reserve_min - min;
		reserve_min = min;
	} else {
		/* Protect against wrap-around. */
		if (reserve_max + min - reserve_min < reserve_max) {
			reserve_min = SIZE_T_MAX - (reserve_max - reserve_min)
			    - chunksize + 1;
			reserve_max = SIZE_T_MAX - chunksize + 1;
		} else {
			reserve_max += min - reserve_min;
			reserve_min = min;
		}
	}

	/* Resize the reserve if necessary. */
	if (reserve_cur < reserve_min) {
		size_t size = reserve_min - reserve_cur;

		/* Force the reserve to grow by allocating/deallocating. */
		malloc_mutex_unlock(&reserve_mtx);
#ifdef MALLOC_DECOMMIT
		{
			void **chunks;
			size_t i, n;

			n = size >> opt_chunk_2pow;
			chunks = imalloc(n * sizeof(void *));
			if (chunks == NULL)
				return (-1);
			for (i = 0; i < n; i++) {
				chunks[i] = huge_malloc(chunksize, false);
				if (chunks[i] == NULL) {
					size_t j;

					for (j = 0; j < i; j++) {
						huge_dalloc(chunks[j]);
					}
					idalloc(chunks);
					return (-1);
				}
			}
			for (i = 0; i < n; i++)
				huge_dalloc(chunks[i]);
			idalloc(chunks);
		}
#else
		{
			void *x = huge_malloc(size, false);
			if (x == NULL) {
				return (-1);
			}
			huge_dalloc(x);
		}
#endif
	} else if (reserve_cur > reserve_max) {
		reserve_shrink();
		malloc_mutex_unlock(&reserve_mtx);
	} else
		malloc_mutex_unlock(&reserve_mtx);

	return (0);
}
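/*
 * A rough worked example (assuming, for illustration, 1 MB chunks and current
 * settings reserve_min = 2 MB, reserve_max = 6 MB): reserve_min_set(4 MB)
 * rounds min up to a chunk boundary (already 4 MB), then preserves the 4 MB
 * gap by setting reserve_max = 8 MB.  If reserve_cur were 3 MB, the 1 MB
 * shortfall would be forced into the reserve by the huge_malloc()/
 * huge_dalloc() cycle above.
 */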
#ifdef MOZ_MEMORY_WINDOWS
void *
_recalloc(void *ptr, size_t count, size_t size)
{
	size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
	size_t newsize = count * size;

	/*
	 * In order for all trailing bytes to be zeroed, the caller needs to
	 * use calloc(), followed by recalloc().  However, the current calloc()
	 * implementation only zeros the bytes requested, so if recalloc() is
	 * to work 100% correctly, calloc() will need to change to zero
	 * trailing bytes.
	 */

	ptr = realloc(ptr, newsize);
	if (ptr != NULL && oldsize < newsize) {
		memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
		    oldsize);
	}

	return ptr;
}
/*
 * This impl of _expand doesn't ever actually expand or shrink blocks: it
 * simply replies that you may continue using a shrunk block.
 */
void *
_expand(void *ptr, size_t newsize)
{
	if (isalloc(ptr) >= newsize)
		return ptr;

	return NULL;
}
size_t
_msize(const void *ptr)
{

	return malloc_usable_size(ptr);
}
#endif
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin library-private functions, used by threading libraries for protection
 * of malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */
void
_malloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_spin_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_spin_lock(&arenas[i]->lock);
	}
	malloc_spin_unlock(&arenas_lock);

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);
}
void
_malloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	malloc_spin_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_spin_unlock(&arenas[i]->lock);
	}
	malloc_spin_unlock(&arenas_lock);
}
/*
 * End library-private functions.
 */
/******************************************************************************/
#ifdef MOZ_MEMORY_DARWIN
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{

	/*
	 * There appear to be places within Darwin (such as setenv(3)) that
	 * cause calls to this function with pointers that *no* zone owns.  If
	 * we knew that all pointers were owned by *some* zone, we could split
	 * our zone into two parts, and use one as the default allocator and
	 * the other as the default deallocator/reallocator.  Since that will
	 * not work in practice, we must check all pointers to assure that they
	 * reside within a mapped chunk before determining size.
	 */
	return (isalloc_validate(ptr));
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{

	return (malloc(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{

	return (calloc(num, size));
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	posix_memalign(&ret, pagesize, size);

	return (ret);
}

static void
zone_free(malloc_zone_t *zone, void *ptr)
{

	free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

	return (realloc(ptr, size));
}

static void *
zone_destroy(malloc_zone_t *zone)
{

	/* This function should never be called. */
	assert(false);
	return (NULL);
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
	size_t ret;
	void *p;

	/*
	 * Actually create an object of the appropriate size, then find out
	 * how large it could have been without moving up to the next size
	 * class.
	 */
	p = malloc(size);
	if (p != NULL) {
		ret = isalloc(p);
		free(p);
	} else
		ret = size;

	return (ret);
}

static void
zone_force_lock(malloc_zone_t *zone)
{

	_malloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{

	_malloc_postfork();
}

static malloc_zone_t *
create_zone(void)
{

	assert(malloc_initialized);

	zone.size = (void *)zone_size;
	zone.malloc = (void *)zone_malloc;
	zone.calloc = (void *)zone_calloc;
	zone.valloc = (void *)zone_valloc;
	zone.free = (void *)zone_free;
	zone.realloc = (void *)zone_realloc;
	zone.destroy = (void *)zone_destroy;
	zone.zone_name = "jemalloc_zone";
	zone.batch_malloc = NULL;
	zone.batch_free = NULL;
	zone.introspect = &zone_introspect;

	zone_introspect.enumerator = NULL;
	zone_introspect.good_size = (void *)zone_good_size;
	zone_introspect.check = NULL;
	zone_introspect.print = NULL;
	zone_introspect.log = NULL;
	zone_introspect.force_lock = (void *)zone_force_lock;
	zone_introspect.force_unlock = (void *)zone_force_unlock;
	zone_introspect.statistics = NULL;

	return (&zone);
}

__attribute__((constructor))
void
jemalloc_darwin_init(void)
{
	extern unsigned malloc_num_zones;
	extern malloc_zone_t **malloc_zones;

	if (malloc_init_hard())
		abort();

	/*
	 * The following code is *not* thread-safe, so it's critical that
	 * initialization be manually triggered.
	 */

	/* Register the custom zones. */
	malloc_zone_register(create_zone());
	assert(malloc_zones[malloc_num_zones - 1] == &zone);

	/*
	 * Shift malloc_zones around so that zone is first, which makes it the
	 * default zone.
	 */
	assert(malloc_num_zones > 1);
	memmove(&malloc_zones[1], &malloc_zones[0],
	    sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
	malloc_zones[0] = &zone;