/*
 * Copyright (C) 1998-2000 Netscape Communications Corporation.
 *
 * Other contributors:
 *   Nick Blievers <nickb@adacel.com.au>
 *   Jeff Hostetler <jeff@nerdone.com>
 *   Tom Rini <trini@kernel.crashing.org>
 *   Raffaele Sena <raff@netwinder.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Alternatively, the contents of this file may be used under the terms
 * of either the Mozilla Public License Version 1.1, found at
 * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
 * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
 * (the "GPL"), in which case the provisions of the MPL or the GPL are
 * applicable instead of those above. If you wish to allow use of your
 * version of this file only under the terms of one of those two
 * licenses (the MPL or the GPL) and not to allow others to use your
 * version of this file under the LGPL, indicate your decision by
 * deleting the provisions above and replacing them with the notice and
 * other provisions required by the MPL or the GPL, as the case may be.
 * If you do not delete the provisions above, a recipient may use your
 * version of this file under any of the LGPL, the MPL or the GPL.
 */
/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes",
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */

#include "arena.h"

#include <stdlib.h>

#include <QtCore/qglobal.h>

/* Valgrind's mempool client requests, with no-op stand-ins when the
 * header is unavailable (the HAVE_VALGRIND_MEMCHECK_H guard is assumed
 * to come from the build configuration). */
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#else
#define VALGRIND_CREATE_MEMPOOL(base, rzB, zeroed) ((void)0)
#define VALGRIND_MEMPOOL_ALLOC(base, addr, size)   ((void)0)
#define VALGRIND_DESTROY_MEMPOOL(base)             ((void)0)
#endif
#ifdef HAVE_GETPAGESIZE
#include <unistd.h>
#define POOL_SIZE qMax(8192u, 2*( unsigned ) getpagesize())
#else
#define POOL_SIZE 8192
#endif
//#define DEBUG_ARENA_MALLOC
#ifdef DEBUG_ARENA_MALLOC
#include <assert.h>
#include <stdio.h>
static int i = 0;   /* live arena count, for the debug prints below */
#endif
#define FREELIST_MAX 50
#define LARGE_ALLOCATION_CEIL(pool) ((pool)->arenasize * 256)
#define MAX_DISCRETE_ALLOCATION(pool) ((pool)->arenasize * 32)

static Arena *arena_freelist = 0;
static int freelist_count = 0;
#define ARENA_DEFAULT_ALIGN sizeof(double)
#define BIT(n)              ((unsigned int)1 << (n))
#define BITMASK(n)          (BIT(n) - 1)
#define CEILING_LOG2(_log2,_n)                \
    unsigned int j_ = (unsigned int)(_n);     \
    (_log2) = 0;                              \
    if ((j_) & ((j_)-1))                      \
        (_log2) += 1;                         \
    if ((j_) >> 16)                           \
        (_log2) += 16, (j_) >>= 16;           \
    if ((j_) >> 8)                            \
        (_log2) += 8, (j_) >>= 8;             \
    if ((j_) >> 4)                            \
        (_log2) += 4, (j_) >>= 4;             \
    if ((j_) >> 2)                            \
        (_log2) += 2, (j_) >>= 2;             \
    if ((j_) >> 1)                            \
        (_log2) += 1;

int CeilingLog2(unsigned int i) {
    int log2;
    CEILING_LOG2(log2, i);
    return log2;
}
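
/*
 * A quick sanity check of CeilingLog2 (illustrative only, not part of
 * the original file; assumes <assert.h> is available):
 *
 *   assert(CeilingLog2(1) == 0);
 *   assert(CeilingLog2(2) == 1);
 *   assert(CeilingLog2(3) == 2);  // non-powers of two round up
 *   assert(CeilingLog2(8) == 3);
 *   assert(CeilingLog2(9) == 4);
 */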
void InitArenaPool(ArenaPool *pool, const char* /*name*/,
                   unsigned int /*size*/, unsigned int align)
{
    unsigned int size = POOL_SIZE;

    if (align == 0)
        align = ARENA_DEFAULT_ALIGN;
    pool->mask = BITMASK(CeilingLog2(align));
    pool->first.next = NULL;
    pool->first.base = pool->first.avail = pool->first.limit =
        (uword)ARENA_ALIGN(pool, &pool->first + 1);
    pool->current = &pool->first;
    pool->arenasize = size;
    pool->largealloc = LARGE_ALLOCATION_CEIL(pool);
    pool->cumul = 0;
}
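
/*
 * Typical setup, as an illustrative sketch (the name and size arguments
 * are ignored by this implementation, which always uses POOL_SIZE; the
 * "renderer" label below is hypothetical):
 *
 *   ArenaPool pool;
 *   InitArenaPool(&pool, "renderer", 0, sizeof(double));
 *   void* obj = ArenaAllocate(&pool, 64);
 *   ...
 *   FreeArenaPool(&pool);   // recycle the arenas for later reuse
 */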
/*
** ArenaAllocate() -- allocate space from an arena pool
**
** Description: ArenaAllocate() allocates space from an arena
** pool.
**
** First try to satisfy the request from arenas starting at
** pool->current.
**
** If there is not enough space in the arena pool->current, try
** to claim an arena, on a first fit basis, from the global
** freelist (arena_freelist).
**
** If no arena in arena_freelist is suitable, then try to
** allocate a new arena from the heap.
**
** Returns: pointer to allocated space or NULL
*/
void* ArenaAllocate(ArenaPool *pool, unsigned int nb)
{
    Arena *a;
    char *rp;     /* returned pointer */

#ifdef DEBUG_ARENA_MALLOC
    assert((nb & pool->mask) == 0);
#endif

    nb = (uword)ARENA_ALIGN(pool, nb); /* force alignment */
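
    /*
     * Worked example of the alignment math, assuming the default
     * ARENA_DEFAULT_ALIGN of sizeof(double) == 8 (so pool->mask == 7):
     * ARENA_ALIGN rounds the request up to the next multiple of 8,
     * e.g. nb = 13 becomes 16, and nb = 16 stays 16.
     */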
    /* attempt to allocate from arenas at pool->current */
    {
        a = pool->current;
        do {
            if ( a->avail + nb <= a->limit )  {
                pool->current = a;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                return rp;
            }
        } while( NULL != (a = a->next) );
    }
    /* attempt to allocate from arena_freelist */
    {
        Arena *p; /* previous pointer, for unlinking from freelist */

        for ( a = p = arena_freelist; a != NULL; p = a, a = a->next ) {
            if ( a->base + nb <= a->limit )  {
                if ( p == arena_freelist )
                    arena_freelist = a->next;
                else
                    p->next = a->next;
                a->avail = a->base;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                /* the newly allocated arena is linked after pool->current
                 * and becomes pool->current */
                a->next = pool->current->next;
                pool->current->next = a;
                pool->current = a;
                if ( 0 == pool->first.next )
                    pool->first.next = a;
                freelist_count--;
                return rp;
            }
        }
    }
    /* attempt to allocate from the heap */
    {
        unsigned int sz;

        if (pool->cumul > pool->largealloc) {
            // High memory pressure. Switch to a fractional allocation strategy
            // so that malloc gets a chance to successfully trim us down when it's over.
            sz = qMin(pool->cumul/25, MAX_DISCRETE_ALLOCATION(pool));
            sz = qMax(sz, nb); /* the fractional size must still cover the request */
        } else {
            sz = pool->arenasize > nb ? pool->arenasize : nb;
        }
        sz += sizeof *a + pool->mask; /* header and alignment slop */
        pool->cumul += sz;
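
        /*
         * Worked example of the sizing policy (illustrative numbers,
         * using the fallback POOL_SIZE of 8192): largealloc is
         * 8192 * 256 = 2 MiB. Until a pool has drawn that much from the
         * heap, each new arena is arenasize bytes (or nb if larger).
         * Past that point an arena is cumul/25 -- 4% of the pool's
         * footprint -- capped at 8192 * 32 = 256 KiB, so malloc gets
         * back smaller, more recyclable blocks when the pressure ends.
         */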
#ifdef DEBUG_ARENA_MALLOC
        i++;
        printf("Malloc: %d\n", i);
#endif
        a = (Arena*)malloc(sz);
        if (a)  {
            a->limit = (uword)a + sz;
            a->base = a->avail = (uword)ARENA_ALIGN(pool, a + 1);
            VALGRIND_CREATE_MEMPOOL(a->base, 0, 0);
            rp = (char *)a->avail;
            a->avail += nb;
            VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
            /* the newly allocated arena is linked after pool->current
             * and becomes pool->current */
            a->next = pool->current->next;
            pool->current->next = a;
            pool->current = a;
            if ( !pool->first.next )
                pool->first.next = a;
            return rp;
        }
    }
    /* we got to here, and there's no memory to allocate */
    return 0;
} /* --- end ArenaAllocate() --- */
/*
 * Free tail arenas linked after head, which may not be the true list head.
 * Reset pool->current to point to head in case it pointed at a tail arena.
 */
static void FreeArenaList(ArenaPool *pool, Arena *head, bool reallyFree)
{
    Arena **ap, *a;

    ap = &head->next;
    a = *ap;
    if (!a)
        return;

#ifdef DEBUG_ARENA_MALLOC
    do {
        assert(a->base <= a->avail && a->avail <= a->limit);
    } while ((a = a->next) != 0);
    a = *ap;   /* rewind to the first tail arena after the debug sweep */
#endif
    if (freelist_count >= FREELIST_MAX)
        reallyFree = true;

    if (reallyFree) {
        do {
            *ap = a->next;
            VALGRIND_DESTROY_MEMPOOL(a->base);
#ifdef DEBUG_ARENA_MALLOC
            i--;
            printf("Free: %d\n", i);
#endif
            free(a);
        } while ((a = *ap) != 0);
    } else {
        /* Insert as much of the arena chain as we can hold at the front of the freelist. */
        do {
            ap = &(*ap)->next;
            freelist_count++;
        } while (*ap && freelist_count < FREELIST_MAX);
        /* Get rid of excess */
        if (*ap) {
            Arena *xa, *n;
            for (xa = *ap; xa; xa = n) {
                n = xa->next;
                VALGRIND_DESTROY_MEMPOOL(xa->base);
#ifdef DEBUG_ARENA_MALLOC
                i--;
                printf("Free: %d\n", i);
#endif
                free(xa);
            }
        }

        /* Splice the kept arenas onto the global freelist. */
        *ap = arena_freelist;
        arena_freelist = head->next;
        head->next = 0;
    }
    pool->current = head;
}
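
/*
 * Sizing note (illustrative, with the fallback 8 KiB arenasize): the
 * global cache never holds more than FREELIST_MAX == 50 recycled arenas,
 * roughly 50 * 8 KiB = 400 KiB pinned across all pools; everything
 * beyond that is handed straight back to malloc.
 */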
void ArenaRelease(ArenaPool *pool, char *mark)
{
    Arena *a;

    for (a = pool->first.next; a; a = a->next) {
        if (UPTRDIFF(mark, a->base) < UPTRDIFF(a->avail, a->base)) {
            a->avail = (uword)ARENA_ALIGN(pool, mark);
            FreeArenaList(pool, a, false);
            return;
        }
    }
}
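
/*
 * Usage sketch (hypothetical caller): a mark is simply a pointer saved
 * at an earlier allocation point, in the style of NSPR's PL_ARENA_MARK:
 *
 *   char* mark = (char*) pool.current->avail;  // remember high-water mark
 *   ...temporary allocations from the pool...
 *   ArenaRelease(&pool, mark);                 // roll back to the mark
 */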
void FreeArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, false);
}
void FinishArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, true);
}
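
/*
 * Choosing a teardown (illustrative): FreeArenaPool() keeps the pool
 * usable and sends detached arenas to the shared freelist, so it suits
 * a pool that will allocate again soon; FinishArenaPool() really frees
 * every arena and is the right call once the pool is done for good:
 *
 *   FreeArenaPool(&pool);     // e.g. between documents: recycle
 *   FinishArenaPool(&pool);   // e.g. at shutdown: release to malloc
 */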
void ArenaFinish()
{
    Arena *a, *next;

#ifdef DEBUG_ARENA_MALLOC
    printf("releasing global Arena freelist\n");
#endif

    for (a = arena_freelist; a; a = next) {
        next = a->next;
        free(a);
    }
    freelist_count = 0;
    arena_freelist = NULL;
}