/*
 * Copyright (C) 1998-2000 Netscape Communications Corporation.
 *
 * Other contributors:
 *   Nick Blievers <nickb@adacel.com.au>
 *   Jeff Hostetler <jeff@nerdone.com>
 *   Tom Rini <trini@kernel.crashing.org>
 *   Raffaele Sena <raff@netwinder.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Alternatively, the contents of this file may be used under the terms
 * of either the Mozilla Public License Version 1.1, found at
 * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
 * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
 * (the "GPL"), in which case the provisions of the MPL or the GPL are
 * applicable instead of those above.  If you wish to allow use of your
 * version of this file only under the terms of one of those two
 * licenses (the MPL or the GPL) and not to allow others to use your
 * version of this file under the LGPL, indicate your decision by
 * deleting the provisions above and replace them with the notice and
 * other provisions required by the MPL or the GPL, as the case may be.
 * If you do not delete the provisions above, a recipient may use your
 * version of this file under any of the LGPL, the MPL or the GPL.
 */

/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */
#include "arena.h"

#include <config.h>
#include <stdlib.h>
#include <string.h>
#include <kglobal.h>
#ifdef HAVE_GETPAGESIZE
#include <unistd.h>
#define POOL_SIZE qMax(8192u, 2*( unsigned ) getpagesize())
#else
#define POOL_SIZE 8192
#endif
//#define DEBUG_ARENA_MALLOC
#ifdef DEBUG_ARENA_MALLOC
#include <assert.h>
#include <stdio.h>
#endif
namespace khtml {

#ifdef DEBUG_ARENA_MALLOC
static int i = 0;
#endif
#define FREELIST_MAX 50
#define LARGE_ALLOCATION_CEIL(pool) (pool)->arenasize * 256
#define MAX_DISCRETE_ALLOCATION(pool) (pool)->arenasize * 32

static Arena *arena_freelist = 0;
static int freelist_count = 0;
#define ARENA_DEFAULT_ALIGN  sizeof(double)
#define BIT(n)               ((unsigned int)1 << (n))
#define BITMASK(n)           (BIT(n) - 1)
#define CEILING_LOG2(_log2,_n)            \
    unsigned int j_ = (unsigned int)(_n); \
    (_log2) = 0;                          \
    if ((j_) & ((j_)-1))                  \
        (_log2) += 1;                     \
    if ((j_) >> 16)                       \
        (_log2) += 16, (j_) >>= 16;       \
    if ((j_) >> 8)                        \
        (_log2) += 8, (j_) >>= 8;         \
    if ((j_) >> 4)                        \
        (_log2) += 4, (j_) >>= 4;         \
    if ((j_) >> 2)                        \
        (_log2) += 2, (j_) >>= 2;         \
    if ((j_) >> 1)                        \
        (_log2) += 1;
int CeilingLog2(unsigned int i) {
    int log2;
    CEILING_LOG2(log2,i);
    return log2;
}
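
/*
 * Worked example (illustrative): CeilingLog2(8) == 3, since 8 is an exact
 * power of two and the (j_ & (j_-1)) test adds nothing, while
 * CeilingLog2(10) == 4 because the result rounds up. InitArenaPool feeds
 * this through BITMASK, so an alignment of 8 becomes the mask 0x7.
 */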
void InitArenaPool(ArenaPool *pool, const char* /*name*/,
                   unsigned int /*size*/, unsigned int align)
{
    unsigned int size = POOL_SIZE;

    if (align == 0)
        align = ARENA_DEFAULT_ALIGN;
    pool->mask = BITMASK(CeilingLog2(align));
    pool->first.next = NULL;
    pool->first.base = pool->first.avail = pool->first.limit =
        (uword)ARENA_ALIGN(pool, &pool->first + 1);
    pool->current = &pool->first;
    pool->arenasize = size;
    pool->largealloc = LARGE_ALLOCATION_CEIL(pool);
    pool->cumul = 0;
}
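
/*
 * Minimal usage sketch (illustrative only; the 'name' and 'size' arguments
 * are ignored by this implementation, and request sizes are expected to be
 * multiples of the pool alignment, per the assert in ArenaAllocate):
 *
 *     ArenaPool pool;
 *     InitArenaPool(&pool, "example", 0, 8);   // 8-byte alignment
 *     void* obj = ArenaAllocate(&pool, 64);    // carved out of pool->current
 *     // ... more allocations, all released in one shot below ...
 *     FreeArenaPool(&pool);    // recycles arenas onto the global freelist
 *     FinishArenaPool(&pool);  // really returns them to the heap
 */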
/*
 ** ArenaAllocate() -- allocate space from an arena pool
 **
 ** Description: ArenaAllocate() allocates space from an arena
 ** pool.
 **
 ** First try to satisfy the request from arenas starting at
 ** pool->current.
 **
 ** If there is not enough space in the arena pool->current, try
 ** to claim an arena, on a first fit basis, from the global
 ** freelist (arena_freelist).
 **
 ** If no arena in arena_freelist is suitable, then try to
 ** allocate a new arena from the heap.
 **
 ** Returns: pointer to allocated space or NULL
 */
void* ArenaAllocate(ArenaPool *pool, unsigned int nb)
{
    Arena *a;
    char *rp;     /* returned pointer */

#ifdef DEBUG_ARENA_MALLOC
    assert((nb & pool->mask) == 0);
#endif

    nb = (uword)ARENA_ALIGN(pool, nb); /* force alignment */

    /* attempt to allocate from arenas at pool->current */
    {
        a = pool->current;
        do {
            if ( a->avail +nb <= a->limit ) {
                pool->current = a;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                return rp;
            }
        } while( NULL != (a = a->next) );
    }
    /* attempt to allocate from arena_freelist */
    {
        Arena *p; /* previous pointer, for unlinking from freelist */

        for ( a = p = arena_freelist; a != NULL ; p = a, a = a->next ) {
            if ( a->base +nb <= a->limit ) {
                if ( p == arena_freelist )
                    arena_freelist = a->next;
                else
                    p->next = a->next;
                a->avail = a->base;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                /* the newly allocated arena is linked after pool->current
                 * and becomes pool->current */
                a->next = pool->current->next;
                pool->current->next = a;
                pool->current = a;
                if ( 0 == pool->first.next )
                    pool->first.next = a;
                freelist_count--;
                return(rp);
            }
        }
    }
    /* attempt to allocate from the heap */
    {
        unsigned int sz;
#ifdef HAVE_MMAP
        if (pool->cumul > pool->largealloc) {
            // High memory pressure. Switch to a fractional allocation strategy
            // so that malloc gets a chance to successfully trim us down when it's over.
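            // (Illustrative arithmetic, with the default 8 KiB arenasize: this
            // path kicks in once cumul exceeds 8192*256 = 2 MiB, after which
            // each new chunk is cumul/25, i.e. ~4% of the cumulative footprint,
            // capped at MAX_DISCRETE_ALLOCATION = 8192*32 = 256 KiB.)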
            sz = qMin(pool->cumul/25, MAX_DISCRETE_ALLOCATION(pool));
        } else
#endif
            sz = pool->arenasize > nb ? pool->arenasize : nb;
        sz += sizeof *a + pool->mask;  /* header and alignment slop */
        pool->cumul += sz;
#ifdef DEBUG_ARENA_MALLOC
        i++;
        printf("Malloc: %d\n", i);
#endif
        a = (Arena*)malloc(sz);
        if (a) {
            a->limit = (uword)a + sz;
            a->base = a->avail = (uword)ARENA_ALIGN(pool, a + 1);
            VALGRIND_CREATE_MEMPOOL(a->base, 0, 0);
            rp = (char *)a->avail;
            a->avail += nb;
            VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);

            /* the newly allocated arena is linked after pool->current
             * and becomes pool->current */
            a->next = pool->current->next;
            pool->current->next = a;
            pool->current = a;
            if ( !pool->first.next )
                pool->first.next = a;
            return(rp);
        }
    }

    /* we got to here, and there's no memory to allocate */
    return(0);
} /* --- end ArenaAllocate() --- */
/*
 * Free tail arenas linked after head, which may not be the true list head.
 * Reset pool->current to point to head in case it pointed at a tail arena.
 */
static void FreeArenaList(ArenaPool *pool, Arena *head, bool reallyFree)
{
    Arena **ap, *a;

    ap = &head->next;
    a = *ap;
    if (!a)
        return;

#ifdef DEBUG_ARENA_MALLOC
    do {
        assert(a->base <= a->avail && a->avail <= a->limit);
        a->avail = a->base;
        CLEAR_UNUSED(a);
    } while ((a = a->next) != 0);
    a = *ap;
#endif

    if (freelist_count >= FREELIST_MAX)
        reallyFree = true;
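    /* The global freelist is capped at FREELIST_MAX arenas; once it is full,
     * fall through to returning this chain to the heap instead. */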
    if (reallyFree) {
        do {
            *ap = a->next;
            VALGRIND_DESTROY_MEMPOOL(a->base);
            CLEAR_ARENA(a);
#ifdef DEBUG_ARENA_MALLOC
            if (a) {
                i--;
                printf("Free: %d\n", i);
            }
#endif
            free(a); a = 0;
        } while ((a = *ap) != 0);
    } else {
        /* Insert as much of the arena chain as we can hold at the front of the freelist. */
        do {
            ap = &(*ap)->next;
            freelist_count++;
        } while (*ap && freelist_count < FREELIST_MAX);

        /* Get rid of excess */
        if (*ap) {
            Arena *xa, *n;
            for (xa = *ap; xa; xa = n) {
                VALGRIND_DESTROY_MEMPOOL(xa->base);
                n = xa->next;
#ifdef DEBUG_ARENA_MALLOC
                i--;
                printf("Free: %d\n", i);
#endif
                CLEAR_ARENA(xa);
                free(xa);
            }
        }

        *ap = arena_freelist;
        arena_freelist = a;
        head->next = 0;
    }
    pool->current = head;
}
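
/*
 * Usage sketch for ArenaRelease below (illustrative; 'mark' is assumed to
 * be a pointer previously returned by ArenaAllocate on the same pool):
 *
 *     char* mark = (char*)ArenaAllocate(&pool, 8);
 *     // ... further allocations ...
 *     ArenaRelease(&pool, mark);  // drops everything allocated at or after
 *                                 // 'mark' and recycles the tail arenas
 */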
void ArenaRelease(ArenaPool *pool, char *mark)
{
    Arena *a;

    for (a = pool->first.next; a; a = a->next) {
        if (UPTRDIFF(mark, a->base) < UPTRDIFF(a->avail, a->base)) {
            a->avail = (uword)ARENA_ALIGN(pool, mark);
            FreeArenaList(pool, a, false);
            return;
        }
    }
}
void FreeArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, false);
}
void FinishArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, true);
}
void ArenaFinish()
{
    Arena *a, *next;
#ifdef DEBUG_ARENA_MALLOC
    printf("releasing global Arena freelist\n");
#endif
    for (a = arena_freelist; a; a = next) {
        next = a->next;
        free(a); a = 0;
    }
    freelist_count = 0;
    arena_freelist = NULL;
}
} // namespace