[mozilla-1.9/b1.git] / js / src / jsgc.c (firefox-3.0b1 source import)
1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla Communicator client code, released
18 * March 31, 1998.
20 * The Initial Developer of the Original Code is
21 * Netscape Communications Corporation.
22 * Portions created by the Initial Developer are Copyright (C) 1998
23 * the Initial Developer. All Rights Reserved.
25 * Contributor(s):
27 * Alternatively, the contents of this file may be used under the terms of
28 * either of the GNU General Public License Version 2 or later (the "GPL"),
29 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
42 * JS Mark-and-Sweep Garbage Collector.
44 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
45 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
46 * using malloc. It uses an ideally parallel array of flag bytes to hold the
47 * mark bit, finalizer type index, etc.
49 * XXX swizzle page to freelist for better locality of reference
51 #include "jsstddef.h"
52 #include <stdlib.h> /* for free */
53 #include <string.h> /* for memset used when DEBUG */
54 #include "jstypes.h"
55 #include "jsutil.h" /* Added by JSIFY */
56 #include "jshash.h" /* Added by JSIFY */
57 #include "jsapi.h"
58 #include "jsatom.h"
59 #include "jsbit.h"
60 #include "jsclist.h"
61 #include "jscntxt.h"
62 #include "jsconfig.h"
63 #include "jsdbgapi.h"
64 #include "jsexn.h"
65 #include "jsfun.h"
66 #include "jsgc.h"
67 #include "jsinterp.h"
68 #include "jsiter.h"
69 #include "jslock.h"
70 #include "jsnum.h"
71 #include "jsobj.h"
72 #include "jsparse.h"
73 #include "jsscope.h"
74 #include "jsscript.h"
75 #include "jsstr.h"
77 #if JS_HAS_XML_SUPPORT
78 #include "jsxml.h"
79 #endif
82 * Deduce if mmap or similar is available.
84 #ifndef JS_GC_USE_MMAP
85 # if defined(XP_WIN)
86 # define JS_GC_USE_MMAP 1
87 # elif defined(XP_UNIX) || defined(XP_BEOS)
88 # include <unistd.h>
89 # if defined(_POSIX_MAPPED_FILES) && _POSIX_MAPPED_FILES > 0
90 # define JS_GC_USE_MMAP 1
91 # endif
92 # endif
93 #endif
95 #ifndef JS_GC_USE_MMAP
96 # define JS_GC_USE_MMAP 0
97 #endif
99 #if JS_GC_USE_MMAP
100 # if defined(XP_WIN)
101 # include <windows.h>
102 # elif defined(XP_UNIX) || defined(XP_BEOS)
103 # include <sys/mman.h>
105 /* On Mac OS X MAP_ANONYMOUS is not defined. */
106 # if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
107 # define MAP_ANONYMOUS MAP_ANON
108 # endif
110 # endif
111 #endif
114 * A GC arena contains one flag byte for each thing in its heap, and supports
115 * O(1) lookup of a flag given its thing's address.
117 * To implement this, we allocate things of the same size from a GC arena
 118  * containing GC_ARENA_SIZE bytes aligned on a GC_ARENA_SIZE boundary. The
 119  * following picture shows the arena's layout:
121 * +------------------------------+--------------------+---------------+
122 * | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
123 * +------------------------------+--------------------+---------------+
125 * For a GC thing of size thingSize the number of things that the arena can
126 * hold is given by:
127 * (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1)
129 * The address of thing's flag is given by:
130 * flagByteAddress =
131 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) -
132 * (thingAddress & GC_ARENA_MASK) / thingSize
133 * where
134 * (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
135 * is the last byte of flags' area and
136 * (thingAddress & GC_ARENA_MASK) / thingSize
137 * is thing's index counting from arena's start.
139 * Things are allocated from the start of their area and flags are allocated
140 * from the end of their area. This avoids calculating the location of the
141 * boundary separating things and flags.
 143  * The JS_GC_USE_MMAP macro governs the allocation of aligned arenas. When
 144  * the macro is true, platform-specific allocation code such as POSIX mmap is
 145  * used with no extra overhead. If the macro is false, the code uses malloc
 146  * to allocate a chunk of
 147  *   GC_ARENA_SIZE * (js_gcArenasPerChunk + 1)
 148  * bytes. The chunk contains at least js_gcArenasPerChunk aligned arenas, so
 149  * the overhead of this scheme is approximately 1/js_gcArenasPerChunk. See
 150  * NewGCChunk/DestroyGCChunk below for details.
 152  * Note that even when JS_GC_USE_MMAP is true the code still allocates arenas
 153  * in chunks to minimize the overhead of mmap/munmap.
157 * When mmap is available, use the minimal known CPU page size as the size of
158 * GC arena. Otherwise use 1K arenas to minimize the overhead of the aligned
159 * allocation.
161 #if JS_GC_USE_MMAP
162 # define GC_ARENA_SHIFT 12
163 #else
164 # define GC_ARENA_SHIFT 10
165 #endif
167 #define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
168 #define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
170 struct JSGCArenaInfo {
172 * Allocation list for the arena.
174 JSGCArenaList *list;
 177      * Pointer to the previous arena in a linked list. The arena can either
 178      * belong to one of the JSRuntime.gcArenaList lists or, when it does not
 179      * have any allocated GC things, to the list of free arenas in the chunk,
 180      * whose head is stored in JSGCChunkInfo.lastFreeArena.
182 JSGCArenaInfo *prev;
185 * A link field for the list of arenas with marked but not yet traced
186 * things. The field is encoded as arena's page to share the space with
187 * firstArena and arenaIndex fields.
189 jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
192 * When firstArena is false, the index of arena in the chunk. When
193 * firstArena is true, the index of a free arena holding JSGCChunkInfo or
194 * NO_FREE_ARENAS if there are no free arenas in the chunk.
196 * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
 197      * access either of the two indexes.
199 jsuword arenaIndex : GC_ARENA_SHIFT - 1;
202 * Flag indicating if the arena is the first in the chunk.
204 jsuword firstArena : 1;
207 * Bitset for fast search of marked but not yet traced things.
209 jsuword untracedThings;
213 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 214  * as possible. The code does not rely on this check, so if it does not
 215  * compile on a particular platform, comment the assert out as a workaround
 216  * and submit a bug report.
218 JS_STATIC_ASSERT(sizeof(JSGCArenaInfo) == 4 * sizeof(jsuword));
220 #define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
223 * All chunks that have at least one free arena are put on the doubly-linked
224 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
225 * the head of the chunk's free arena list together with the link fields for
226 * gcChunkList.
 228  * The structure is stored in one of the chunk's free arenas; the
 229  * GET_CHUNK_INFO_INDEX macro gives the index of this arena. When all arenas
 230  * in the chunk are used, the chunk is removed from the list and the index is
 231  * set to NO_FREE_ARENAS, indicating that the chunk is not on gcChunkList and
 232  * has no JSGCChunkInfo available.
234 struct JSGCChunkInfo {
235 JSGCChunkInfo **prevp;
236 JSGCChunkInfo *next;
237 JSGCArenaInfo *lastFreeArena;
238 uint32 numFreeArenas;
 242  * Even when mmap is available, its overhead may be too big, so the final
 243  * decision to use it is made at runtime.
245 #if JS_GC_USE_MMAP
246 static uint32 js_gcArenasPerChunk = 0;
247 static JSBool js_gcUseMmap = JS_FALSE;
248 #else
249 # define js_gcArenasPerChunk 31
250 #endif
253 * Macros to convert between JSGCArenaInfo, the start address of the arena and
254 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
256 #define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
258 #define IS_ARENA_INFO_ADDRESS(arena) \
259 (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
261 #define ARENA_START_TO_INFO(arenaStart) \
262 (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
263 (JSGCArenaInfo *) ((arenaStart) + ARENA_INFO_OFFSET))
265 #define ARENA_INFO_TO_START(arena) \
266 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
267 (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
269 #define ARENA_PAGE_TO_INFO(arenaPage) \
270 (JS_ASSERT(arenaPage != 0), \
271 JS_ASSERT(((arenaPage) >> (JS_BITS_PER_WORD - GC_ARENA_SHIFT)) == 0), \
272 ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
274 #define ARENA_INFO_TO_PAGE(arena) \
275 (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
276 ((jsuword) (arena) >> GC_ARENA_SHIFT))
278 #define GET_ARENA_INFO(chunk, index) \
279 (JS_ASSERT((index) < js_gcArenasPerChunk), \
280 ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
283 * Macros to access/modify information about the chunk of GC arenas.
285 #define GET_ARENA_CHUNK(arena, index) \
286 (JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
287 ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
289 #define GET_ARENA_INDEX(arena) \
290 ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
292 #define GET_CHUNK_INFO_INDEX(chunk) \
293 ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
295 #define SET_CHUNK_INFO_INDEX(chunk, index) \
296 (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
297 (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
299 #define GET_CHUNK_INFO(chunk, infoIndex) \
300 (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
301 JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
302 (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
304 #define CHUNK_INFO_TO_INDEX(ci) \
305 GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
308 * Macros for GC-thing operations.
310 #define THINGS_PER_ARENA(thingSize) \
311 ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
313 #define THING_TO_ARENA(thing) \
314 ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
315 1 - sizeof(JSGCArenaInfo)))
317 #define THING_TO_INDEX(thing, thingSize) \
318 ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
320 #define THING_FLAGS_END(arena) ((uint8 *)(arena))
322 #define THING_FLAGP(arena, thingIndex) \
323 (JS_ASSERT((jsuword) (thingIndex) \
324 < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
325 (uint8 *)(arena) - 1 - (thingIndex))
327 #define THING_TO_FLAGP(thing, thingSize) \
328 THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
330 #define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
332 #define FLAGP_TO_INDEX(flagp) \
333 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
334 (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
336 #define FLAGP_TO_THING(flagp, thingSize) \
337 (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
338 (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
339 (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
340 (thingSize) * FLAGP_TO_INDEX(flagp)))
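
/*
 * Illustrative sketch (added, not part of the original source; kept compiled
 * out): a direct transcription of the flag-address formula from the arena
 * layout comment above, which the THING_TO_FLAGP macro implements. The
 * numbers in the comment assume GC_ARENA_SHIFT == 12 and a 32-bit jsuword,
 * so sizeof(JSGCArenaInfo) == 16.
 */
#if 0
static uint8 *
ExampleThingToFlagp(void *thing, size_t thingSize)
{
    jsuword addr = (jsuword) thing;

    /*
     * The last flag byte sits just before JSGCArenaInfo at the arena's end;
     * the thing's index from the arena start selects the byte counting
     * backwards from there. For example, a thing of size 8 at arena offset
     * 0x10 has index 2 and its flag byte at arena offset 4095 - 16 - 2 == 4077.
     */
    return (uint8 *) ((addr | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) -
                      (addr & GC_ARENA_MASK) / thingSize);
}
#endif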
342 #ifdef JS_THREADSAFE
 344  * The maximum number of things to put on the local free list when taking
 345  * several things from the global free list or from the tail of the last
 346  * allocated arena, to amortize the cost of rt->gcLock.
 348  * We use 8 based on benchmarks from bug 312238.
350 #define MAX_THREAD_LOCAL_THINGS 8
352 #endif
354 JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
356 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
357 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
 359 /* We want to use all the available GC thing space for an object's slots. */
360 JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
362 static uint8 GCTypeToTraceKindMap[GCX_NTYPES] = {
363 JSTRACE_OBJECT, /* GCX_OBJECT */
364 JSTRACE_STRING, /* GCX_STRING */
365 JSTRACE_DOUBLE, /* GCX_DOUBLE */
366 JSTRACE_FUNCTION, /* GCX_FUNCTION */
367 JSTRACE_NAMESPACE, /* GCX_NAMESPACE */
368 JSTRACE_QNAME, /* GCX_QNAME */
369 JSTRACE_XML, /* GCX_XML */
370 (uint8)-1, /* unused */
371 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 0 */
372 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 1 */
373 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 2 */
374 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 3 */
375 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 4 */
376 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 5 */
377 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 6 */
378 JSTRACE_STRING, /* GCX_EXTERNAL_STRING + 7 */
 382  * Ensure that JSObject is allocated from a different GC list than jsdouble
 383  * and JSString so that we can finalize JSObject before these two types of
 384  * GC things. See comments in js_GC.
386 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
387 GC_FREELIST_INDEX(sizeof(JSObject)));
388 JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
389 GC_FREELIST_INDEX(sizeof(JSObject)));
 392  * JSPtrTable capacity growth descriptor. The table grows by powers of two
 393  * starting from the capacity JSPtrTableInfo.minCapacity, but switches to
 394  * linear growth when the capacity reaches JSPtrTableInfo.linearGrowthThreshold.
396 typedef struct JSPtrTableInfo {
397 uint16 minCapacity;
398 uint16 linearGrowthThreshold;
399 } JSPtrTableInfo;
401 #define GC_ITERATOR_TABLE_MIN 4
402 #define GC_ITERATOR_TABLE_LINEAR 1024
404 static const JSPtrTableInfo iteratorTableInfo = {
405 GC_ITERATOR_TABLE_MIN,
406 GC_ITERATOR_TABLE_LINEAR
409 /* Calculate table capacity based on the current value of JSPtrTable.count. */
410 static size_t
411 PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
413 size_t linear, log, capacity;
415 linear = info->linearGrowthThreshold;
416 JS_ASSERT(info->minCapacity <= linear);
418 if (count == 0) {
419 capacity = 0;
420 } else if (count < linear) {
421 log = JS_CEILING_LOG2W(count);
422 JS_ASSERT(log != JS_BITS_PER_WORD);
423 capacity = (size_t)1 << log;
424 if (capacity < info->minCapacity)
425 capacity = info->minCapacity;
426 } else {
427 capacity = JS_ROUNDUP(count, linear);
430 JS_ASSERT(capacity >= count);
431 return capacity;
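
/*
 * Illustrative sketch (added, not part of the original source; kept compiled
 * out): the capacity schedule PtrTableCapacity yields for the iterator table
 * parameters above (minCapacity 4, linearGrowthThreshold 1024): power-of-two
 * growth below the threshold, then linear steps of 1024.
 */
#if 0
static void
ExamplePtrTableCapacitySchedule(void)
{
    JS_ASSERT(PtrTableCapacity(0, &iteratorTableInfo) == 0);
    JS_ASSERT(PtrTableCapacity(3, &iteratorTableInfo) == 4);     /* clamped to minCapacity */
    JS_ASSERT(PtrTableCapacity(5, &iteratorTableInfo) == 8);
    JS_ASSERT(PtrTableCapacity(600, &iteratorTableInfo) == 1024);
    JS_ASSERT(PtrTableCapacity(1500, &iteratorTableInfo) == 2048);
}
#endif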
434 static void
435 FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
437 if (table->array) {
438 JS_ASSERT(table->count > 0);
439 free(table->array);
440 table->array = NULL;
441 table->count = 0;
443 JS_ASSERT(table->count == 0);
446 static JSBool
447 AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
448 void *ptr)
450 size_t count, capacity;
451 void **array;
453 count = table->count;
454 capacity = PtrTableCapacity(count, info);
456 if (count == capacity) {
457 if (capacity < info->minCapacity) {
458 JS_ASSERT(capacity == 0);
459 JS_ASSERT(!table->array);
460 capacity = info->minCapacity;
461 } else {
 463              * Simplify the overflow detection assuming a pointer is bigger
 464              * than a byte.
466 JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
467 capacity = (capacity < info->linearGrowthThreshold)
468 ? 2 * capacity
469 : capacity + info->linearGrowthThreshold;
470 if (capacity > (size_t)-1 / sizeof table->array[0])
471 goto bad;
473 array = (void **) realloc(table->array,
474 capacity * sizeof table->array[0]);
475 if (!array)
476 goto bad;
477 #ifdef DEBUG
478 memset(array + count, JS_FREE_PATTERN,
479 (capacity - count) * sizeof table->array[0]);
480 #endif
481 table->array = array;
484 table->array[count] = ptr;
485 table->count = count + 1;
487 return JS_TRUE;
489 bad:
490 JS_ReportOutOfMemory(cx);
491 return JS_FALSE;
494 static void
495 ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
496 size_t newCount)
498 size_t oldCapacity, capacity;
499 void **array;
501 JS_ASSERT(newCount <= table->count);
502 if (newCount == table->count)
503 return;
505 oldCapacity = PtrTableCapacity(table->count, info);
506 table->count = newCount;
507 capacity = PtrTableCapacity(newCount, info);
509 if (oldCapacity != capacity) {
510 array = table->array;
511 JS_ASSERT(array);
512 if (capacity == 0) {
513 free(array);
514 table->array = NULL;
515 return;
517 array = (void **) realloc(array, capacity * sizeof array[0]);
518 if (array)
519 table->array = array;
521 #ifdef DEBUG
522 memset(table->array + newCount, JS_FREE_PATTERN,
523 (capacity - newCount) * sizeof table->array[0]);
524 #endif
527 #ifdef JS_GCMETER
528 # define METER(x) x
529 #else
530 # define METER(x) ((void) 0)
531 #endif
534 * For chunks allocated via over-sized malloc, get a pointer to store the gap
535 * between the malloc's result and the first arena in the chunk.
537 static uint32 *
538 GetMallocedChunkGapPtr(jsuword chunk)
540 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
542 /* Use the memory after the chunk, see NewGCChunk for details. */
543 return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
546 static jsuword
 547 NewGCChunk(void)
549 void *p;
550 jsuword chunk;
552 #if JS_GC_USE_MMAP
553 if (js_gcUseMmap) {
554 # if defined(XP_WIN)
555 p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
556 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
557 return (jsuword) p;
558 # elif defined(XP_UNIX) || defined(XP_BEOS)
559 p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
560 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
561 return (p == MAP_FAILED) ? 0 : (jsuword) p;
562 # else
563 # error "Not implemented"
564 # endif
566 #endif
 569      * Implement the chunk allocation using over-sized malloc if mmap cannot
 570      * be used. FIXME bug 396007: the code should use posix_memalign when it
 571      * is available.
 573      * Since malloc returns pointers aligned on a word boundary, to get
 574      * js_gcArenasPerChunk aligned arenas we would need to malloc only
 575      *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
 576      * bytes. But since we store the gap between the malloced pointer and the
 577      * first arena in the chunk after the chunk, we ask for
 578      *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
 579      * bytes to ensure that we always have room to store the gap.
581 p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
582 if (!p)
583 return 0;
584 chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
585 *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
586 return chunk;
589 static void
590 DestroyGCChunk(jsuword chunk)
592 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
593 #if JS_GC_USE_MMAP
594 if (js_gcUseMmap) {
595 # if defined(XP_WIN)
596 VirtualFree((void *) chunk, 0, MEM_RELEASE);
597 # elif defined(XP_UNIX) || defined(XP_BEOS)
598 munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
599 # else
600 # error "Not implemented"
601 # endif
602 return;
604 #endif
606 /* See comments in NewGCChunk. */
607 JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
608 free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
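
/*
 * Worked example for the malloc fallback above (added illustration, not part
 * of the original source; assumes GC_ARENA_SHIFT == 10, i.e. 1 KB arenas, and
 * js_gcArenasPerChunk == 31): NewGCChunk mallocs 32 KB. If malloc returns a
 * pointer p whose low bits are 0x404, the first aligned arena starts at
 * chunk = p + 0x3FC (low bits 0x800), the gap chunk - p == 0x3FC is stored in
 * the uint32 right after the 31 arenas (at chunk + (31 << 10)), and
 * DestroyGCChunk recovers the malloced pointer as chunk - 0x3FC == p before
 * calling free.
 */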
611 static void
612 AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
614 ci->prevp = &rt->gcChunkList;
615 ci->next = rt->gcChunkList;
616 if (rt->gcChunkList) {
617 JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
618 rt->gcChunkList->prevp = &ci->next;
620 rt->gcChunkList = ci;
623 static void
624 RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
626 *ci->prevp = ci->next;
627 if (ci->next) {
628 JS_ASSERT(ci->next->prevp == &ci->next);
629 ci->next->prevp = ci->prevp;
633 static JSGCArenaInfo *
634 NewGCArena(JSRuntime *rt)
636 jsuword chunk;
637 JSGCChunkInfo *ci;
638 uint32 i;
639 JSGCArenaInfo *a, *aprev;
641 if (js_gcArenasPerChunk == 1) {
642 chunk = NewGCChunk();
643 return (chunk == 0) ? NULL : ARENA_START_TO_INFO(chunk);
646 ci = rt->gcChunkList;
647 if (!ci) {
648 chunk = NewGCChunk();
649 if (chunk == 0)
650 return NULL;
651 JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
652 a = GET_ARENA_INFO(chunk, 0);
653 a->firstArena = JS_TRUE;
654 a->arenaIndex = 0;
655 aprev = NULL;
656 i = 0;
657 do {
658 a->prev = aprev;
659 aprev = a;
660 ++i;
661 a = GET_ARENA_INFO(chunk, i);
662 a->firstArena = JS_FALSE;
663 a->arenaIndex = i;
664 } while (i != js_gcArenasPerChunk - 1);
665 ci = GET_CHUNK_INFO(chunk, 0);
666 ci->lastFreeArena = aprev;
667 ci->numFreeArenas = js_gcArenasPerChunk - 1;
668 AddChunkToList(rt, ci);
669 } else {
670 JS_ASSERT(ci->prevp == &rt->gcChunkList);
671 a = ci->lastFreeArena;
672 aprev = a->prev;
673 if (!aprev) {
674 JS_ASSERT(ci->numFreeArenas == 1);
675 JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
676 RemoveChunkFromList(rt, ci);
677 chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
678 SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
679 } else {
680 JS_ASSERT(ci->numFreeArenas >= 2);
681 JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
682 ci->lastFreeArena = aprev;
683 ci->numFreeArenas--;
687 return a;
690 static void
691 DestroyGCArena(JSRuntime *rt, JSGCArenaInfo *a)
693 uint32 arenaIndex;
694 jsuword chunk;
695 uint32 chunkInfoIndex;
696 JSGCChunkInfo *ci;
698 METER(rt->gcStats.afree++);
700 if (js_gcArenasPerChunk == 1) {
701 DestroyGCChunk(ARENA_INFO_TO_START(a));
702 return;
705 #ifdef DEBUG
707 jsuword firstArena, arenaIndex;
709 firstArena = a->firstArena;
710 arenaIndex = a->arenaIndex;
711 memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
712 GC_ARENA_SIZE);
713 a->firstArena = firstArena;
714 a->arenaIndex = arenaIndex;
716 #endif
718 arenaIndex = GET_ARENA_INDEX(a);
719 chunk = GET_ARENA_CHUNK(a, arenaIndex);
720 chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
721 if (chunkInfoIndex == NO_FREE_ARENAS) {
722 chunkInfoIndex = arenaIndex;
723 SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
724 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
725 a->prev = NULL;
726 ci->lastFreeArena = a;
727 ci->numFreeArenas = 1;
728 AddChunkToList(rt, ci);
729 } else {
730 JS_ASSERT(chunkInfoIndex != arenaIndex);
731 ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
732 JS_ASSERT(ci->numFreeArenas != 0);
733 JS_ASSERT(ci->lastFreeArena);
734 JS_ASSERT(a != ci->lastFreeArena);
735 if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
736 RemoveChunkFromList(rt, ci);
737 DestroyGCChunk(chunk);
738 } else {
739 ++ci->numFreeArenas;
740 a->prev = ci->lastFreeArena;
741 ci->lastFreeArena = a;
746 static void
747 InitGCArenaLists(JSRuntime *rt)
749 uintN i, thingSize;
750 JSGCArenaList *arenaList;
752 for (i = 0; i < GC_NUM_FREELISTS; i++) {
753 arenaList = &rt->gcArenaList[i];
754 thingSize = GC_FREELIST_NBYTES(i);
755 JS_ASSERT((size_t)(uint16)thingSize == thingSize);
756 arenaList->last = NULL;
757 arenaList->lastCount = THINGS_PER_ARENA(thingSize);
758 arenaList->thingSize = (uint16)thingSize;
759 arenaList->freeList = NULL;
760 METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
764 static void
765 FinishGCArenaLists(JSRuntime *rt)
767 uintN i;
768 JSGCArenaList *arenaList;
769 JSGCArenaInfo *a, *aprev;
771 for (i = 0; i < GC_NUM_FREELISTS; i++) {
772 arenaList = &rt->gcArenaList[i];
774 for (a = arenaList->last; a; a = aprev) {
775 aprev = a->prev;
776 DestroyGCArena(rt, a);
778 arenaList->last = NULL;
779 arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
780 arenaList->freeList = NULL;
781 METER(arenaList->stats.narenas = 0);
783 rt->gcBytes = 0;
784 JS_ASSERT(rt->gcChunkList == 0);
787 JS_FRIEND_API(uint8 *)
788 js_GetGCThingFlags(void *thing)
790 JSGCArenaInfo *a;
791 uint32 index;
793 a = THING_TO_ARENA(thing);
794 index = THING_TO_INDEX(thing, a->list->thingSize);
795 return THING_FLAGP(a, index);
798 JSRuntime*
799 js_GetGCStringRuntime(JSString *str)
801 JSGCArenaList *list;
803 list = THING_TO_ARENA(str)->list;
805 JS_ASSERT(list->thingSize == sizeof(JSGCThing));
806 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
808 return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
811 JSBool
812 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
814 uint8 flags = *js_GetGCThingFlags(thing);
816 return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
819 #ifdef DEBUG
820 static const char newborn_external_string[] = "newborn external string";
822 static const char *gc_typenames[GCX_NTYPES] = {
823 "newborn object",
824 "newborn string",
825 "newborn double",
826 "newborn mutable string",
827 "newborn function",
828 "newborn Namespace",
829 "newborn QName",
830 "newborn XML",
831 newborn_external_string,
832 newborn_external_string,
833 newborn_external_string,
834 newborn_external_string,
835 newborn_external_string,
836 newborn_external_string,
837 newborn_external_string,
838 newborn_external_string
840 #endif
842 /* This is compatible with JSDHashEntryStub. */
843 typedef struct JSGCRootHashEntry {
844 JSDHashEntryHdr hdr;
845 void *root;
846 const char *name;
847 } JSGCRootHashEntry;
849 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
850 #define GC_ROOTS_SIZE 256
 853  * For a CPU with extremely large pages, using them for GC things wastes
 854  * too much memory.
856 #define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
858 JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
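
/*
 * Added note (not part of the original source): with GC_ARENA_SHIFT == 12
 * this limit is JS_BIT(6) == 64 arenas, so js_InitGC below chooses paged
 * allocation only when the CPU page size is at most 64 << 12 == 256 KB and
 * otherwise falls back to the malloc-based chunks.
 */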
860 JSBool
861 js_InitGC(JSRuntime *rt, uint32 maxbytes)
863 #if JS_GC_USE_MMAP
864 if (js_gcArenasPerChunk == 0) {
865 size_t cpuPageSize, arenasPerPage;
866 # if defined(XP_WIN)
867 SYSTEM_INFO si;
869 GetSystemInfo(&si);
870 cpuPageSize = si.dwPageSize;
872 # elif defined(XP_UNIX) || defined(XP_BEOS)
873 cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
874 # else
875 # error "Not implemented"
876 # endif
877 /* cpuPageSize is a power of 2. */
878 JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
879 arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
880 #ifdef DEBUG
881 if (arenasPerPage == 0) {
882 fprintf(stderr,
883 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
884 "paged allocation for the garbage collector. Please report this.\n",
885 (unsigned) cpuPageSize);
887 #endif
888 if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
890 * Use at least 4 GC arenas per paged allocation chunk to minimize
891 * the overhead of mmap/VirtualAlloc.
893 js_gcUseMmap = JS_TRUE;
894 js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
895 } else {
896 js_gcUseMmap = JS_FALSE;
897 js_gcArenasPerChunk = 7;
900 #endif
901 JS_ASSERT(1 <= js_gcArenasPerChunk &&
902 js_gcArenasPerChunk <= NO_FREE_ARENAS);
904 InitGCArenaLists(rt);
905 if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
906 sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
907 rt->gcRootsHash.ops = NULL;
908 return JS_FALSE;
910 rt->gcLocksHash = NULL; /* create lazily */
913 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
914 * for default backward API compatibility.
916 rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
918 return JS_TRUE;
921 #ifdef JS_GCMETER
922 JS_FRIEND_API(void)
923 js_DumpGCStats(JSRuntime *rt, FILE *fp)
925 uintN i;
926 size_t thingsPerArena;
927 size_t totalThings, totalMaxThings, totalBytes;
928 size_t sumArenas, sumTotalArenas;
929 size_t sumFreeSize, sumTotalFreeSize;
931 fprintf(fp, "\nGC allocation statistics:\n");
933 #define UL(x) ((unsigned long)(x))
934 #define ULSTAT(x) UL(rt->gcStats.x)
935 totalThings = 0;
936 totalMaxThings = 0;
937 totalBytes = 0;
938 sumArenas = 0;
939 sumTotalArenas = 0;
940 sumFreeSize = 0;
941 sumTotalFreeSize = 0;
942 for (i = 0; i < GC_NUM_FREELISTS; i++) {
943 JSGCArenaList *list = &rt->gcArenaList[i];
944 JSGCArenaStats *stats = &list->stats;
945 if (stats->maxarenas == 0) {
946 fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
947 i, UL(GC_FREELIST_NBYTES(i)));
948 continue;
950 thingsPerArena = THINGS_PER_ARENA(list->thingSize);
951 fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
952 i, UL(GC_FREELIST_NBYTES(i)));
953 fprintf(fp, " arenas: %lu\n", UL(stats->narenas));
954 fprintf(fp, " max arenas: %lu\n", UL(stats->maxarenas));
955 fprintf(fp, " things: %lu\n", UL(stats->nthings));
956 fprintf(fp, " max things: %lu\n", UL(stats->maxthings));
957 fprintf(fp, " free list: %lu\n", UL(stats->freelen));
958 fprintf(fp, " free list density: %.1f%%\n",
959 stats->narenas == 0
960 ? 0.0
961 : 100.0 * stats->freelen / (thingsPerArena * stats->narenas));
962 fprintf(fp, " average free list density: %.1f%%\n",
963 stats->totalarenas == 0
964 ? 0.0
965 : 100.0 * stats->totalfreelen /
966 (thingsPerArena * stats->totalarenas));
967 fprintf(fp, " recycles: %lu\n", UL(stats->recycle));
968 fprintf(fp, " recycle/alloc ratio: %.2f\n",
969 (double) stats->recycle / (stats->totalnew - stats->recycle));
970 totalThings += stats->nthings;
971 totalMaxThings += stats->maxthings;
972 totalBytes += list->thingSize * stats->nthings;
973 sumArenas += stats->narenas;
974 sumTotalArenas += stats->totalarenas;
975 sumFreeSize += list->thingSize * stats->freelen;
976 sumTotalFreeSize += list->thingSize * stats->totalfreelen;
978 fprintf(fp, "TOTAL STATS:\n");
979 fprintf(fp, " bytes allocated: %lu\n", UL(rt->gcBytes));
980 fprintf(fp, " alloc attempts: %lu\n", ULSTAT(alloc));
981 #ifdef JS_THREADSAFE
 982     fprintf(fp, "       alloc without locks: %lu\n", ULSTAT(localalloc));
983 #endif
984 fprintf(fp, " total GC things: %lu\n", UL(totalThings));
985 fprintf(fp, " max total GC things: %lu\n", UL(totalMaxThings));
986 fprintf(fp, " GC things size: %lu\n", UL(totalBytes));
987 fprintf(fp, " total GC arenas: %lu\n", UL(sumArenas));
988 fprintf(fp, " total free list density: %.1f%%\n",
989 sumArenas == 0
990 ? 0.0
991 : 100.0 * sumFreeSize / (sumArenas << GC_ARENA_SHIFT));
992 fprintf(fp, " average free list density: %.1f%%\n",
993 sumTotalFreeSize == 0
994 ? 0.0
995 : 100.0 * sumTotalFreeSize / (sumTotalArenas << GC_ARENA_SHIFT));
996 fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
997 fprintf(fp, " allocation failures: %lu\n", ULSTAT(fail));
998 fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
999 fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
1000 fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
1001 fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
1002 fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
1003 fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
1004 fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
1005 fprintf(fp, " delayed tracing calls: %lu\n", ULSTAT(untraced));
1006 #ifdef DEBUG
1007 fprintf(fp, " max trace later count: %lu\n", ULSTAT(maxuntraced));
1008 #endif
1009 fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
1010 fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
1011 fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
1012 fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
1013 fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
1014 fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
1015 fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
1016 fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
1017 fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
1018 #undef UL
1019 #undef ULSTAT
1021 #ifdef JS_ARENAMETER
1022 JS_DumpArenaStats(fp);
1023 #endif
1025 #endif
1027 #ifdef DEBUG
1028 static void
1029 CheckLeakedRoots(JSRuntime *rt);
1030 #endif
1032 void
1033 js_FinishGC(JSRuntime *rt)
1035 #ifdef JS_ARENAMETER
1036 JS_DumpArenaStats(stdout);
1037 #endif
1038 #ifdef JS_GCMETER
1039 js_DumpGCStats(rt, stdout);
1040 #endif
1042 FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
1043 FinishGCArenaLists(rt);
1045 if (rt->gcRootsHash.ops) {
1046 #ifdef DEBUG
1047 CheckLeakedRoots(rt);
1048 #endif
1049 JS_DHashTableFinish(&rt->gcRootsHash);
1050 rt->gcRootsHash.ops = NULL;
1052 if (rt->gcLocksHash) {
1053 JS_DHashTableDestroy(rt->gcLocksHash);
1054 rt->gcLocksHash = NULL;
1058 JSBool
1059 js_AddRoot(JSContext *cx, void *rp, const char *name)
1061 JSBool ok = js_AddRootRT(cx->runtime, rp, name);
1062 if (!ok)
1063 JS_ReportOutOfMemory(cx);
1064 return ok;
1067 JSBool
1068 js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
1070 JSBool ok;
1071 JSGCRootHashEntry *rhe;
1074 * Due to the long-standing, but now removed, use of rt->gcLock across the
1075 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1076 * properly with a racing GC, without calling JS_AddRoot from a request.
1077 * We have to preserve API compatibility here, now that we avoid holding
1078 * rt->gcLock across the mark phase (including the root hashtable mark).
1080 * If the GC is running and we're called on another thread, wait for this
1081 * GC activation to finish. We can safely wait here (in the case where we
1082 * are called within a request on another thread's context) without fear
1083 * of deadlock because the GC doesn't set rt->gcRunning until after it has
1084 * waited for all active requests to end.
1086 JS_LOCK_GC(rt);
1087 #ifdef JS_THREADSAFE
1088 JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
1089 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
1090 do {
1091 JS_AWAIT_GC_DONE(rt);
1092 } while (rt->gcLevel > 0);
1094 #endif
1095 rhe = (JSGCRootHashEntry *)
1096 JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
1097 if (rhe) {
1098 rhe->root = rp;
1099 rhe->name = name;
1100 ok = JS_TRUE;
1101 } else {
1102 ok = JS_FALSE;
1104 JS_UNLOCK_GC(rt);
1105 return ok;
1108 JSBool
1109 js_RemoveRoot(JSRuntime *rt, void *rp)
1112 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1113 * Same synchronization drill as above in js_AddRoot.
1115 JS_LOCK_GC(rt);
1116 #ifdef JS_THREADSAFE
1117 JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
1118 if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
1119 do {
1120 JS_AWAIT_GC_DONE(rt);
1121 } while (rt->gcLevel > 0);
1123 #endif
1124 (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
1125 rt->gcPoke = JS_TRUE;
1126 JS_UNLOCK_GC(rt);
1127 return JS_TRUE;
1130 #ifdef DEBUG
1132 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
1133 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
1135 uint32 *leakedroots = (uint32 *)arg;
1136 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1138 (*leakedroots)++;
1139 fprintf(stderr,
1140 "JS engine warning: leaking GC root \'%s\' at %p\n",
1141 rhe->name ? (char *)rhe->name : "", rhe->root);
1143 return JS_DHASH_NEXT;
1146 static void
1147 CheckLeakedRoots(JSRuntime *rt)
1149 uint32 leakedroots = 0;
1151     /* Warn (but don't assert) in debug builds about any remaining roots. */
1152 JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
1153 &leakedroots);
1154 if (leakedroots > 0) {
1155 if (leakedroots == 1) {
1156 fprintf(stderr,
1157 "JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
1158 " This root may point to freed memory. Objects reachable\n"
1159 " through it have not been finalized.\n");
1160 } else {
1161 fprintf(stderr,
1162 "JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
1163 " These roots may point to freed memory. Objects reachable\n"
1164 " through them have not been finalized.\n",
1165 (unsigned long) leakedroots);
1170 typedef struct NamedRootDumpArgs {
1171 void (*dump)(const char *name, void *rp, void *data);
1172 void *data;
1173 } NamedRootDumpArgs;
1175 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
1176 js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1177 void *arg)
1179 NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
1180 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1182 if (rhe->name)
1183 args->dump(rhe->name, rhe->root, args->data);
1184 return JS_DHASH_NEXT;
1187 void
1188 js_DumpNamedRoots(JSRuntime *rt,
1189 void (*dump)(const char *name, void *rp, void *data),
1190 void *data)
1192 NamedRootDumpArgs args;
1194 args.dump = dump;
1195 args.data = data;
1196 JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
1199 #endif /* DEBUG */
1201 typedef struct GCRootMapArgs {
1202 JSGCRootMapFun map;
1203 void *data;
1204 } GCRootMapArgs;
1206 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
1207 js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
1208 void *arg)
1210 GCRootMapArgs *args = (GCRootMapArgs *) arg;
1211 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
1212 intN mapflags;
1213 int op;
1215 mapflags = args->map(rhe->root, rhe->name, args->data);
1217 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1218 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1219 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1220 op = (JSDHashOperator)mapflags;
1221 #else
1222 op = JS_DHASH_NEXT;
1223 if (mapflags & JS_MAP_GCROOT_STOP)
1224 op |= JS_DHASH_STOP;
1225 if (mapflags & JS_MAP_GCROOT_REMOVE)
1226 op |= JS_DHASH_REMOVE;
1227 #endif
1229 return (JSDHashOperator) op;
1232 uint32
1233 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
1235 GCRootMapArgs args;
1236 uint32 rv;
1238 args.map = map;
1239 args.data = data;
1240 JS_LOCK_GC(rt);
1241 rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
1242 JS_UNLOCK_GC(rt);
1243 return rv;
1246 JSBool
1247 js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
1249 JSRuntime *rt;
1250 JSBool ok;
1252 rt = cx->runtime;
1253 JS_ASSERT(!rt->gcRunning);
1255 JS_LOCK_GC(rt);
1256 ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
1257 JS_UNLOCK_GC(rt);
1258 return ok;
1261 static void
1262 CloseNativeIterators(JSContext *cx)
1264 JSRuntime *rt;
1265 size_t count, newCount, i;
1266 void **array;
1267 JSObject *obj;
1269 rt = cx->runtime;
1270 count = rt->gcIteratorTable.count;
1271 array = rt->gcIteratorTable.array;
1273 newCount = 0;
1274 for (i = 0; i != count; ++i) {
1275 obj = (JSObject *)array[i];
1276 if (js_IsAboutToBeFinalized(cx, obj))
1277 js_CloseNativeIterator(cx, obj);
1278 else
1279 array[newCount++] = obj;
1281 ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
1284 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1285 #define DEBUG_gchist
1286 #endif
1288 #ifdef DEBUG_gchist
1289 #define NGCHIST 64
1291 static struct GCHist {
1292 JSBool lastDitch;
1293 JSGCThing *freeList;
1294 } gchist[NGCHIST];
1296 unsigned gchpos;
1297 #endif
1299 void *
1300 js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
1302 JSRuntime *rt;
1303 uintN flindex;
1304 JSBool doGC;
1305 JSGCThing *thing;
1306 uint8 *flagp;
1307 JSGCArenaList *arenaList;
1308 JSGCArenaInfo *a;
1309 uintN thingsLimit;
1310 JSLocalRootStack *lrs;
1311 #ifdef JS_THREADSAFE
1312 JSBool gcLocked;
1313 uintN localMallocBytes;
1314 JSGCThing **flbase, **lastptr;
1315 JSGCThing *tmpthing;
1316 uint8 *tmpflagp;
1317 uintN maxFreeThings; /* max to take from the global free list */
1318 METER(size_t nfree);
1319 #endif
1321 rt = cx->runtime;
1322 METER(rt->gcStats.alloc++); /* this is not thread-safe */
1323 nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
1324 flindex = GC_FREELIST_INDEX(nbytes);
1326 #ifdef JS_THREADSAFE
1327 gcLocked = JS_FALSE;
1328 JS_ASSERT(cx->thread);
1329 flbase = cx->thread->gcFreeLists;
1330 JS_ASSERT(flbase);
1331 thing = flbase[flindex];
1332 localMallocBytes = cx->thread->gcMallocBytes;
1333 if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
1334 flagp = thing->flagp;
1335 flbase[flindex] = thing->next;
1336 METER(rt->gcStats.localalloc++); /* this is not thread-safe */
1337 goto success;
1340 JS_LOCK_GC(rt);
1341 gcLocked = JS_TRUE;
1343 /* Transfer thread-local counter to global one. */
1344 if (localMallocBytes != 0) {
1345 cx->thread->gcMallocBytes = 0;
1346 if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
1347 rt->gcMallocBytes = rt->gcMaxMallocBytes;
1348 else
1349 rt->gcMallocBytes += localMallocBytes;
1351 #endif
1352 JS_ASSERT(!rt->gcRunning);
1353 if (rt->gcRunning) {
1354 METER(rt->gcStats.finalfail++);
1355 JS_UNLOCK_GC(rt);
1356 return NULL;
1359 doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke);
1360 #ifdef JS_GC_ZEAL
1361 doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
1362 #endif
1364 arenaList = &rt->gcArenaList[flindex];
1365 for (;;) {
1366 if (doGC) {
1368 * Keep rt->gcLock across the call into js_GC so we don't starve
1369 * and lose to racing threads who deplete the heap just after
1370 * js_GC has replenished it (or has synchronized with a racing
1371 * GC that collected a bunch of garbage). This unfair scheduling
1372 * can happen on certain operating systems. For the gory details,
1373 * see bug 162779 at https://bugzilla.mozilla.org/.
1375 js_GC(cx, GC_LAST_DITCH);
1376 METER(rt->gcStats.retry++);
1379 /* Try to get thing from the free list. */
1380 thing = arenaList->freeList;
1381 if (thing) {
1382 arenaList->freeList = thing->next;
1383 flagp = thing->flagp;
1384 JS_ASSERT(*flagp & GCF_FINAL);
1385 METER(arenaList->stats.freelen--);
1386 METER(arenaList->stats.recycle++);
1388 #ifdef JS_THREADSAFE
1390          * Refill the local free list by taking several things from the
1391          * global free list unless we are still at the rt->gcMaxMallocBytes
1392          * barrier or the free list is already populated. The former
1393          * happens when the GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
1394          * or no gcPoke. The latter is caused by allocating new things
1395          * in gcCallback(cx, JSGC_END).
1397 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
1398 break;
1399 tmpthing = arenaList->freeList;
1400 if (tmpthing) {
1401 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1402 do {
1403 if (!tmpthing->next)
1404 break;
1405 tmpthing = tmpthing->next;
1406 } while (--maxFreeThings != 0);
1408 flbase[flindex] = arenaList->freeList;
1409 arenaList->freeList = tmpthing->next;
1410 tmpthing->next = NULL;
1412 #endif
1413 break;
1417 * Try to allocate things from the last arena. If it is fully used,
1418 * check if we can allocate a new one and, if we cannot, consider
1419 * doing a "last ditch" GC unless already tried.
1421 thingsLimit = THINGS_PER_ARENA(nbytes);
1422 if (arenaList->lastCount != thingsLimit) {
1423 JS_ASSERT(arenaList->lastCount < thingsLimit);
1424 a = arenaList->last;
1425 } else {
1426 if (rt->gcBytes >= rt->gcMaxBytes || !(a = NewGCArena(rt))) {
1427 if (doGC)
1428 goto fail;
1429 doGC = JS_TRUE;
1430 continue;
1433 rt->gcBytes += GC_ARENA_SIZE;
1434 METER(++arenaList->stats.narenas);
1435 METER(arenaList->stats.maxarenas
1436 = JS_MAX(arenaList->stats.maxarenas,
1437 arenaList->stats.narenas));
1439 a->list = arenaList;
1440 a->prev = arenaList->last;
1441 a->prevUntracedPage = 0;
1442 a->untracedThings = 0;
1443 arenaList->last = a;
1444 arenaList->lastCount = 0;
1447 flagp = THING_FLAGP(a, arenaList->lastCount);
1448 thing = FLAGP_TO_THING(flagp, nbytes);
1449 arenaList->lastCount++;
1451 #ifdef JS_THREADSAFE
1453 * Refill the local free list by taking free things from the last
1454 * arena. Prefer to order free things by ascending address in the
1455 * (unscientific) hope of better cache locality.
1457 if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
1458 break;
1459 METER(nfree = 0);
1460 lastptr = &flbase[flindex];
1461 maxFreeThings = thingsLimit - arenaList->lastCount;
1462 if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
1463 maxFreeThings = MAX_THREAD_LOCAL_THINGS;
1464 METER(arenaList->stats.freelen += maxFreeThings);
1465 while (maxFreeThings != 0) {
1466 --maxFreeThings;
1468 tmpflagp = THING_FLAGP(a, arenaList->lastCount);
1469 tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
1470 arenaList->lastCount++;
1471 tmpthing->flagp = tmpflagp;
1472 *tmpflagp = GCF_FINAL; /* signifying that thing is free */
1474 *lastptr = tmpthing;
1475 lastptr = &tmpthing->next;
1477 *lastptr = NULL;
1478 #endif
1479 break;
1482 /* We successfully allocated the thing. */
1483 #ifdef JS_THREADSAFE
1484 success:
1485 #endif
1486 lrs = cx->localRootStack;
1487 if (lrs) {
1489 * If we're in a local root scope, don't set newborn[type] at all, to
1490 * avoid entraining garbage from it for an unbounded amount of time
1491 * on this context. A caller will leave the local root scope and pop
1492 * this reference, allowing thing to be GC'd if it has no other refs.
1493 * See JS_EnterLocalRootScope and related APIs.
1495 if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
1497              * When we fail for a thing allocated through the tail of the last
1498              * arena, the thing's flag byte is not initialized. So to prevent
1499              * the GC from accessing the uninitialized flags during finalization,
1500              * we always mark the thing as final. See bug 337407.
1502 *flagp = GCF_FINAL;
1503 goto fail;
1505 } else {
1507 * No local root scope, so we're stuck with the old, fragile model of
1508 * depending on a pigeon-hole newborn per type per context.
1510 cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
1513 /* We can't fail now, so update flags. */
1514 *flagp = (uint8)flags;
1516 #ifdef DEBUG_gchist
1517 gchist[gchpos].lastDitch = doGC;
1518 gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
1519 if (++gchpos == NGCHIST)
1520 gchpos = 0;
1521 #endif
1522 #ifdef JS_GCMETER
1524 JSGCArenaStats *stats = &rt->gcArenaList[flindex].stats;
1526 /* This is not thread-safe for thread-local allocations. */
1527 if (flags & GCF_LOCK)
1528 rt->gcStats.lockborn++;
1529 stats->totalnew++;
1530 stats->nthings++;
1531 if (stats->nthings > stats->maxthings)
1532 stats->maxthings = stats->nthings;
1534 #endif
1535 #ifdef JS_THREADSAFE
1536 if (gcLocked)
1537 JS_UNLOCK_GC(rt);
1538 #endif
1539 JS_COUNT_OPERATION(cx, JSOW_ALLOCATION);
1540 return thing;
1542 fail:
1543 #ifdef JS_THREADSAFE
1544 if (gcLocked)
1545 JS_UNLOCK_GC(rt);
1546 #endif
1547 METER(rt->gcStats.fail++);
1548 JS_ReportOutOfMemory(cx);
1549 return NULL;
1552 JSBool
1553 js_LockGCThing(JSContext *cx, void *thing)
1555 JSBool ok = js_LockGCThingRT(cx->runtime, thing);
1556 if (!ok)
1557 JS_ReportOutOfMemory(cx);
1558 return ok;
1562 * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
1563 * their descendants must be marked by the GC. To find them during the mark
1564 * phase, they are added to rt->gcLocksHash, which is created lazily.
1566 * NB: we depend on the order of GC-thing type indexes here!
1568 #define GC_TYPE_IS_STRING(t) ((t) == GCX_STRING || \
1569 (t) >= GCX_EXTERNAL_STRING)
1570 #define GC_TYPE_IS_XML(t) ((unsigned)((t) - GCX_NAMESPACE) <= \
1571 (unsigned)(GCX_XML - GCX_NAMESPACE))
1572 #define GC_TYPE_IS_DEEP(t) ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))
1574 #define IS_DEEP_STRING(t,o) (GC_TYPE_IS_STRING(t) && \
1575 JSSTRING_IS_DEPENDENT((JSString *)(o)))
1577 #define GC_THING_IS_DEEP(t,o) (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
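
/*
 * Added note (not part of the original source): GC_TYPE_IS_XML relies on
 * GCX_NAMESPACE, GCX_QNAME and GCX_XML being consecutive type indexes; the
 * unsigned subtraction folds the two comparisons
 *   GCX_NAMESPACE <= t && t <= GCX_XML
 * into a single range check, because values of t below GCX_NAMESPACE wrap
 * around to large unsigned numbers.
 */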
1579 /* This is compatible with JSDHashEntryStub. */
1580 typedef struct JSGCLockHashEntry {
1581 JSDHashEntryHdr hdr;
1582 const JSGCThing *thing;
1583 uint32 count;
1584 } JSGCLockHashEntry;
1586 JSBool
1587 js_LockGCThingRT(JSRuntime *rt, void *thing)
1589 JSBool ok, deep;
1590 uint8 *flagp;
1591 uintN flags, lock, type;
1592 JSGCLockHashEntry *lhe;
1594 ok = JS_TRUE;
1595 if (!thing)
1596 return ok;
1598 flagp = js_GetGCThingFlags(thing);
1600 JS_LOCK_GC(rt);
1601 flags = *flagp;
1602 lock = (flags & GCF_LOCK);
1603 type = (flags & GCF_TYPEMASK);
1604 deep = GC_THING_IS_DEEP(type, thing);
1607 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
1608 * nests a lock -- then start such an entry with a count of 2, not 1.
1610 if (lock || deep) {
1611 if (!rt->gcLocksHash) {
1612 rt->gcLocksHash =
1613 JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
1614 sizeof(JSGCLockHashEntry),
1615 GC_ROOTS_SIZE);
1616 if (!rt->gcLocksHash) {
1617 ok = JS_FALSE;
1618 goto done;
1620 } else if (lock == 0) {
1621 #ifdef DEBUG
1622 JSDHashEntryHdr *hdr =
1623 JS_DHashTableOperate(rt->gcLocksHash, thing,
1624 JS_DHASH_LOOKUP);
1625 JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
1626 #endif
1629 lhe = (JSGCLockHashEntry *)
1630 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
1631 if (!lhe) {
1632 ok = JS_FALSE;
1633 goto done;
1635 if (!lhe->thing) {
1636 lhe->thing = (JSGCThing *) thing;
1637 lhe->count = deep ? 1 : 2;
1638 } else {
1639 JS_ASSERT(lhe->count >= 1);
1640 lhe->count++;
1644 *flagp = (uint8)(flags | GCF_LOCK);
1645 METER(rt->gcStats.lock++);
1646 ok = JS_TRUE;
1647 done:
1648 JS_UNLOCK_GC(rt);
1649 return ok;
1652 JSBool
1653 js_UnlockGCThingRT(JSRuntime *rt, void *thing)
1655 uint8 *flagp, flags;
1656 JSGCLockHashEntry *lhe;
1658 if (!thing)
1659 return JS_TRUE;
1661 flagp = js_GetGCThingFlags(thing);
1662 JS_LOCK_GC(rt);
1663 flags = *flagp;
1665 if (flags & GCF_LOCK) {
1666 if (!rt->gcLocksHash ||
1667 (lhe = (JSGCLockHashEntry *)
1668 JS_DHashTableOperate(rt->gcLocksHash, thing,
1669 JS_DHASH_LOOKUP),
1670 JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
1671 /* Shallow GC-thing with an implicit lock count of 1. */
1672 JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
1673 } else {
1674 /* Basis or nested unlock of a deep thing, or nested of shallow. */
1675 if (--lhe->count != 0)
1676 goto out;
1677 JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
1679 *flagp = (uint8)(flags & ~GCF_LOCK);
1682 rt->gcPoke = JS_TRUE;
1683 out:
1684 METER(rt->gcStats.unlock++);
1685 JS_UNLOCK_GC(rt);
1686 return JS_TRUE;
1689 JS_PUBLIC_API(void)
1690 JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
1692 JSObject *obj;
1693 size_t nslots, i;
1694 jsval v;
1695 JSString *str;
1697 switch (kind) {
1698 case JSTRACE_OBJECT:
1699 /* If obj has no map, it must be a newborn. */
1700 obj = (JSObject *) thing;
1701 if (!obj->map)
1702 break;
1703 if (obj->map->ops->trace) {
1704 obj->map->ops->trace(trc, obj);
1705 } else {
1706 nslots = STOBJ_NSLOTS(obj);
1707 for (i = 0; i != nslots; ++i) {
1708 v = STOBJ_GET_SLOT(obj, i);
1709 if (JSVAL_IS_TRACEABLE(v)) {
1710 JS_SET_TRACING_INDEX(trc, "slot", i);
1711 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v),
1712 JSVAL_TRACE_KIND(v));
1716 break;
1718 case JSTRACE_STRING:
1719 str = (JSString *)thing;
1720 if (JSSTRING_IS_DEPENDENT(str))
1721 JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base");
1722 break;
1724 case JSTRACE_FUNCTION:
1726 * No tracing of JSFunction* instance is done for now. See bug 375808.
1728 break;
1730 #if JS_HAS_XML_SUPPORT
1731 case JSTRACE_NAMESPACE:
1732 js_TraceXMLNamespace(trc, (JSXMLNamespace *)thing);
1733 break;
1735 case JSTRACE_QNAME:
1736 js_TraceXMLQName(trc, (JSXMLQName *)thing);
1737 break;
1739 case JSTRACE_XML:
1740 js_TraceXML(trc, (JSXML *)thing);
1741 break;
1742 #endif
1747 * Number of things covered by a single bit of JSGCArenaInfo.untracedThings.
1749 #define THINGS_PER_UNTRACED_BIT(thingSize) \
1750 JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
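
/*
 * Worked example (added, not part of the original source; assumes a 32-bit
 * word, GC_ARENA_SHIFT == 12 and sizeof(JSGCArenaInfo) == 16): for
 * thingSize == 8, THINGS_PER_ARENA is (4096 - 16) / 9 == 453, so
 * THINGS_PER_UNTRACED_BIT is JS_HOWMANY(453, 32) == 15 and each bit of
 * JSGCArenaInfo.untracedThings covers a run of 15 consecutive things.
 */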
1752 static void
1753 DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
1755 JSGCArenaInfo *a;
1756 uint32 untracedBitIndex;
1757 jsuword bit;
1760 * Things with children to be traced later are marked with
1761 * GCF_MARK | GCF_FINAL flags.
1763 JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
1764 *flagp |= GCF_FINAL;
1766 METER(rt->gcStats.untraced++);
1767 #ifdef DEBUG
1768 ++rt->gcTraceLaterCount;
1769 METER(if (rt->gcTraceLaterCount > rt->gcStats.maxuntraced)
1770 rt->gcStats.maxuntraced = rt->gcTraceLaterCount);
1771 #endif
1773 a = FLAGP_TO_ARENA(flagp);
1774 untracedBitIndex = FLAGP_TO_INDEX(flagp) /
1775 THINGS_PER_UNTRACED_BIT(a->list->thingSize);
1776 JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
1777 bit = (jsuword)1 << untracedBitIndex;
1778 if (a->untracedThings != 0) {
1779 JS_ASSERT(rt->gcUntracedArenaStackTop);
1780 if (a->untracedThings & bit) {
1781 /* bit already covers things with children to trace later. */
1782 return;
1784 a->untracedThings |= bit;
1785 } else {
1787          * The thing is the first thing in the whole arena whose children are
1788          * not yet traced, so push the arena on the stack of arenas with things
1789          * to be traced later unless the arena has already been pushed. We
1790          * detect that by checking prevUntracedPage, as the field is 0 only
1791          * for arenas that have not been pushed yet. To ensure that
1792          *   prevUntracedPage != 0
1793          * even when the stack contains one element, we make prevUntracedPage
1794          * for the arena at the bottom point to itself.
1796 * See comments in TraceDelayedChildren.
1798 a->untracedThings = bit;
1799 if (a->prevUntracedPage == 0) {
1800 if (!rt->gcUntracedArenaStackTop) {
1801 /* Stack was empty, mark the arena as the bottom element. */
1802 a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
1803 } else {
1804 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
1805 a->prevUntracedPage =
1806 ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
1808 rt->gcUntracedArenaStackTop = a;
1811 JS_ASSERT(rt->gcUntracedArenaStackTop);
1814 static void
1815 TraceDelayedChildren(JSTracer *trc)
1817 JSRuntime *rt;
1818 JSGCArenaInfo *a, *aprev;
1819 uint32 thingSize;
1820 uint32 thingsPerUntracedBit;
1821 uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
1822 JSGCThing *thing;
1823 uint8 *flagp;
1825 rt = trc->context->runtime;
1826 a = rt->gcUntracedArenaStackTop;
1827 if (!a) {
1828 JS_ASSERT(rt->gcTraceLaterCount == 0);
1829 return;
1832 for (;;) {
1834          * The following assert verifies that the current arena belongs to the
1835          * untraced stack, since DelayTracingChildren ensures that even for the
1836          * stack's bottom prevUntracedPage is not 0 but points to the arena itself.
1838 JS_ASSERT(a->prevUntracedPage != 0);
1839 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
1840 thingSize = a->list->thingSize;
1841 indexLimit = (a == a->list->last)
1842 ? a->list->lastCount
1843 : THINGS_PER_ARENA(thingSize);
1844 thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
1847          * We cannot use a do-while loop here, as a->untracedThings can be zero
1848          * before the loop as a leftover from previous iterations. See the
1849          * comments after the loop.
1851 while (a->untracedThings != 0) {
1852 untracedBitIndex = JS_FLOOR_LOG2W(a->untracedThings);
1853 a->untracedThings &= ~((jsuword)1 << untracedBitIndex);
1854 thingIndex = untracedBitIndex * thingsPerUntracedBit;
1855 endIndex = thingIndex + thingsPerUntracedBit;
1858 * endIndex can go beyond the last allocated thing as the real
1859 * limit can be "inside" the bit.
1861 if (endIndex > indexLimit)
1862 endIndex = indexLimit;
1863 JS_ASSERT(thingIndex < indexLimit);
1865 do {
1867 * Skip free or already traced things that share the bit
1868 * with untraced ones.
1870 flagp = THING_FLAGP(a, thingIndex);
1871 if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
1872 continue;
1873 *flagp &= ~GCF_FINAL;
1874 #ifdef DEBUG
1875 JS_ASSERT(rt->gcTraceLaterCount != 0);
1876 --rt->gcTraceLaterCount;
1877 #endif
1878 thing = FLAGP_TO_THING(flagp, thingSize);
1879 JS_TraceChildren(trc, thing,
1880 GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK]);
1881 } while (++thingIndex != endIndex);
1885          * We finished tracing of all things in the arena, but we can only
1886          * pop it from the stack if the arena is the stack's top.
1888          * When JS_TraceChildren above calls JS_CallTracer, which on a low C
1889          * stack in turn calls DelayTracingChildren, the latter can push new
1890          * arenas onto the untraced stack. In that case we have to skip popping
1891          * this arena until it becomes the top of the stack again.
1893 if (a == rt->gcUntracedArenaStackTop) {
1894 aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
1895 a->prevUntracedPage = 0;
1896 if (a == aprev) {
1898 * prevUntracedPage points to itself and we reached the
1899 * bottom of the stack.
1901 break;
1903 rt->gcUntracedArenaStackTop = a = aprev;
1904 } else {
1905 a = rt->gcUntracedArenaStackTop;
1908 JS_ASSERT(rt->gcUntracedArenaStackTop);
1909 JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
1910 rt->gcUntracedArenaStackTop = NULL;
1911 JS_ASSERT(rt->gcTraceLaterCount == 0);
1914 JS_PUBLIC_API(void)
1915 JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
1917 JSContext *cx;
1918 JSRuntime *rt;
1919 uint8 *flagp;
1921 JS_ASSERT(thing);
1922 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
1923 JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
1925 if (!IS_GC_MARKING_TRACER(trc)) {
1926 trc->callback(trc, thing, kind);
1927 goto out;
1930 cx = trc->context;
1931 rt = cx->runtime;
1932 JS_ASSERT(rt->gcMarkingTracer == trc);
1933 JS_ASSERT(rt->gcLevel > 0);
1936 * Optimize for string and double as their size is known and their tracing
1937 * is not recursive.
1939 switch (kind) {
1940 case JSTRACE_DOUBLE:
1941 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
1942 JS_ASSERT((*flagp & GCF_FINAL) == 0);
1943 JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind);
1944 if (rt->gcThingCallback)
1945 rt->gcThingCallback(thing, *flagp, rt->gcThingCallbackClosure);
1947 *flagp |= GCF_MARK;
1948 goto out;
1950 case JSTRACE_STRING:
1951 for (;;) {
1952 flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
1953 JS_ASSERT((*flagp & GCF_FINAL) == 0);
1954 JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind);
1955 if (rt->gcThingCallback)
1956 rt->gcThingCallback(thing, *flagp, rt->gcThingCallbackClosure);
1958 if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) {
1959 *flagp |= GCF_MARK;
1960 goto out;
1962 if (*flagp & GCF_MARK)
1963 goto out;
1964 *flagp |= GCF_MARK;
1965 thing = JSSTRDEP_BASE((JSString *) thing);
1967 /* NOTREACHED */
1970 flagp = js_GetGCThingFlags(thing);
1971 JS_ASSERT(GCTypeToTraceKindMap[*flagp & GCF_TYPEMASK] == kind);
1973 if (rt->gcThingCallback)
1974 rt->gcThingCallback(thing, *flagp, rt->gcThingCallbackClosure);
1976 if (*flagp & GCF_MARK)
1977 goto out;
1980 * We check for the non-final flag only if the mark is unset, as
1981 * DelayTracingChildren uses that flag. See comments in that function.
1983 JS_ASSERT(*flagp != GCF_FINAL);
1984 *flagp |= GCF_MARK;
1985 if (!cx->insideGCMarkCallback) {
1987 * With JS_GC_ASSUME_LOW_C_STACK defined, the mark phase of GC always
1988 * uses the non-recursive code that would otherwise be used only under
1989 * a low C stack condition.
1991 #ifdef JS_GC_ASSUME_LOW_C_STACK
1992 # define RECURSION_TOO_DEEP() JS_TRUE
1993 #else
1994 int stackDummy;
1995 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
1996 #endif
1997 if (RECURSION_TOO_DEEP())
1998 DelayTracingChildren(rt, flagp);
1999 else
2000 JS_TraceChildren(trc, thing, kind);
2001 } else {
2003 * For API compatibility we allow for the callback to assume that
2004 * after it calls JS_MarkGCThing for the last time, the callback can
2005 * start to finalize its own objects that are only referenced by
2006 * unmarked GC things.
2008 * Since we do not know which call from inside the callback is the
2009 * last, we ensure that children of all marked things are traced and
2010 * call TraceDelayedChildren(trc) after tracing the thing.
2012 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2013 * for the things with untraced children, calling DelayTracingChildren
2014 * is useless here. Hence we always trace thing's children even with a
2015 * low native stack.
2017 cx->insideGCMarkCallback = JS_FALSE;
2018 JS_TraceChildren(trc, thing, kind);
2019 TraceDelayedChildren(trc);
2020 cx->insideGCMarkCallback = JS_TRUE;
2023 out:
2024 #ifdef DEBUG
2025 trc->debugPrinter = NULL;
2026 trc->debugPrintArg = NULL;
2027 #endif
2028 return; /* to avoid out: right_curl when DEBUG is not defined */
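/*
 * Illustrative sketch (not part of this file): a non-marking tracer that an
 * embedding might use to walk the heap graph, e.g. for a heap dump.  The
 * callback name and its visited-set bookkeeping are hypothetical;
 * JS_TRACER_INIT, JS_TraceRuntime and JS_TraceChildren are the entry points
 * that reach JS_CallTracer above through the JS_CALL_TRACER macros.
 *
 *   static void
 *   MyTraceCallback(JSTracer *trc, void *thing, uint32 kind)
 *   {
 *       if (RememberThing(thing, kind))      (hypothetical first-visit test)
 *           JS_TraceChildren(trc, thing, kind);
 *   }
 *
 *   JSTracer trc;
 *   JS_TRACER_INIT(&trc, cx, MyTraceCallback);
 *   JS_TraceRuntime(&trc);
 */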
2031 void
2032 js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
2034 void *thing;
2035 uint32 kind;
2037 if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
2038 thing = JSVAL_TO_TRACEABLE(v);
2039 kind = JSVAL_TRACE_KIND(v);
2040 } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
2041 /* v can be an arbitrary GC thing reinterpreted as an object. */
2042 thing = JSVAL_TO_OBJECT(v);
2043 kind = GCTypeToTraceKindMap[*js_GetGCThingFlags(thing) & GCF_TYPEMASK];
2044 } else {
2045 return;
2047 JS_CallTracer(trc, thing, kind);
2050 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
2051 gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2052 void *arg)
2054 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
2055 JSTracer *trc = (JSTracer *)arg;
2056 jsval *rp = (jsval *)rhe->root;
2057 jsval v = *rp;
2059 /* Ignore null object and scalar values. */
2060 if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
2061 #ifdef DEBUG
2062 JSBool root_points_to_gcArenaList = JS_FALSE;
2063 jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
2064 uintN i;
2065 JSGCArenaList *arenaList;
2066 uint32 thingSize;
2067 JSGCArenaInfo *a;
2068 size_t limit;
2070 for (i = 0; i < GC_NUM_FREELISTS; i++) {
2071 arenaList = &trc->context->runtime->gcArenaList[i];
2072 thingSize = arenaList->thingSize;
2073 limit = (size_t) arenaList->lastCount * thingSize;
2074 for (a = arenaList->last; a; a = a->prev) {
2075 if (thing - ARENA_INFO_TO_START(a) < limit) {
2076 root_points_to_gcArenaList = JS_TRUE;
2077 break;
2079 limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
2082 if (!root_points_to_gcArenaList && rhe->name) {
2083 fprintf(stderr,
2084 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2085 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2086 "The root's name is \"%s\".\n",
2087 rhe->name);
2089 JS_ASSERT(root_points_to_gcArenaList);
2090 #endif
2091 JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
2092 js_CallValueTracerIfGCThing(trc, v);
2095 return JS_DHASH_NEXT;
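/*
 * The diagnostic above fires when a rooted address holds a freed jsval,
 * which usually means JS_RemoveRoot was never called.  A minimal sketch of
 * the intended pairing (the variable and name are only illustrative):
 *
 *   jsval v = JSVAL_VOID;
 *   if (!JS_AddNamedRoot(cx, &v, "my-cached-value"))
 *       return JS_FALSE;
 *   ...use v; the GC traces it through the root...
 *   JS_RemoveRoot(cx, &v);    before &v goes out of scope or is reused
 */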
2098 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
2099 gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
2100 void *arg)
2102 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
2103 void *thing = (void *)lhe->thing;
2104 JSTracer *trc = (JSTracer *)arg;
2105 uint8 flags;
2106 uint32 traceKind;
2107 JSRuntime *rt;
2108 uint32 n;
2110 JS_ASSERT(lhe->count >= 1);
2111 flags = *js_GetGCThingFlags(thing);
2112 traceKind = GCTypeToTraceKindMap[flags & GCF_TYPEMASK];
2113 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2116 * Bug 379455: we called the tracer once, but to communicate the thing's
2117 * lock count to the tracer (or to gcThingCallback when the tracer is the
2118 * GC marking tracer), we need to call it an extra lhe->count - 1 times.
2120 n = lhe->count - 1;
2121 if (n != 0) {
2122 if (IS_GC_MARKING_TRACER(trc)) {
2123 rt = trc->context->runtime;
2124 if (rt->gcThingCallback) {
2125 do {
2126 rt->gcThingCallback(thing, flags,
2127 rt->gcThingCallbackClosure);
2128 } while (--n != 0);
2130 } else {
2131 do {
2132 JS_CALL_TRACER(trc, thing, traceKind, "locked object");
2133 } while (--n != 0);
2136 return JS_DHASH_NEXT;
2139 #define TRACE_JSVALS(trc, len, vec, name) \
2140 JS_BEGIN_MACRO \
2141 jsval _v, *_vp, *_end; \
2143 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2144 _v = *_vp; \
2145 if (JSVAL_IS_TRACEABLE(_v)) { \
2146 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2147 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2148 JSVAL_TRACE_KIND(_v)); \
2151 JS_END_MACRO
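/*
 * TRACE_JSVALS traces every traceable jsval in vec[0..len-1], tagging each
 * edge with the given name and the value's index within vec, as in the
 * TRACE_JSVALS(trc, nslots, fp->spbase, "operand") calls below.
 */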
2153 void
2154 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
2156 uintN nslots, minargs, skip;
2158 if (fp->callobj)
2159 JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
2160 if (fp->argsobj)
2161 JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
2162 if (fp->varobj)
2163 JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
2164 if (fp->script) {
2165 js_TraceScript(trc, fp->script);
2166 if (fp->spbase) {
2168 * Don't mark what has not been pushed yet, or what has been
2169 * popped already.
2171 JS_ASSERT(JS_UPTRDIFF(fp->sp, fp->spbase) <=
2172 fp->script->depth * sizeof(jsval));
2173 nslots = (uintN) (fp->sp - fp->spbase);
2174 TRACE_JSVALS(trc, nslots, fp->spbase, "operand");
2178 /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
2179 JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
2180 (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
2181 JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");
2183 if (fp->callee)
2184 JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");
2186 if (fp->argv) {
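/*
 * The two jsval slots immediately before argv hold the callee and |this|,
 * which is why tracing below starts at fp->argv - 2.  When the frame has
 * JSFRAME_ROOTED_ARGV set, those slots and the actual arguments are
 * expected to be rooted elsewhere, so skip covers the first 2 + argc slots.
 */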
2187 nslots = fp->argc;
2188 skip = 0;
2189 if (fp->fun) {
2190 minargs = FUN_MINARGS(fp->fun);
2191 if (minargs > nslots)
2192 nslots = minargs;
2193 if (!FUN_INTERPRETED(fp->fun)) {
2194 JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
2195 nslots += fp->fun->u.n.extra;
2197 if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
2198 skip = 2 + fp->argc;
2200 TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
2202 JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
2203 if (fp->vars)
2204 TRACE_JSVALS(trc, fp->nvars, fp->vars, "var");
2205 if (fp->scopeChain)
2206 JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
2207 if (fp->sharpArray)
2208 JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
2210 if (fp->xmlNamespace)
2211 JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
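/*
 * Trace the per-context weak roots: the newborn array holding the most
 * recently allocated GC thing of each type, the last atomized value, and
 * the last internal result.  These keep freshly created things alive until
 * the caller has a chance to root them properly.
 */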
2214 static void
2215 TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
2217 uintN i;
2218 void *thing;
2220 for (i = 0; i < GCX_NTYPES; i++) {
2221 thing = wr->newborn[i];
2222 if (thing) {
2223 JS_CALL_TRACER(trc, thing, GCTypeToTraceKindMap[i],
2224 gc_typenames[i]);
2227 JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
2228 JS_SET_TRACING_NAME(trc, "lastInternalResult");
2229 js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
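/*
 * Trace everything reachable from a single context: its active and dormant
 * frame chains, global object, weak roots, pending exception, stack
 * headers, local root stack, temporary value rooters, and sharp object map.
 */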
2232 JS_FRIEND_API(void)
2233 js_TraceContext(JSTracer *trc, JSContext *acx)
2235 JSStackFrame *fp, *nextChain;
2236 JSStackHeader *sh;
2237 JSTempValueRooter *tvr;
2240 * Iterate frame chain and dormant chains.
2242 * (NB: see comment on this whole "dormant" thing in js_Execute.)
2244 fp = acx->fp;
2245 nextChain = acx->dormantFrameChain;
2246 if (!fp)
2247 goto next_chain;
2249 /* The top frame must not be dormant. */
2250 JS_ASSERT(!fp->dormantNext);
2251 for (;;) {
2252 do {
2253 js_TraceStackFrame(trc, fp);
2254 } while ((fp = fp->down) != NULL);
2256 next_chain:
2257 if (!nextChain)
2258 break;
2259 fp = nextChain;
2260 nextChain = nextChain->dormantNext;
2263 /* Mark other roots-by-definition in acx. */
2264 if (acx->globalObject)
2265 JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
2266 TraceWeakRoots(trc, &acx->weakRoots);
2267 if (acx->throwing) {
2268 JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
2269 } else {
2270 /* Avoid keeping GC-ed junk stored in JSContext.exception. */
2271 acx->exception = JSVAL_NULL;
2273 #if JS_HAS_LVALUE_RETURN
2274 if (acx->rval2set)
2275 JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
2276 #endif
2278 for (sh = acx->stackHeaders; sh; sh = sh->down) {
2279 METER(trc->context->runtime->gcStats.stackseg++);
2280 METER(trc->context->runtime->gcStats.segslots += sh->nslots);
2281 TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
2284 if (acx->localRootStack)
2285 js_TraceLocalRoots(trc, acx->localRootStack);
2287 for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
2288 switch (tvr->count) {
2289 case JSTVU_SINGLE:
2290 JS_SET_TRACING_NAME(trc, "tvr->u.value");
2291 js_CallValueTracerIfGCThing(trc, tvr->u.value);
2292 break;
2293 case JSTVU_TRACE:
2294 tvr->u.trace(trc, tvr);
2295 break;
2296 case JSTVU_SPROP:
2297 TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
2298 break;
2299 case JSTVU_WEAK_ROOTS:
2300 TraceWeakRoots(trc, tvr->u.weakRoots);
2301 break;
2302 case JSTVU_PARSE_CONTEXT:
2303 js_TraceParseContext(trc, tvr->u.parseContext);
2304 break;
2305 case JSTVU_SCRIPT:
2306 js_TraceScript(trc, tvr->u.script);
2307 break;
2308 default:
2309 JS_ASSERT(tvr->count >= 0);
2310 TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
2314 if (acx->sharpObjectMap.depth > 0)
2315 js_TraceSharpMap(trc, &acx->sharpObjectMap);
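/*
 * Trace the runtime-wide root set: the API roots hash, explicitly locked GC
 * things, atom state, native iterator states, every context (via
 * js_TraceContext), and any embedder-supplied extra roots trace op.
 */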
2318 void
2319 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
2321 JSRuntime *rt = trc->context->runtime;
2322 JSContext *iter, *acx;
2324 JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
2325 if (rt->gcLocksHash)
2326 JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
2327 js_TraceAtomState(trc, allAtoms);
2328 js_TraceNativeIteratorStates(trc);
2330 iter = NULL;
2331 while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
2332 js_TraceContext(trc, acx);
2334 if (rt->gcExtraRootsTraceOp)
2335 rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
2339 * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
2340 * rt->gcLock already held; in that case the lock must also be kept on return.
2342 void
2343 js_GC(JSContext *cx, JSGCInvocationKind gckind)
2345 JSRuntime *rt;
2346 JSBool keepAtoms;
2347 uintN i, type;
2348 JSTracer trc;
2349 uint32 thingSize, indexLimit;
2350 JSGCArenaInfo *a, **ap, *emptyArenas;
2351 uint8 flags, *flagp;
2352 JSGCThing *thing, *freeList;
2353 JSGCArenaList *arenaList;
2354 JSBool allClear;
2355 #ifdef JS_THREADSAFE
2356 uint32 requestDebit;
2357 JSContext *acx, *iter;
2358 #endif
2360 rt = cx->runtime;
2361 #ifdef JS_THREADSAFE
2362 /* Avoid deadlock. */
2363 JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
2364 #endif
2366 if (gckind == GC_LAST_DITCH) {
2367 /* The last ditch GC preserves all atoms and weak roots. */
2368 keepAtoms = JS_TRUE;
2369 } else {
2370 /* Keep atoms when a suspended compile is running on another context. */
2371 keepAtoms = (rt->gcKeepAtoms != 0);
2372 JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
2376 * Don't collect garbage if the runtime isn't up, and cx is not the last
2377 * context in the runtime. The last context must force a GC, and nothing
2378 * should suppress that final collection or there may be shutdown leaks,
2379 * or runtime bloat until the next context is created.
2381 if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
2382 return;
2384 restart_after_callback:
2386 * Let the API user decide to defer a GC if it wants to (unless this
2387 * is the last context). Invoke the callback regardless.
2389 if (rt->gcCallback &&
2390 !rt->gcCallback(cx, JSGC_BEGIN) &&
2391 gckind != GC_LAST_CONTEXT) {
2392 return;
2395 /* Lock out other GC allocator and collector invocations. */
2396 if (gckind != GC_LAST_DITCH)
2397 JS_LOCK_GC(rt);
2399 METER(rt->gcStats.poke++);
2400 rt->gcPoke = JS_FALSE;
2402 #ifdef JS_THREADSAFE
2403 JS_ASSERT(cx->thread->id == js_CurrentThreadId());
2405 /* Bump gcLevel and return rather than nest on this thread. */
2406 if (rt->gcThread == cx->thread) {
2407 JS_ASSERT(rt->gcLevel > 0);
2408 rt->gcLevel++;
2409 METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2410 rt->gcStats.maxlevel = rt->gcLevel);
2411 if (gckind != GC_LAST_DITCH)
2412 JS_UNLOCK_GC(rt);
2413 return;
2417 * If we're in one or more requests (possibly on more than one context)
2418 * running on the current thread, indicate, temporarily, that all these
2419 * requests are inactive. If cx->thread is NULL, then cx is not using
2420 * the request model, and does not contribute to rt->requestCount.
2422 requestDebit = 0;
2423 if (cx->thread) {
2424 JSCList *head, *link;
2427 * Check all contexts on cx->thread->contextList for active requests,
2428 * counting each such context against requestDebit.
2430 head = &cx->thread->contextList;
2431 for (link = head->next; link != head; link = link->next) {
2432 acx = CX_FROM_THREAD_LINKS(link);
2433 JS_ASSERT(acx->thread == cx->thread);
2434 if (acx->requestDepth)
2435 requestDebit++;
2437 } else {
2439 * We assert, but check anyway, in case someone is misusing the API.
2440 * Avoiding the loop over all of rt's contexts is a win in the event
2441 * that the GC runs only on request-less contexts with null threads,
2442 * in a special thread such as might be used by the UI/DOM/Layout
2443 * "mozilla" or "main" thread in Mozilla-the-browser.
2445 JS_ASSERT(cx->requestDepth == 0);
2446 if (cx->requestDepth)
2447 requestDebit = 1;
2449 if (requestDebit) {
2450 JS_ASSERT(requestDebit <= rt->requestCount);
2451 rt->requestCount -= requestDebit;
2452 if (rt->requestCount == 0)
2453 JS_NOTIFY_REQUEST_DONE(rt);
2456 /* If another thread is already in GC, don't attempt GC; wait instead. */
2457 if (rt->gcLevel > 0) {
2458 /* Bump gcLevel to restart the current GC, so it finds new garbage. */
2459 rt->gcLevel++;
2460 METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2461 rt->gcStats.maxlevel = rt->gcLevel);
2463 /* Wait for the other thread to finish, then resume our request. */
2464 while (rt->gcLevel > 0)
2465 JS_AWAIT_GC_DONE(rt);
2466 if (requestDebit)
2467 rt->requestCount += requestDebit;
2468 if (gckind != GC_LAST_DITCH)
2469 JS_UNLOCK_GC(rt);
2470 return;
2473 /* No other thread is in GC, so indicate that we're now in GC. */
2474 rt->gcLevel = 1;
2475 rt->gcThread = cx->thread;
2477 /* Wait for all other requests to finish. */
2478 while (rt->requestCount > 0)
2479 JS_AWAIT_REQUEST_DONE(rt);
2481 #else /* !JS_THREADSAFE */
2483 /* Bump gcLevel and return rather than nest; the outer gc will restart. */
2484 rt->gcLevel++;
2485 METER(if (rt->gcLevel > rt->gcStats.maxlevel)
2486 rt->gcStats.maxlevel = rt->gcLevel);
2487 if (rt->gcLevel > 1)
2488 return;
2490 #endif /* !JS_THREADSAFE */
2493 * Set rt->gcRunning here within the GC lock, and after waiting for any
2494 * active requests to end, so that new requests that try to JS_AddRoot,
2495 * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
2496 * rt->gcLevel to drop to zero, while request-less calls to the *Root*
2497 * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
2498 * waiting for GC to finish.
2500 rt->gcRunning = JS_TRUE;
2501 JS_UNLOCK_GC(rt);
2503 /* Reset malloc counter. */
2504 rt->gcMallocBytes = 0;
2506 #ifdef DEBUG_scopemeters
2507 { extern void js_DumpScopeMeters(JSRuntime *rt);
2508 js_DumpScopeMeters(rt);
2510 #endif
2512 #ifdef JS_THREADSAFE
2514 * Set all thread local freelists to NULL. We may visit a thread's
2515 * freelist more than once. To avoid redundant clearing we unroll the
2516 * current thread's step.
2518 * Also, in case a JSScript wrapped within an object was finalized, we
2519 * null acx->thread->gsnCache.script and finish the cache's hashtable.
2520 * Note that js_DestroyScript, called from script_finalize, will have
2521 * already cleared cx->thread->gsnCache above during finalization, so we
2522 * don't have to here.
2524 memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
2525 iter = NULL;
2526 while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
2527 if (!acx->thread || acx->thread == cx->thread)
2528 continue;
2529 memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
2530 GSN_CACHE_CLEAR(&acx->thread->gsnCache);
2532 #else
2533 /* The thread-unsafe case just has to clear the runtime's GSN cache. */
2534 GSN_CACHE_CLEAR(&rt->gsnCache);
2535 #endif
2537 restart:
2538 rt->gcNumber++;
2539 JS_ASSERT(!rt->gcUntracedArenaStackTop);
2540 JS_ASSERT(rt->gcTraceLaterCount == 0);
2543 * Mark phase.
2545 JS_TRACER_INIT(&trc, cx, NULL);
2546 rt->gcMarkingTracer = &trc;
2547 JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
2548 js_TraceRuntime(&trc, keepAtoms);
2549 js_MarkScriptFilenames(rt, keepAtoms);
2552 * Mark children of things that caused too deep recursion during the above
2553 * tracing.
2555 TraceDelayedChildren(&trc);
2557 JS_ASSERT(!cx->insideGCMarkCallback);
2558 if (rt->gcCallback) {
2559 cx->insideGCMarkCallback = JS_TRUE;
2560 (void) rt->gcCallback(cx, JSGC_MARK_END);
2561 JS_ASSERT(cx->insideGCMarkCallback);
2562 cx->insideGCMarkCallback = JS_FALSE;
2564 JS_ASSERT(rt->gcTraceLaterCount == 0);
2566 rt->gcMarkingTracer = NULL;
2569 * Sweep phase.
2571 * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
2572 * so that any attempt to allocate a GC-thing from a finalizer will fail,
2573 * rather than nest badly and leave the unmarked newborn to be swept.
2575 * We first sweep atom state so we can use js_IsAboutToBeFinalized on
2576 * JSString or jsdouble held in a hashtable to check if the hashtable
2577 * entry can be freed. Note that even after the entry is freed, JSObject
2578 * finalizers can continue to access the corresponding jsdouble* and
2579 * JSString* assuming that they are unique. This works since the
2580 * atomization API must not be called during GC.
2582 js_SweepAtomState(cx);
2584 /* Finalize iterator states before the objects they iterate over. */
2585 CloseNativeIterators(cx);
2587 /* Finalize watch points associated with unreachable objects. */
2588 js_SweepWatchPoints(cx);
2590 #ifdef DUMP_CALL_TABLE
2592 * Call js_DumpCallTable here so it can meter and then clear weak refs to
2593 * GC-things that are about to be finalized.
2595 js_DumpCallTable(cx);
2596 #endif
2599 * Here we need to ensure that JSObject instances are finalized before
2600 * GC-allocated JSString and jsdouble instances, so an object's finalizer
2601 * can access them even if they are about to be freed. For that we simply
2602 * finalize the list containing JSObject first, since the static assert at
2603 * the beginning of the file guarantees that JSString and jsdouble
2604 * instances are allocated from a different list.
2606 emptyArenas = NULL;
2607 for (i = 0; i < GC_NUM_FREELISTS; i++) {
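/*
 * Pick the arena list to sweep on this iteration: visit the JSObject list
 * when i == 0 and list 0 when i reaches the JSObject list's index, so that
 * objects are finalized before the strings and doubles they may reference.
 */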
2608 arenaList = &rt->gcArenaList[i == 0
2609 ? GC_FREELIST_INDEX(sizeof(JSObject))
2610 : i == GC_FREELIST_INDEX(sizeof(JSObject))
2611 ? 0
2612 : i];
2613 ap = &arenaList->last;
2614 if (!(a = *ap))
2615 continue;
2617 JS_ASSERT(arenaList->lastCount > 0);
2618 arenaList->freeList = NULL;
2619 freeList = NULL;
2620 METER(arenaList->stats.nthings = 0);
2621 METER(arenaList->stats.freelen = 0);
2622 thingSize = arenaList->thingSize;
2623 indexLimit = THINGS_PER_ARENA(thingSize);
2624 flagp = THING_FLAGP(a, arenaList->lastCount - 1);
2625 for (;;) {
2626 METER(size_t nfree = 0);
2628 JS_ASSERT(a->prevUntracedPage == 0);
2629 JS_ASSERT(a->untracedThings == 0);
2630 allClear = JS_TRUE;
2631 do {
2632 flags = *flagp;
2633 if (flags & (GCF_MARK | GCF_LOCK)) {
2634 *flagp &= ~GCF_MARK;
2635 allClear = JS_FALSE;
2636 METER(++arenaList->stats.nthings);
2637 } else if (!(flags & GCF_FINAL)) {
2638 /* Call the finalizer with GCF_FINAL ORed into flags. */
2639 thing = FLAGP_TO_THING(flagp, thingSize);
2640 *flagp = (uint8)(flags | GCF_FINAL);
2641 type = flags & GCF_TYPEMASK;
2642 switch (type) {
2643 case GCX_OBJECT:
2644 js_FinalizeObject(cx, (JSObject *) thing);
2645 break;
2646 case GCX_DOUBLE:
2647 /* Do nothing. */
2648 break;
2649 case GCX_FUNCTION:
2650 js_FinalizeFunction(cx, (JSFunction *) thing);
2651 break;
2652 #if JS_HAS_XML_SUPPORT
2653 case GCX_NAMESPACE:
2654 js_FinalizeXMLNamespace(cx, (JSXMLNamespace *) thing);
2655 break;
2656 case GCX_QNAME:
2657 js_FinalizeXMLQName(cx, (JSXMLQName *) thing);
2658 break;
2659 case GCX_XML:
2660 js_FinalizeXML(cx, (JSXML *) thing);
2661 break;
2662 #endif
2663 default:
2664 JS_ASSERT(type == GCX_STRING ||
2665 type - GCX_EXTERNAL_STRING <
2666 GCX_NTYPES - GCX_EXTERNAL_STRING);
2667 js_FinalizeStringRT(rt, (JSString *) thing, type, cx);
2668 break;
2670 thing->flagp = flagp;
2671 thing->next = freeList;
2672 freeList = thing;
2673 METER(++nfree);
2675 } while (++flagp != THING_FLAGS_END(a));
2677 if (allClear) {
2679 * Forget the just-assembled free list head for the arena and
2680 * add the arena itself to the destroy list.
2682 freeList = arenaList->freeList;
2683 if (a == arenaList->last)
2684 arenaList->lastCount = indexLimit;
2685 *ap = a->prev;
2686 JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
2687 rt->gcBytes -= GC_ARENA_SIZE;
2688 a->prev = emptyArenas;
2689 emptyArenas = a;
2690 } else {
2691 arenaList->freeList = freeList;
2692 ap = &a->prev;
2693 METER(arenaList->stats.freelen += nfree);
2694 METER(arenaList->stats.totalfreelen += nfree);
2695 METER(++arenaList->stats.totalarenas);
2697 if (!(a = *ap))
2698 break;
2699 flagp = THING_FLAGP(a, indexLimit - 1);
2704 * Sweep the runtime's property tree after finalizing objects, in case any
2705 * had watchpoints referencing tree nodes.
2707 js_SweepScopeProperties(cx);
2710 * Sweep script filenames after sweeping functions in the generic loop
2711 * above. In this way when a scripted function's finalizer destroys the
2712 * script and calls rt->destroyScriptHook, the hook can still access the
2713 * script's filename. See bug 323267.
2715 js_SweepScriptFilenames(rt);
2718 * Destroy arenas only after we have finished sweeping, so finalizers can
2719 * safely use js_IsAboutToBeFinalized().
2721 while (emptyArenas) {
2722 a = emptyArenas;
2723 emptyArenas = emptyArenas->prev;
2724 DestroyGCArena(rt, a);
2727 if (rt->gcCallback)
2728 (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
2729 #ifdef DEBUG_srcnotesize
2730 { extern void DumpSrcNoteSizeHist();
2731 DumpSrcNoteSizeHist();
2732 printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
2734 #endif
2736 JS_LOCK_GC(rt);
2739 * We want to restart GC if js_GC was called recursively or if any of the
2740 * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
2742 if (rt->gcLevel > 1 || rt->gcPoke) {
2743 rt->gcLevel = 1;
2744 rt->gcPoke = JS_FALSE;
2745 JS_UNLOCK_GC(rt);
2746 goto restart;
2748 rt->gcLevel = 0;
2749 rt->gcLastBytes = rt->gcBytes;
2750 rt->gcRunning = JS_FALSE;
2752 #ifdef JS_THREADSAFE
2753 /* If we were invoked during a request, pay back the temporary debit. */
2754 if (requestDebit)
2755 rt->requestCount += requestDebit;
2756 rt->gcThread = NULL;
2757 JS_NOTIFY_GC_DONE(rt);
2760 * Unlock unless we have GC_LAST_DITCH, which requires locked GC on return.
2762 if (gckind != GC_LAST_DITCH)
2763 JS_UNLOCK_GC(rt);
2764 #endif
2766 /* Execute JSGC_END callback outside the lock. */
2767 if (rt->gcCallback) {
2768 JSWeakRoots savedWeakRoots;
2769 JSTempValueRooter tvr;
2771 if (gckind == GC_LAST_DITCH) {
2773 * We allow the JSGC_END implementation to force a full GC or allocate
2774 * new GC things. Thus we must protect the weak roots from GC and from
2775 * being overwritten.
2777 savedWeakRoots = cx->weakRoots;
2778 JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
2779 JS_KEEP_ATOMS(rt);
2780 JS_UNLOCK_GC(rt);
2783 (void) rt->gcCallback(cx, JSGC_END);
2785 if (gckind == GC_LAST_DITCH) {
2786 JS_LOCK_GC(rt);
2787 JS_UNKEEP_ATOMS(rt);
2788 JS_POP_TEMP_ROOT(cx, &tvr);
2789 } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
2791 * On shutdown iterate until JSGC_END callback stops creating
2792 * garbage.
2794 goto restart_after_callback;
2799 void
2800 js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
2802 uint32 *pbytes, bytes;
2804 #ifdef JS_THREADSAFE
2805 pbytes = &cx->thread->gcMallocBytes;
2806 #else
2807 pbytes = &cx->runtime->gcMallocBytes;
2808 #endif
2809 bytes = *pbytes;
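/*
 * Saturating add: once the counter would overflow uint32 it sticks at
 * (uint32)-1.  For example, with bytes == 0xFFFFFFF0 and nbytes == 0x20,
 * (uint32)-1 - bytes == 0xF <= nbytes, so *pbytes becomes 0xFFFFFFFF.
 */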
2810 *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;