/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
 * using malloc. It uses an ideally parallel array of flag bytes to hold the
 * mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */
#include <stdlib.h>     /* for free */
#include <string.h>     /* for memset used when DEBUG */

#include "jsutil.h"     /* Added by JSIFY */
#include "jshash.h"     /* Added by JSIFY */
#include "jsversion.h"

#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif
/*
 * Check if posix_memalign is available.
 */
#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
# define HAS_POSIX_MEMALIGN 1
#else
# define HAS_POSIX_MEMALIGN 0
#endif

/*
 * jemalloc provides posix_memalign.
 */
#if MOZ_MEMORY
#include "../../memory/jemalloc/jemalloc.h"
#endif
/*
 * Include the headers for mmap unless we have posix_memalign and do not
 * insist on mmap.
 */
#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
# if defined(XP_WIN)
#  ifndef JS_GC_USE_MMAP
#   define JS_GC_USE_MMAP 1
#  endif
#  include <windows.h>
# endif
# if defined(XP_UNIX) || defined(XP_BEOS)
#  if _POSIX_MAPPED_FILES > 0
#   ifndef JS_GC_USE_MMAP
#    define JS_GC_USE_MMAP 1
#   endif
#   include <sys/mman.h>

    /* On Mac OS X MAP_ANONYMOUS is not defined. */
#   if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#    define MAP_ANONYMOUS MAP_ANON
#   endif
#  else
#   if JS_GC_USE_MMAP
#    error "JS_GC_USE_MMAP is set when mmap is not available"
#   endif
#  endif
# endif
#endif
/*
 * A GC arena contains a fixed number of flag bits for each thing in its heap,
 * and supports O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we allocate things of the same size from a GC arena
 * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
 * following picture shows arena's layout:
 *
 *  +------------------------------+--------------------+---------------+
 *  | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
 *  +------------------------------+--------------------+---------------+
 *
 * To find the flag bits for the thing we calculate the thing index counting
 * from arena's start using:
 *
 *   thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
 *
 * The details of flag's lookup depend on thing's kind. For all GC things
 * except doubles we use one byte of flags where the 4 bits determine thing's
 * type and the rest is used to implement GC marking, finalization and
 * locking. We calculate the address of flag's byte using:
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
 *
 * where
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
 *
 * is the last byte of flags' area.
 *
 * This implies that the things are allocated from the start of their area and
 * flags are allocated from the end. This arrangement avoids a relatively
 * expensive calculation of the location of the boundary separating things and
 * flags. The boundary's offset from the start of the arena is given by:
 *
 *   thingsPerArena * thingSize
 *
 * where thingsPerArena is the number of things that the arena can hold:
 *
 *   (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
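 *
 * For example, GC_ARENA_SHIFT below makes GC_ARENA_SIZE == 4096; taking
 * sizeof(JSGCArenaInfo) == 32 purely for illustration, a 16-byte thing gives
 *
 *   thingsPerArena = (4096 - 32) / (16 + 1) == 239,
 *
 * so the first 239 * 16 bytes of the arena hold things, the following 239
 * bytes hold their flag bytes, and JSGCArenaInfo sits at the arena's end.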
 *
 * To allocate doubles we use a specialized arena. It can contain only numbers
 * so we do not need the type bits. Moreover, since the doubles do not require
 * a finalizer and very few of them are locked via js_LockGCThing API, we use
 * just one bit of flags per double to denote if it was marked during the
 * marking phase of the GC. The locking is implemented via a hash table. Thus
 * for doubles the flag area becomes a bitmap.
 *
 * JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
 * When it is true, a platform-dependent function like mmap is used to get
 * memory aligned on CPU page boundaries. If the macro is false or undefined,
 * posix_memalign is used when available. Otherwise the code uses malloc to
 * over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
 * approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
 * see NewGCChunk/DestroyGCChunk below.
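 *
 * With the malloc fallback value of js_gcArenasPerChunk == 7 defined below,
 * for example, that overhead amounts to roughly 1/7, or about 14%, of each
 * chunk.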
 *
 * The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
 * minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
 * not be a compile-time constant as the system page size is not known until
 * runtime.
 */
#if JS_GC_USE_MMAP
static uint32 js_gcArenasPerChunk = 0;
static JSBool js_gcUseMmap = JS_FALSE;
#elif HAS_POSIX_MEMALIGN
# define js_gcArenasPerChunk 1
#else
# define js_gcArenasPerChunk 7
#endif

#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
# define CHUNKED_ARENA_ALLOCATION 0
#else
# define CHUNKED_ARENA_ALLOCATION 1
#endif
#define GC_ARENA_SHIFT              12
#define GC_ARENA_MASK               ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
#define GC_ARENA_SIZE               JS_BIT(GC_ARENA_SHIFT)
/*
 * JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
 * It is used to improve allocation efficiency when using posix_memalign. If
 * malloc's implementation uses internal headers, then calling
 *
 *   posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
 *
 * in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
 * due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
 * can use
 *
 *   posix_memalign(&p, GC_ARENA_SIZE,
 *                  GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
 *
 * When JS_GC_ARENA_PAD is equal to or greater than the number of words in the
 * system header, the system can pack all allocations together without holes.
 *
 * With JS_GC_USE_MEMALIGN we want at least a 2-word pad unless posix_memalign
 * comes from jemalloc, which does not use any headers/trailers.
 */
#ifndef JS_GC_ARENA_PAD
# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
#  define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
# else
#  define JS_GC_ARENA_PAD 0
# endif
#endif
struct JSGCArenaInfo {
    /*
     * Allocation list for the arena or NULL if the arena holds double values.
     */
    JSGCArenaList   *list;

    /*
     * Pointer to the previous arena in a linked list. The arena can either
     * belong to one of JSContext.gcArenaList lists or, when it does not have
     * any allocated GC things, to the list of free arenas in the chunk with
     * head stored in JSGCChunkInfo.lastFreeArena.
     */
    JSGCArenaInfo   *prev;

#if !CHUNKED_ARENA_ALLOCATION
    jsuword         prevUntracedPage;
#else
    /*
     * A link field for the list of arenas with marked but not yet traced
     * things. The field is encoded as arena's page to share the space with
     * firstArena and arenaIndex fields.
     */
    jsuword         prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;

    /*
     * When firstArena is false, the index of arena in the chunk. When
     * firstArena is true, the index of a free arena holding JSGCChunkInfo or
     * NO_FREE_ARENAS if there are no free arenas in the chunk.
     *
     * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
     * access either of indexes.
     */
    jsuword         arenaIndex : GC_ARENA_SHIFT - 1;

    /* Flag indicating if the arena is the first in the chunk. */
    jsuword         firstArena : 1;
#endif

    union {
        jsuword     untracedThings;     /* bitset for fast search of marked
                                           but not yet traced things */
        JSBool      hasMarkedDoubles;   /* the arena has marked doubles */
    } u;

#if JS_GC_ARENA_PAD != 0
    uint8           pad[JS_GC_ARENA_PAD];
#endif
};
/*
 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 * as possible. The code does not rely on this check so if on a particular
 * platform this does not compile, then, as a workaround, comment the assert
 * out and submit a bug report.
 */
JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
/*
 * Macros to convert between JSGCArenaInfo, the start address of the arena and
 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
 */
#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))

#define IS_ARENA_INFO_ADDRESS(arena)                                          \
    (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)

#define ARENA_START_TO_INFO(arenaStart)                                       \
    (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0),                \
     (JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))

#define ARENA_INFO_TO_START(arena)                                            \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)

#define ARENA_PAGE_TO_INFO(arenaPage)                                         \
    (JS_ASSERT(arenaPage != 0),                                               \
     JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
     ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))

#define ARENA_INFO_TO_PAGE(arena)                                             \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     ((jsuword) (arena) >> GC_ARENA_SHIFT))

#define GET_ARENA_INFO(chunk, index)                                          \
    (JS_ASSERT((index) < js_gcArenasPerChunk),                                \
     ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
#if CHUNKED_ARENA_ALLOCATION
/*
 * Definitions for allocating arenas in chunks.
 *
 * All chunks that have at least one free arena are put on the doubly-linked
 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
 * the head of the chunk's free arena list together with the link fields for
 * gcChunkList.
 *
 * Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
 * the index of this arena. When all arenas in the chunk are used, it is
 * removed from the list and the index is set to NO_FREE_ARENAS indicating
 * that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
 */
struct JSGCChunkInfo {
    JSGCChunkInfo   **prevp;
    JSGCChunkInfo   *next;
    JSGCArenaInfo   *lastFreeArena;
    uint32          numFreeArenas;
};

#define NO_FREE_ARENAS              JS_BITMASK(GC_ARENA_SHIFT - 1)
#ifdef js_gcArenasPerChunk
JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
                 js_gcArenasPerChunk <= NO_FREE_ARENAS);
#endif
#define GET_ARENA_CHUNK(arena, index)                                         \
    (JS_ASSERT(GET_ARENA_INDEX(arena) == index),                              \
     ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))

#define GET_ARENA_INDEX(arena)                                                \
    ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)

#define GET_CHUNK_INFO_INDEX(chunk)                                           \
    ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)

#define SET_CHUNK_INFO_INDEX(chunk, index)                                    \
    (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS),   \
     (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))

#define GET_CHUNK_INFO(chunk, infoIndex)                                      \
    (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)),                   \
     JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk),                   \
     (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))

#define CHUNK_INFO_TO_INDEX(ci)                                               \
    GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))

#endif
/*
 * Macros for GC-thing operations.
 */
#define THINGS_PER_ARENA(thingSize)                                           \
    ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))

#define THING_TO_ARENA(thing)                                                 \
    ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) +                  \
                       1 - sizeof(JSGCArenaInfo)))

#define THING_TO_INDEX(thing, thingSize)                                      \
    ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))

#define THING_FLAGS_END(arena) ((uint8 *)(arena))

#define THING_FLAGP(arena, thingIndex)                                        \
    (JS_ASSERT((jsuword) (thingIndex)                                         \
               < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)),       \
     (uint8 *)(arena) - 1 - (thingIndex))

#define THING_TO_FLAGP(thing, thingSize)                                      \
    THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))

#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)

#define FLAGP_TO_INDEX(flagp)                                                 \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET),      \
     (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))

#define FLAGP_TO_THING(flagp, thingSize)                                      \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >=                         \
               (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))),            \
     (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) +                     \
                   (thingSize) * FLAGP_TO_INDEX(flagp)))
/*
 * Macros for the specialized arena for doubles.
 *
 * DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
 * hold. We find it as follows. Let n be the number of doubles in the
 * arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
 * the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
 * which the following holds:
 *
 *   n*s + ceil(n/B) <= M                                                 (1)
 *
 * where "/" denotes normal real division,
 *   ceil(r) gives the least integer not smaller than the number r,
 *   s is the number of words in jsdouble,
 *   B is number of bits per word or B == JS_BITS_PER_WORD
 *   M is the number of words in the arena before JSGCArenaInfo or
 *   M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
 *   M == ARENA_INFO_OFFSET / sizeof(jsuword)
 *
 * We rewrite the inequality as
 *
 *   n*B*s/B + ceil(n/B) <= M,
 *   ceil(n*B*s/B + n/B) <= M,
 *   ceil(n*(B*s + 1)/B) <= M                                             (2)
 *
 * We define a helper function e(n, s, B),
 *
 *   e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
 *
 * Then (2) can be rewritten as
 *
 *   n*(B*s + 1)/B + e(n, s, B) <= M,
 *   n + e*B/(B*s + 1) <= M*B/(B*s + 1)
 *
 * We apply the floor function to both sides of the last equation, where
 * floor(r) gives the biggest integer not greater than r. As a consequence we
 * have:
 *
 *   floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
 *   n <= floor(M*B/(B*s + 1)),                                           (3)
 *
 * where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
 * Thus any n that satisfies the original constraint (1) or its equivalent (2),
 * must also satisfy (3). That is, we got an upper estimate for the maximum
 * value of n. Let's show that this upper estimate,
 *
 *   floor(M*B/(B*s + 1)),                                                (4)
 *
 * also satisfies (1) and, as such, gives the required maximum value.
 * Substituting it into (2) gives:
 *
 *   ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
 *
 * where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
 *
 *   ceil(floor(M/X)*X) <= ceil(M) == M.
 *
 * Thus the value of (4) gives the maximum n satisfying (1).
 *
 * For the final result we observe that in (4)
 *
 *   M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
 *       == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
 *
 * and
 *
 *   B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
 *       == JS_BITS_PER_DOUBLE.
 */
#define DOUBLES_PER_ARENA                                                     \
    ((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
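/*
 * For example, JS_BITS_PER_DOUBLE == 64 and JS_BITS_PER_BYTE == 8, so with an
 * illustrative ARENA_INFO_OFFSET of 4064 bytes this evaluates to
 * 4064 * 8 / 65 == 500 doubles per arena; the remaining 64 bytes hold the
 * mark bitmap, and JSGCArenaInfo follows at the end of the arena.
 */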
/*
 * Check that ARENA_INFO_OFFSET and sizeof(jsdouble) are multiples of
 * sizeof(jsuword).
 */
JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));

#define DOUBLES_ARENA_BITMAP_WORDS                                            \
    (JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
/* Check that DOUBLES_PER_ARENA indeed maximises (1). */
JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
                 DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
                 ARENA_INFO_OFFSET);

JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
                 sizeof(jsuword) *
                 JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
                 ARENA_INFO_OFFSET);
/*
 * When DOUBLES_PER_ARENA % BITS_PER_DOUBLE_FLAG_UNIT != 0, some bits in the
 * last byte of the occupation bitmap are unused.
 */
#define UNUSED_DOUBLE_BITMAP_BITS                                             \
    (DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)

JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);

#define DOUBLES_ARENA_BITMAP_OFFSET                                           \
    (ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
#define CHECK_DOUBLE_ARENA_INFO(arenaInfo)                                    \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)),                             \
     JS_ASSERT(!(arenaInfo)->list))

/*
 * Get the start of the bitmap area containing double mark flags in the arena.
 * To access the flag the code uses
 *
 *   JS_TEST_BIT(bitmapStart, index)
 *
 * That is, compared with the case of arenas with non-double things, we count
 * flags from the start of the bitmap area, not from the end.
 */
#define DOUBLE_ARENA_BITMAP(arenaInfo)                                        \
    (CHECK_DOUBLE_ARENA_INFO(arenaInfo),                                      \
     (jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)

#define DOUBLE_THING_TO_INDEX(thing)                                          \
    (CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)),                          \
     JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) <                          \
               DOUBLES_ARENA_BITMAP_OFFSET),                                  \
     ((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
static void
ClearDoubleArenaFlags(JSGCArenaInfo *a)
{
    jsbitmap *bitmap, mask;
    uintN nused;

    /*
     * When some high bits in the last byte of the double occupation bitmap
     * are unused, we must set them. Otherwise RefillDoubleFreeList will
     * assume that they correspond to some free cells and will try to
     * allocate them.
     *
     * Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
     */
    bitmap = DOUBLE_ARENA_BITMAP(a);
    memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
    mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
    nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
    bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
}
static JS_ALWAYS_INLINE JSBool
IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
{
    jsbitmap *bitmap;

    JS_ASSERT(a->u.hasMarkedDoubles);
    bitmap = DOUBLE_ARENA_BITMAP(a);
    return JS_TEST_BIT(bitmap, index);
}
/*
 * JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
 *
 *   1. The next byte in the bitmap area for doubles to check for unmarked
 *      (i.e. free) doubles.
 *   2. Or to the end of the bitmap area when all GC cells of the arena are
 *      allocated.
 *   3. Or to a special sentinel value indicating that there are no arenas
 *      to check for unmarked doubles.
 *
 * We set the sentinel to ARENA_INFO_OFFSET so the single check
 *
 *   ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
 *
 * will cover both the second and the third cases.
 */
#define DOUBLE_BITMAP_SENTINEL  ((jsbitmap *) ARENA_INFO_OFFSET)
/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena to amortize the cost of rt->gcLock.
 *
 * We use number 8 based on benchmarks from bug 312238.
 */
#define MAX_THREAD_LOCAL_THINGS 8
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));

/* We want to use all the available GC thing space for object's slots. */
JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);

/*
 * Ensure that JSObject is allocated from a different GC-list rather than
 * jsdouble and JSString so we can easily finalize JSObject before these 2
 * types of GC things. See comments in js_GC.
 */
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
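/*
 * For example, with the iterator table settings below (minCapacity 4,
 * linearGrowthThreshold 1024), the capacities computed by PtrTableCapacity
 * are 4, 8, 16, ..., 1024 and then 2048, 3072, 4096, ... as the count keeps
 * growing.
 */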
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};
/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}
669 FreePtrTable(JSPtrTable
*table
, const JSPtrTableInfo
*info
)
672 JS_ASSERT(table
->count
> 0);
677 JS_ASSERT(table
->count
== 0);
681 AddToPtrTable(JSContext
*cx
, JSPtrTable
*table
, const JSPtrTableInfo
*info
,
684 size_t count
, capacity
;
687 count
= table
->count
;
688 capacity
= PtrTableCapacity(count
, info
);
690 if (count
== capacity
) {
691 if (capacity
< info
->minCapacity
) {
692 JS_ASSERT(capacity
== 0);
693 JS_ASSERT(!table
->array
);
694 capacity
= info
->minCapacity
;
697 * Simplify the overflow detection assuming pointer is bigger
700 JS_STATIC_ASSERT(2 <= sizeof table
->array
[0]);
701 capacity
= (capacity
< info
->linearGrowthThreshold
)
703 : capacity
+ info
->linearGrowthThreshold
;
704 if (capacity
> (size_t)-1 / sizeof table
->array
[0])
707 array
= (void **) realloc(table
->array
,
708 capacity
* sizeof table
->array
[0]);
712 memset(array
+ count
, JS_FREE_PATTERN
,
713 (capacity
- count
) * sizeof table
->array
[0]);
715 table
->array
= array
;
718 table
->array
[count
] = ptr
;
719 table
->count
= count
+ 1;
724 JS_ReportOutOfMemory(cx
);
729 ShrinkPtrTable(JSPtrTable
*table
, const JSPtrTableInfo
*info
,
732 size_t oldCapacity
, capacity
;
735 JS_ASSERT(newCount
<= table
->count
);
736 if (newCount
== table
->count
)
739 oldCapacity
= PtrTableCapacity(table
->count
, info
);
740 table
->count
= newCount
;
741 capacity
= PtrTableCapacity(newCount
, info
);
743 if (oldCapacity
!= capacity
) {
744 array
= table
->array
;
751 array
= (void **) realloc(array
, capacity
* sizeof array
[0]);
753 table
->array
= array
;
756 memset(table
->array
+ newCount
, JS_FREE_PATTERN
,
757 (capacity
- newCount
) * sizeof table
->array
[0]);
#ifdef JS_GCMETER
# define METER(x)               ((void) (x))
# define METER_IF(condition, x) ((void) ((condition) && (x)))
#else
# define METER(x)               ((void) 0)
# define METER_IF(condition, x) ((void) 0)
#endif

#define METER_UPDATE_MAX(maxLval, rval)                                       \
    METER_IF((maxLval) < (rval), (maxLval) = (rval))
#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN

/*
 * For chunks allocated via over-sized malloc, get a pointer to store the gap
 * between the malloc's result and the first arena in the chunk.
 */
static uint32 *
GetMallocedChunkGapPtr(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

    /* Use the memory after the chunk, see NewGCChunk for details. */
    return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
}
797 p
= VirtualAlloc(NULL
, js_gcArenasPerChunk
<< GC_ARENA_SHIFT
,
798 MEM_COMMIT
| MEM_RESERVE
, PAGE_READWRITE
);
801 p
= mmap(NULL
, js_gcArenasPerChunk
<< GC_ARENA_SHIFT
,
802 PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
803 return (p
== MAP_FAILED
) ? 0 : (jsuword
) p
;
808 #if HAS_POSIX_MEMALIGN
809 if (0 != posix_memalign(&p
, GC_ARENA_SIZE
,
810 GC_ARENA_SIZE
* js_gcArenasPerChunk
-
    /*
     * Implement chunk allocation using oversized malloc if mmap and
     * posix_memalign are not available.
     *
     * Since malloc allocates pointers aligned on the word boundary, to get
     * js_gcArenasPerChunk aligned arenas, we need to malloc only
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
     *
     * bytes. But since we store the gap between the malloced pointer and the
     * first arena in the chunk after the chunk, we need to ask for
     *
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
     *
     * bytes to ensure that we always have room to store the gap.
     */
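    /*
     * For example, with GC_ARENA_SHIFT == 12 and the malloc fallback value
     * js_gcArenasPerChunk == 7, this asks malloc for 8 << 12 == 32768 bytes,
     * rounds the returned pointer up to the next 4096-byte boundary, and
     * records the size of the rounding gap in the word that follows the
     * seventh arena.
     */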
    p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
    if (!p)
        return 0;
    chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
    *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
    return chunk;
847 DestroyGCChunk(jsuword chunk
)
849 JS_ASSERT((chunk
& GC_ARENA_MASK
) == 0);
853 VirtualFree((void *) chunk
, 0, MEM_RELEASE
);
854 # elif defined(SOLARIS)
855 munmap((char *) chunk
, js_gcArenasPerChunk
<< GC_ARENA_SHIFT
);
857 munmap((void *) chunk
, js_gcArenasPerChunk
<< GC_ARENA_SHIFT
);
863 #if HAS_POSIX_MEMALIGN
864 free((void *) chunk
);
866 /* See comments in NewGCChunk. */
867 JS_ASSERT(*GetMallocedChunkGapPtr(chunk
) < GC_ARENA_SIZE
);
868 free((void *) (chunk
- *GetMallocedChunkGapPtr(chunk
)));
#if CHUNKED_ARENA_ALLOCATION

static void
AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    ci->prevp = &rt->gcChunkList;
    ci->next = rt->gcChunkList;
    if (rt->gcChunkList) {
        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
        rt->gcChunkList->prevp = &ci->next;
    }
    rt->gcChunkList = ci;
}

static void
RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    *ci->prevp = ci->next;
    if (ci->next) {
        JS_ASSERT(ci->next->prevp == &ci->next);
        ci->next->prevp = ci->prevp;
    }
}
898 static JSGCArenaInfo
*
899 NewGCArena(JSRuntime
*rt
)
904 if (rt
->gcBytes
>= rt
->gcMaxBytes
)
907 #if CHUNKED_ARENA_ALLOCATION
908 if (js_gcArenasPerChunk
== 1) {
910 chunk
= NewGCChunk();
913 a
= ARENA_START_TO_INFO(chunk
);
914 #if CHUNKED_ARENA_ALLOCATION
918 JSGCArenaInfo
*aprev
;
920 ci
= rt
->gcChunkList
;
922 chunk
= NewGCChunk();
925 JS_ASSERT((chunk
& GC_ARENA_MASK
) == 0);
926 a
= GET_ARENA_INFO(chunk
, 0);
927 a
->firstArena
= JS_TRUE
;
935 a
= GET_ARENA_INFO(chunk
, i
);
936 a
->firstArena
= JS_FALSE
;
938 } while (i
!= js_gcArenasPerChunk
- 1);
939 ci
= GET_CHUNK_INFO(chunk
, 0);
940 ci
->lastFreeArena
= aprev
;
941 ci
->numFreeArenas
= js_gcArenasPerChunk
- 1;
942 AddChunkToList(rt
, ci
);
944 JS_ASSERT(ci
->prevp
== &rt
->gcChunkList
);
945 a
= ci
->lastFreeArena
;
948 JS_ASSERT(ci
->numFreeArenas
== 1);
949 JS_ASSERT(ARENA_INFO_TO_START(a
) == (jsuword
) ci
);
950 RemoveChunkFromList(rt
, ci
);
951 chunk
= GET_ARENA_CHUNK(a
, GET_ARENA_INDEX(a
));
952 SET_CHUNK_INFO_INDEX(chunk
, NO_FREE_ARENAS
);
954 JS_ASSERT(ci
->numFreeArenas
>= 2);
955 JS_ASSERT(ARENA_INFO_TO_START(a
) != (jsuword
) ci
);
956 ci
->lastFreeArena
= aprev
;
963 rt
->gcBytes
+= GC_ARENA_SIZE
;
964 a
->prevUntracedPage
= 0;
965 memset(&a
->u
, 0, sizeof(a
->u
));
971 DestroyGCArenas(JSRuntime
*rt
, JSGCArenaInfo
*last
)
979 METER(rt
->gcStats
.afree
++);
980 JS_ASSERT(rt
->gcBytes
>= GC_ARENA_SIZE
);
981 rt
->gcBytes
-= GC_ARENA_SIZE
;
983 #if CHUNKED_ARENA_ALLOCATION
984 if (js_gcArenasPerChunk
== 1) {
986 DestroyGCChunk(ARENA_INFO_TO_START(a
));
987 #if CHUNKED_ARENA_ALLOCATION
991 uint32 chunkInfoIndex
;
996 firstArena
= a
->firstArena
;
997 arenaIndex
= a
->arenaIndex
;
998 memset((void *) ARENA_INFO_TO_START(a
), JS_FREE_PATTERN
,
999 GC_ARENA_SIZE
- JS_GC_ARENA_PAD
);
1000 a
->firstArena
= firstArena
;
1001 a
->arenaIndex
= arenaIndex
;
1003 arenaIndex
= GET_ARENA_INDEX(a
);
1004 chunk
= GET_ARENA_CHUNK(a
, arenaIndex
);
1005 chunkInfoIndex
= GET_CHUNK_INFO_INDEX(chunk
);
1006 if (chunkInfoIndex
== NO_FREE_ARENAS
) {
1007 chunkInfoIndex
= arenaIndex
;
1008 SET_CHUNK_INFO_INDEX(chunk
, arenaIndex
);
1009 ci
= GET_CHUNK_INFO(chunk
, chunkInfoIndex
);
1011 ci
->lastFreeArena
= a
;
1012 ci
->numFreeArenas
= 1;
1013 AddChunkToList(rt
, ci
);
1015 JS_ASSERT(chunkInfoIndex
!= arenaIndex
);
1016 ci
= GET_CHUNK_INFO(chunk
, chunkInfoIndex
);
1017 JS_ASSERT(ci
->numFreeArenas
!= 0);
1018 JS_ASSERT(ci
->lastFreeArena
);
1019 JS_ASSERT(a
!= ci
->lastFreeArena
);
1020 if (ci
->numFreeArenas
== js_gcArenasPerChunk
- 1) {
1021 RemoveChunkFromList(rt
, ci
);
1022 DestroyGCChunk(chunk
);
1024 ++ci
->numFreeArenas
;
1025 a
->prev
= ci
->lastFreeArena
;
1026 ci
->lastFreeArena
= a
;
1035 InitGCArenaLists(JSRuntime
*rt
)
1038 JSGCArenaList
*arenaList
;
1040 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
1041 arenaList
= &rt
->gcArenaList
[i
];
1042 thingSize
= GC_FREELIST_NBYTES(i
);
1043 JS_ASSERT((size_t)(uint16
)thingSize
== thingSize
);
1044 arenaList
->last
= NULL
;
1045 arenaList
->lastCount
= (uint16
) THINGS_PER_ARENA(thingSize
);
1046 arenaList
->thingSize
= (uint16
) thingSize
;
1047 arenaList
->freeList
= NULL
;
1049 rt
->gcDoubleArenaList
.first
= NULL
;
1050 rt
->gcDoubleArenaList
.nextDoubleFlags
= DOUBLE_BITMAP_SENTINEL
;
1054 FinishGCArenaLists(JSRuntime
*rt
)
1057 JSGCArenaList
*arenaList
;
1059 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
1060 arenaList
= &rt
->gcArenaList
[i
];
1061 DestroyGCArenas(rt
, arenaList
->last
);
1062 arenaList
->last
= NULL
;
1063 arenaList
->lastCount
= THINGS_PER_ARENA(arenaList
->thingSize
);
1064 arenaList
->freeList
= NULL
;
1066 DestroyGCArenas(rt
, rt
->gcDoubleArenaList
.first
);
1067 rt
->gcDoubleArenaList
.first
= NULL
;
1068 rt
->gcDoubleArenaList
.nextDoubleFlags
= DOUBLE_BITMAP_SENTINEL
;
1071 JS_ASSERT(rt
->gcChunkList
== 0);
1075 * This function must not be called when thing is jsdouble.
1078 GetGCThingFlags(void *thing
)
1083 a
= THING_TO_ARENA(thing
);
1084 index
= THING_TO_INDEX(thing
, a
->list
->thingSize
);
1085 return THING_FLAGP(a
, index
);
1089 * This function returns null when thing is jsdouble.
1092 GetGCThingFlagsOrNull(void *thing
)
1097 a
= THING_TO_ARENA(thing
);
1100 index
= THING_TO_INDEX(thing
, a
->list
->thingSize
);
1101 return THING_FLAGP(a
, index
);
1105 js_GetExternalStringGCType(JSString
*str
)
1109 type
= (uintN
) *GetGCThingFlags(str
) & GCF_TYPEMASK
;
1110 JS_ASSERT(type
== GCX_STRING
|| type
>= GCX_EXTERNAL_STRING
);
1111 return (type
== GCX_STRING
) ? -1 : (intN
) (type
- GCX_EXTERNAL_STRING
);
1115 MapGCFlagsToTraceKind(uintN flags
)
1119 type
= flags
& GCF_TYPEMASK
;
1120 JS_ASSERT(type
!= GCX_DOUBLE
);
1121 JS_ASSERT(type
< GCX_NTYPES
);
1122 return (type
< GCX_EXTERNAL_STRING
) ? type
: JSTRACE_STRING
;
1125 JS_FRIEND_API(uint32
)
1126 js_GetGCThingTraceKind(void *thing
)
1131 a
= THING_TO_ARENA(thing
);
1133 return JSTRACE_DOUBLE
;
1135 index
= THING_TO_INDEX(thing
, a
->list
->thingSize
);
1136 return MapGCFlagsToTraceKind(*THING_FLAGP(a
, index
));
1140 js_GetGCStringRuntime(JSString
*str
)
1142 JSGCArenaList
*list
;
1144 list
= THING_TO_ARENA(str
)->list
;
1146 JS_ASSERT(list
->thingSize
== sizeof(JSGCThing
));
1147 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing
)) == 0);
1149 return (JSRuntime
*)((uint8
*)list
- offsetof(JSRuntime
, gcArenaList
));
1153 js_IsAboutToBeFinalized(JSContext
*cx
, void *thing
)
1156 uint32 index
, flags
;
1158 a
= THING_TO_ARENA(thing
);
1161 * Check if arena has no marked doubles. In that case the bitmap with
1162 * the mark flags contains all garbage as it is initialized only when
1163 * marking the first double in the arena.
1165 if (!a
->u
.hasMarkedDoubles
)
1167 index
= DOUBLE_THING_TO_INDEX(thing
);
1168 return !IsMarkedDouble(a
, index
);
1170 index
= THING_TO_INDEX(thing
, a
->list
->thingSize
);
1171 flags
= *THING_FLAGP(a
, index
);
1172 return !(flags
& (GCF_MARK
| GCF_LOCK
| GCF_FINAL
));
1175 /* This is compatible with JSDHashEntryStub. */
1176 typedef struct JSGCRootHashEntry
{
1177 JSDHashEntryHdr hdr
;
1180 } JSGCRootHashEntry
;
1182 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
1183 #define GC_ROOTS_SIZE 256
1185 #if CHUNKED_ARENA_ALLOCATION
1188 * For a CPU with extremely large pages using them for GC things wastes
1191 # define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
1193 JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT
<= NO_FREE_ARENAS
);
1198 js_InitGC(JSRuntime
*rt
, uint32 maxbytes
)
1201 if (js_gcArenasPerChunk
== 0) {
1202 size_t cpuPageSize
, arenasPerPage
;
1203 # if defined(XP_WIN)
1207 cpuPageSize
= si
.dwPageSize
;
1209 # elif defined(XP_UNIX) || defined(XP_BEOS)
1210 cpuPageSize
= (size_t) sysconf(_SC_PAGESIZE
);
1212 # error "Not implemented"
1214 /* cpuPageSize is a power of 2. */
1215 JS_ASSERT((cpuPageSize
& (cpuPageSize
- 1)) == 0);
1216 arenasPerPage
= cpuPageSize
>> GC_ARENA_SHIFT
;
1218 if (arenasPerPage
== 0) {
1220 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
1221 "paged allocation for the garbage collector. Please report this.\n",
1222 (unsigned) cpuPageSize
);
1225 if (arenasPerPage
- 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT
- 1)) {
1227 * Use at least 4 GC arenas per paged allocation chunk to minimize
1228 * the overhead of mmap/VirtualAlloc.
1230 js_gcUseMmap
= JS_TRUE
;
1231 js_gcArenasPerChunk
= JS_MAX((uint32
) arenasPerPage
, 4);
1233 js_gcUseMmap
= JS_FALSE
;
1234 js_gcArenasPerChunk
= 7;
1237 JS_ASSERT(1 <= js_gcArenasPerChunk
&&
1238 js_gcArenasPerChunk
<= NO_FREE_ARENAS
);
1241 InitGCArenaLists(rt
);
1242 if (!JS_DHashTableInit(&rt
->gcRootsHash
, JS_DHashGetStubOps(), NULL
,
1243 sizeof(JSGCRootHashEntry
), GC_ROOTS_SIZE
)) {
1244 rt
->gcRootsHash
.ops
= NULL
;
1247 rt
->gcLocksHash
= NULL
; /* create lazily */
1250 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1251 * for default backward API compatibility.
1253 rt
->gcMaxBytes
= rt
->gcMaxMallocBytes
= maxbytes
;
1254 rt
->gcEmptyArenaPoolLifespan
= 30000;
1256 METER(memset(&rt
->gcStats
, 0, sizeof rt
->gcStats
));
1263 UpdateArenaStats(JSGCArenaStats
*st
, uint32 nlivearenas
, uint32 nkilledArenas
,
1268 narenas
= nlivearenas
+ nkilledArenas
;
1269 JS_ASSERT(narenas
>= st
->livearenas
);
1271 st
->newarenas
= narenas
- st
->livearenas
;
1272 st
->narenas
= narenas
;
1273 st
->livearenas
= nlivearenas
;
1274 if (st
->maxarenas
< narenas
)
1275 st
->maxarenas
= narenas
;
1276 st
->totalarenas
+= narenas
;
1278 st
->nthings
= nthings
;
1279 if (st
->maxthings
< nthings
)
1280 st
->maxthings
= nthings
;
1281 st
->totalthings
+= nthings
;
1285 js_DumpGCStats(JSRuntime
*rt
, FILE *fp
)
1288 size_t sumArenas
, sumTotalArenas
;
1289 size_t sumThings
, sumMaxThings
;
1290 size_t sumThingSize
, sumTotalThingSize
;
1291 size_t sumArenaCapacity
, sumTotalArenaCapacity
;
1293 size_t thingSize
, thingsPerArena
;
1294 size_t sumAlloc
, sumLocalAlloc
, sumFail
, sumRetry
;
1296 fprintf(fp
, "\nGC allocation statistics:\n");
1298 #define UL(x) ((unsigned long)(x))
1299 #define ULSTAT(x) UL(rt->gcStats.x)
1300 #define PERCENT(x,y) (100.0 * (double) (x) / (double) (y))
1307 sumTotalThingSize
= 0;
1308 sumArenaCapacity
= 0;
1309 sumTotalArenaCapacity
= 0;
1314 for (i
= -1; i
< (int) GC_NUM_FREELISTS
; i
++) {
1316 thingSize
= sizeof(jsdouble
);
1317 thingsPerArena
= DOUBLES_PER_ARENA
;
1318 st
= &rt
->gcStats
.doubleArenaStats
;
1320 "Arena list for double values (%lu doubles per arena):",
1321 UL(thingsPerArena
));
1323 thingSize
= rt
->gcArenaList
[i
].thingSize
;
1324 thingsPerArena
= THINGS_PER_ARENA(thingSize
);
1325 st
= &rt
->gcStats
.arenaStats
[i
];
1327 "Arena list %d (thing size %lu, %lu things per arena):",
1328 i
, UL(GC_FREELIST_NBYTES(i
)), UL(thingsPerArena
));
1330 if (st
->maxarenas
== 0) {
1331 fputs(" NEVER USED\n", fp
);
1335 fprintf(fp
, " arenas before GC: %lu\n", UL(st
->narenas
));
1336 fprintf(fp
, " new arenas before GC: %lu (%.1f%%)\n",
1337 UL(st
->newarenas
), PERCENT(st
->newarenas
, st
->narenas
));
1338 fprintf(fp
, " arenas after GC: %lu (%.1f%%)\n",
1339 UL(st
->livearenas
), PERCENT(st
->livearenas
, st
->narenas
));
1340 fprintf(fp
, " max arenas: %lu\n", UL(st
->maxarenas
));
1341 fprintf(fp
, " things: %lu\n", UL(st
->nthings
));
1342 fprintf(fp
, " GC cell utilization: %.1f%%\n",
1343 PERCENT(st
->nthings
, thingsPerArena
* st
->narenas
));
1344 fprintf(fp
, " average cell utilization: %.1f%%\n",
1345 PERCENT(st
->totalthings
, thingsPerArena
* st
->totalarenas
));
1346 fprintf(fp
, " max things: %lu\n", UL(st
->maxthings
));
1347 fprintf(fp
, " alloc attempts: %lu\n", UL(st
->alloc
));
1348 fprintf(fp
, " alloc without locks: %1u (%.1f%%)\n",
1349 UL(st
->localalloc
), PERCENT(st
->localalloc
, st
->alloc
));
1350 sumArenas
+= st
->narenas
;
1351 sumTotalArenas
+= st
->totalarenas
;
1352 sumThings
+= st
->nthings
;
1353 sumMaxThings
+= st
->maxthings
;
1354 sumThingSize
+= thingSize
* st
->nthings
;
1355 sumTotalThingSize
+= thingSize
* st
->totalthings
;
1356 sumArenaCapacity
+= thingSize
* thingsPerArena
* st
->narenas
;
1357 sumTotalArenaCapacity
+= thingSize
* thingsPerArena
* st
->totalarenas
;
1358 sumAlloc
+= st
->alloc
;
1359 sumLocalAlloc
+= st
->localalloc
;
1360 sumFail
+= st
->fail
;
1361 sumRetry
+= st
->retry
;
1363 fprintf(fp
, "TOTAL STATS:\n");
1364 fprintf(fp
, " bytes allocated: %lu\n", UL(rt
->gcBytes
));
1365 fprintf(fp
, " total GC arenas: %lu\n", UL(sumArenas
));
1366 fprintf(fp
, " total GC things: %lu\n", UL(sumThings
));
1367 fprintf(fp
, " max total GC things: %lu\n", UL(sumMaxThings
));
1368 fprintf(fp
, " GC cell utilization: %.1f%%\n",
1369 PERCENT(sumThingSize
, sumArenaCapacity
));
1370 fprintf(fp
, " average cell utilization: %.1f%%\n",
1371 PERCENT(sumTotalThingSize
, sumTotalArenaCapacity
));
1372 fprintf(fp
, "allocation retries after GC: %lu\n", UL(sumRetry
));
1373 fprintf(fp
, " alloc attempts: %lu\n", UL(sumAlloc
));
1374 fprintf(fp
, " alloc without locks: %1u (%.1f%%)\n",
1375 UL(sumLocalAlloc
), PERCENT(sumLocalAlloc
, sumAlloc
));
1376 fprintf(fp
, " allocation failures: %lu\n", UL(sumFail
));
1377 fprintf(fp
, " things born locked: %lu\n", ULSTAT(lockborn
));
1378 fprintf(fp
, " valid lock calls: %lu\n", ULSTAT(lock
));
1379 fprintf(fp
, " valid unlock calls: %lu\n", ULSTAT(unlock
));
1380 fprintf(fp
, " mark recursion depth: %lu\n", ULSTAT(depth
));
1381 fprintf(fp
, " maximum mark recursion: %lu\n", ULSTAT(maxdepth
));
1382 fprintf(fp
, " mark C recursion depth: %lu\n", ULSTAT(cdepth
));
1383 fprintf(fp
, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth
));
1384 fprintf(fp
, " delayed tracing calls: %lu\n", ULSTAT(untraced
));
1386 fprintf(fp
, " max trace later count: %lu\n", ULSTAT(maxuntraced
));
1388 fprintf(fp
, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel
));
1389 fprintf(fp
, "potentially useful GC calls: %lu\n", ULSTAT(poke
));
1390 fprintf(fp
, " thing arenas freed so far: %lu\n", ULSTAT(afree
));
1391 fprintf(fp
, " stack segments scanned: %lu\n", ULSTAT(stackseg
));
1392 fprintf(fp
, "stack segment slots scanned: %lu\n", ULSTAT(segslots
));
1393 fprintf(fp
, "reachable closeable objects: %lu\n", ULSTAT(nclose
));
1394 fprintf(fp
, " max reachable closeable: %lu\n", ULSTAT(maxnclose
));
1395 fprintf(fp
, " scheduled close hooks: %lu\n", ULSTAT(closelater
));
1396 fprintf(fp
, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater
));
1402 #ifdef JS_ARENAMETER
1403 JS_DumpArenaStats(fp
);
1410 CheckLeakedRoots(JSRuntime
*rt
);
1413 #ifdef JS_THREADSAFE
1415 TrimGCFreeListsPool(JSRuntime
*rt
, uintN keepCount
);
1419 js_FinishGC(JSRuntime
*rt
)
1421 #ifdef JS_ARENAMETER
1422 JS_DumpArenaStats(stdout
);
1425 js_DumpGCStats(rt
, stdout
);
1428 FreePtrTable(&rt
->gcIteratorTable
, &iteratorTableInfo
);
1429 #ifdef JS_THREADSAFE
1430 TrimGCFreeListsPool(rt
, 0);
1431 JS_ASSERT(!rt
->gcFreeListsPool
);
1433 FinishGCArenaLists(rt
);
1435 if (rt
->gcRootsHash
.ops
) {
1437 CheckLeakedRoots(rt
);
1439 JS_DHashTableFinish(&rt
->gcRootsHash
);
1440 rt
->gcRootsHash
.ops
= NULL
;
1442 if (rt
->gcLocksHash
) {
1443 JS_DHashTableDestroy(rt
->gcLocksHash
);
1444 rt
->gcLocksHash
= NULL
;
1449 js_AddRoot(JSContext
*cx
, void *rp
, const char *name
)
1451 JSBool ok
= js_AddRootRT(cx
->runtime
, rp
, name
);
1453 JS_ReportOutOfMemory(cx
);
1458 js_AddRootRT(JSRuntime
*rt
, void *rp
, const char *name
)
1461 JSGCRootHashEntry
*rhe
;
1464 * Due to the long-standing, but now removed, use of rt->gcLock across the
1465 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1466 * properly with a racing GC, without calling JS_AddRoot from a request.
1467 * We have to preserve API compatibility here, now that we avoid holding
1468 * rt->gcLock across the mark phase (including the root hashtable mark).
1470 * If the GC is running and we're called on another thread, wait for this
1471 * GC activation to finish. We can safely wait here (in the case where we
1472 * are called within a request on another thread's context) without fear
1473 * of deadlock because the GC doesn't set rt->gcRunning until after it has
1474 * waited for all active requests to end.
1477 #ifdef JS_THREADSAFE
1478 JS_ASSERT(!rt
->gcRunning
|| rt
->gcLevel
> 0);
1479 if (rt
->gcRunning
&& rt
->gcThread
->id
!= js_CurrentThreadId()) {
1481 JS_AWAIT_GC_DONE(rt
);
1482 } while (rt
->gcLevel
> 0);
1485 rhe
= (JSGCRootHashEntry
*)
1486 JS_DHashTableOperate(&rt
->gcRootsHash
, rp
, JS_DHASH_ADD
);
1499 js_RemoveRoot(JSRuntime
*rt
, void *rp
)
1502 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1503 * Same synchronization drill as above in js_AddRoot.
1506 #ifdef JS_THREADSAFE
1507 JS_ASSERT(!rt
->gcRunning
|| rt
->gcLevel
> 0);
1508 if (rt
->gcRunning
&& rt
->gcThread
->id
!= js_CurrentThreadId()) {
1510 JS_AWAIT_GC_DONE(rt
);
1511 } while (rt
->gcLevel
> 0);
1514 (void) JS_DHashTableOperate(&rt
->gcRootsHash
, rp
, JS_DHASH_REMOVE
);
1515 rt
->gcPoke
= JS_TRUE
;
1522 static JSDHashOperator
1523 js_root_printer(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 i
, void *arg
)
1525 uint32
*leakedroots
= (uint32
*)arg
;
1526 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1530 "JS engine warning: leaking GC root \'%s\' at %p\n",
1531 rhe
->name
? (char *)rhe
->name
: "", rhe
->root
);
1533 return JS_DHASH_NEXT
;
1537 CheckLeakedRoots(JSRuntime
*rt
)
1539 uint32 leakedroots
= 0;
1541 /* Warn (but don't assert) debug builds of any remaining roots. */
1542 JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_root_printer
,
1544 if (leakedroots
> 0) {
1545 if (leakedroots
== 1) {
1547 "JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
1548 " This root may point to freed memory. Objects reachable\n"
1549 " through it have not been finalized.\n",
1553 "JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
1554 " These roots may point to freed memory. Objects reachable\n"
1555 " through them have not been finalized.\n",
1556 (unsigned long) leakedroots
, (void *) rt
);
1561 typedef struct NamedRootDumpArgs
{
1562 void (*dump
)(const char *name
, void *rp
, void *data
);
1564 } NamedRootDumpArgs
;
1566 static JSDHashOperator
1567 js_named_root_dumper(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 number
,
1570 NamedRootDumpArgs
*args
= (NamedRootDumpArgs
*) arg
;
1571 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1574 args
->dump(rhe
->name
, rhe
->root
, args
->data
);
1575 return JS_DHASH_NEXT
;
1580 js_DumpNamedRoots(JSRuntime
*rt
,
1581 void (*dump
)(const char *name
, void *rp
, void *data
),
1584 NamedRootDumpArgs args
;
1588 JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_named_root_dumper
, &args
);
1594 typedef struct GCRootMapArgs
{
1599 static JSDHashOperator
1600 js_gcroot_mapper(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 number
,
1603 GCRootMapArgs
*args
= (GCRootMapArgs
*) arg
;
1604 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1608 mapflags
= args
->map(rhe
->root
, rhe
->name
, args
->data
);
1610 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1611 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1612 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1613 op
= (JSDHashOperator
)mapflags
;
1616 if (mapflags
& JS_MAP_GCROOT_STOP
)
1617 op
|= JS_DHASH_STOP
;
1618 if (mapflags
& JS_MAP_GCROOT_REMOVE
)
1619 op
|= JS_DHASH_REMOVE
;
1622 return (JSDHashOperator
) op
;
1626 js_MapGCRoots(JSRuntime
*rt
, JSGCRootMapFun map
, void *data
)
1634 rv
= JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_gcroot_mapper
, &args
);
1640 js_RegisterCloseableIterator(JSContext
*cx
, JSObject
*obj
)
1646 JS_ASSERT(!rt
->gcRunning
);
1649 ok
= AddToPtrTable(cx
, &rt
->gcIteratorTable
, &iteratorTableInfo
, obj
);
1655 CloseNativeIterators(JSContext
*cx
)
1658 size_t count
, newCount
, i
;
1663 count
= rt
->gcIteratorTable
.count
;
1664 array
= rt
->gcIteratorTable
.array
;
1667 for (i
= 0; i
!= count
; ++i
) {
1668 obj
= (JSObject
*)array
[i
];
1669 if (js_IsAboutToBeFinalized(cx
, obj
))
1670 js_CloseNativeIterator(cx
, obj
);
1672 array
[newCount
++] = obj
;
1674 ShrinkPtrTable(&rt
->gcIteratorTable
, &iteratorTableInfo
, newCount
);
1677 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1678 #define DEBUG_gchist
1684 static struct GCHist
{
1686 JSGCThing
*freeList
;
1689 unsigned gchpos
= 0;
1692 #ifdef JS_THREADSAFE
1694 const JSGCFreeListSet js_GCEmptyFreeListSet
= {
1695 { NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
}, NULL
1699 TrimGCFreeListsPool(JSRuntime
*rt
, uintN keepCount
)
1701 JSGCFreeListSet
**cursor
, *freeLists
, *link
;
1703 cursor
= &rt
->gcFreeListsPool
;
1704 while (keepCount
!= 0) {
1706 freeLists
= *cursor
;
1709 memset(freeLists
->array
, 0, sizeof freeLists
->array
);
1710 cursor
= &freeLists
->link
;
1712 freeLists
= *cursor
;
1716 link
= freeLists
->link
;
1718 } while ((freeLists
= link
) != NULL
);
1723 js_RevokeGCLocalFreeLists(JSContext
*cx
)
1725 JS_ASSERT(!cx
->gcLocalFreeLists
->link
);
1726 if (cx
->gcLocalFreeLists
!= &js_GCEmptyFreeListSet
) {
1727 cx
->gcLocalFreeLists
->link
= cx
->runtime
->gcFreeListsPool
;
1728 cx
->runtime
->gcFreeListsPool
= cx
->gcLocalFreeLists
;
1729 cx
->gcLocalFreeLists
= (JSGCFreeListSet
*) &js_GCEmptyFreeListSet
;
1733 static JSGCFreeListSet
*
1734 EnsureLocalFreeList(JSContext
*cx
)
1736 JSGCFreeListSet
*freeLists
;
1738 freeLists
= cx
->gcLocalFreeLists
;
1739 if (freeLists
!= &js_GCEmptyFreeListSet
) {
1740 JS_ASSERT(freeLists
);
1744 freeLists
= cx
->runtime
->gcFreeListsPool
;
1746 cx
->runtime
->gcFreeListsPool
= freeLists
->link
;
1747 freeLists
->link
= NULL
;
1749 /* JS_malloc is not used as the caller reports out-of-memory itself. */
1750 freeLists
= (JSGCFreeListSet
*) calloc(1, sizeof *freeLists
);
1754 cx
->gcLocalFreeLists
= freeLists
;
1761 js_NewGCThing(JSContext
*cx
, uintN flags
, size_t nbytes
)
1768 JSGCArenaList
*arenaList
;
1771 JSLocalRootStack
*lrs
;
1773 JSGCArenaStats
*astats
;
1775 #ifdef JS_THREADSAFE
1777 uintN localMallocBytes
;
1778 JSGCFreeListSet
*freeLists
;
1779 JSGCThing
**lastptr
;
1780 JSGCThing
*tmpthing
;
1782 uintN maxFreeThings
; /* max to take from the global free list */
1785 JS_ASSERT((flags
& GCF_TYPEMASK
) != GCX_DOUBLE
);
1787 nbytes
= JS_ROUNDUP(nbytes
, sizeof(JSGCThing
));
1788 flindex
= GC_FREELIST_INDEX(nbytes
);
1790 /* Updates of metering counters here may not be thread-safe. */
1791 METER(astats
= &cx
->runtime
->gcStats
.arenaStats
[flindex
]);
1792 METER(astats
->alloc
++);
1794 #ifdef JS_THREADSAFE
1795 gcLocked
= JS_FALSE
;
1796 JS_ASSERT(cx
->thread
);
1797 freeLists
= cx
->gcLocalFreeLists
;
1798 thing
= freeLists
->array
[flindex
];
1799 localMallocBytes
= cx
->thread
->gcMallocBytes
;
1800 if (thing
&& rt
->gcMaxMallocBytes
- rt
->gcMallocBytes
> localMallocBytes
) {
1801 flagp
= thing
->flagp
;
1802 freeLists
->array
[flindex
] = thing
->next
;
1803 METER(astats
->localalloc
++);
1810 /* Transfer thread-local counter to global one. */
1811 if (localMallocBytes
!= 0) {
1812 cx
->thread
->gcMallocBytes
= 0;
1813 if (rt
->gcMaxMallocBytes
- rt
->gcMallocBytes
< localMallocBytes
)
1814 rt
->gcMallocBytes
= rt
->gcMaxMallocBytes
;
1816 rt
->gcMallocBytes
+= localMallocBytes
;
1819 JS_ASSERT(!rt
->gcRunning
);
1820 if (rt
->gcRunning
) {
1821 METER(rt
->gcStats
.finalfail
++);
1826 doGC
= (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
&& rt
->gcPoke
);
1828 doGC
= doGC
|| rt
->gcZeal
>= 2 || (rt
->gcZeal
>= 1 && rt
->gcPoke
);
1831 arenaList
= &rt
->gcArenaList
[flindex
];
1833 if (doGC
&& !JS_ON_TRACE(cx
)) {
1835 * Keep rt->gcLock across the call into js_GC so we don't starve
1836 * and lose to racing threads who deplete the heap just after
1837 * js_GC has replenished it (or has synchronized with a racing
1838 * GC that collected a bunch of garbage). This unfair scheduling
1839 * can happen on certain operating systems. For the gory details,
1840 * see bug 162779 at https://bugzilla.mozilla.org/.
1842 js_GC(cx
, GC_LAST_DITCH
);
1843 METER(astats
->retry
++);
1846 /* Try to get thing from the free list. */
1847 thing
= arenaList
->freeList
;
1849 arenaList
->freeList
= thing
->next
;
1850 flagp
= thing
->flagp
;
1851 JS_ASSERT(*flagp
& GCF_FINAL
);
1853 #ifdef JS_THREADSAFE
1855 * Refill the local free list by taking several things from the
1856 * global free list unless we are still at rt->gcMaxMallocBytes
1857 * barrier or the free list is already populated. The former
1858 * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
1859 * or no gcPoke. The latter is caused via allocating new things
1860 * in gcCallback(cx, JSGC_END).
1862 if (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
)
1865 freeLists
= EnsureLocalFreeList(cx
);
1868 if (freeLists
->array
[flindex
])
1871 tmpthing
= arenaList
->freeList
;
1873 maxFreeThings
= MAX_THREAD_LOCAL_THINGS
;
1875 if (!tmpthing
->next
)
1877 tmpthing
= tmpthing
->next
;
1878 } while (--maxFreeThings
!= 0);
1880 freeLists
->array
[flindex
] = arenaList
->freeList
;
1881 arenaList
->freeList
= tmpthing
->next
;
1882 tmpthing
->next
= NULL
;
1889 * Try to allocate things from the last arena. If it is fully used,
1890 * check if we can allocate a new one and, if we cannot, consider
1891 * doing a "last ditch" GC unless already tried.
1893 thingsLimit
= THINGS_PER_ARENA(nbytes
);
1894 if (arenaList
->lastCount
!= thingsLimit
) {
1895 JS_ASSERT(arenaList
->lastCount
< thingsLimit
);
1896 a
= arenaList
->last
;
1900 if (doGC
|| JS_ON_TRACE(cx
))
1905 a
->list
= arenaList
;
1906 a
->prev
= arenaList
->last
;
1907 a
->prevUntracedPage
= 0;
1908 a
->u
.untracedThings
= 0;
1909 arenaList
->last
= a
;
1910 arenaList
->lastCount
= 0;
1913 flagp
= THING_FLAGP(a
, arenaList
->lastCount
);
1914 thing
= FLAGP_TO_THING(flagp
, nbytes
);
1915 arenaList
->lastCount
++;
1917 #ifdef JS_THREADSAFE
1919 * Refill the local free list by taking free things from the last
1920 * arena. Prefer to order free things by ascending address in the
1921 * (unscientific) hope of better cache locality.
1923 if (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
)
1926 freeLists
= EnsureLocalFreeList(cx
);
1929 if (freeLists
->array
[flindex
])
1931 lastptr
= &freeLists
->array
[flindex
];
1932 maxFreeThings
= thingsLimit
- arenaList
->lastCount
;
1933 if (maxFreeThings
> MAX_THREAD_LOCAL_THINGS
)
1934 maxFreeThings
= MAX_THREAD_LOCAL_THINGS
;
1935 while (maxFreeThings
!= 0) {
1938 tmpflagp
= THING_FLAGP(a
, arenaList
->lastCount
);
1939 tmpthing
= FLAGP_TO_THING(tmpflagp
, nbytes
);
1940 arenaList
->lastCount
++;
1941 tmpthing
->flagp
= tmpflagp
;
1942 *tmpflagp
= GCF_FINAL
; /* signifying that thing is free */
1944 *lastptr
= tmpthing
;
1945 lastptr
= &tmpthing
->next
;
1952 /* We successfully allocated the thing. */
1953 #ifdef JS_THREADSAFE
1956 lrs
= cx
->localRootStack
;
1959 * If we're in a local root scope, don't set newborn[type] at all, to
1960 * avoid entraining garbage from it for an unbounded amount of time
1961 * on this context. A caller will leave the local root scope and pop
1962 * this reference, allowing thing to be GC'd if it has no other refs.
1963 * See JS_EnterLocalRootScope and related APIs.
1965 if (js_PushLocalRoot(cx
, lrs
, (jsval
) thing
) < 0) {
1967 * When we fail for a thing allocated through the tail of the last
1968 * arena, thing's flag byte is not initialized. So to prevent GC
1969 * accessing the uninitialized flags during the finalization, we
1970 * always mark the thing as final. See bug 337407.
1977 * No local root scope, so we're stuck with the old, fragile model of
1978 * depending on a pigeon-hole newborn per type per context.
1980 cx
->weakRoots
.newborn
[flags
& GCF_TYPEMASK
] = thing
;
1983 /* We can't fail now, so update flags. */
1984 *flagp
= (uint8
)flags
;
1987 gchist
[gchpos
].lastDitch
= doGC
;
1988 gchist
[gchpos
].freeList
= rt
->gcArenaList
[flindex
].freeList
;
1989 if (++gchpos
== NGCHIST
)
1993 /* This is not thread-safe for thread-local allocations. */
1994 METER_IF(flags
& GCF_LOCK
, rt
->gcStats
.lockborn
++);
1996 #ifdef JS_THREADSAFE
2000 JS_COUNT_OPERATION(cx
, JSOW_ALLOCATION
);
2004 #ifdef JS_THREADSAFE
2008 METER(astats
->fail
++);
2009 if (!JS_ON_TRACE(cx
))
2010 JS_ReportOutOfMemory(cx
);
static JSGCDoubleCell *
RefillDoubleFreeList(JSContext *cx)
    jsbitmap *doubleFlags, usedBits;
    JSBool didGC = JS_FALSE;
    JSGCDoubleCell *cell, *list, *lastcell;

    JS_ASSERT(!cx->doubleFreeList);

    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);

    if (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke
        && (rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke))

    /*
     * Loop until we find a flag bitmap byte with unset bits indicating free
     * double cells, then set all bits as used and put the cells to the free
     * list for the current context.
     */
    doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
        if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
            ARENA_INFO_OFFSET) {
            if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
                !((JSGCArenaInfo *) doubleFlags)->prev) {
                if (didGC || JS_ON_TRACE(cx)) {
                    METER(rt->gcStats.doubleArenaStats.fail++);
                    if (!JS_ON_TRACE(cx))
                        JS_ReportOutOfMemory(cx);
                js_GC(cx, GC_LAST_DITCH);
                METER(rt->gcStats.doubleArenaStats.retry++);
                doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
            if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
                JS_ASSERT(!rt->gcDoubleArenaList.first);
                rt->gcDoubleArenaList.first = a;
                JS_ASSERT(rt->gcDoubleArenaList.first);
                ((JSGCArenaInfo *) doubleFlags)->prev = a;
            ClearDoubleArenaFlags(a);
            doubleFlags = DOUBLE_ARENA_BITMAP(a);
                DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);

        /*
         * When doubleFlags points to the last bitmap word in the arena, its
         * high bits correspond to non-existing cells. ClearDoubleArenaFlags
         * sets such bits to 1. Thus even for this last word a bit is unset
         * iff the corresponding cell exists and is free.
         */
        if (*doubleFlags != (jsbitmap) -1)

    rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
    usedBits = *doubleFlags;
    JS_ASSERT(usedBits != (jsbitmap) -1);
    *doubleFlags = (jsbitmap) -1;

    /*
     * Find the index corresponding to the first bit in *doubleFlags. The last
     * bit will have "index + JS_BITS_PER_WORD - 1".
     */
    index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
             DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
    cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;

    if (usedBits == 0) {
        /* The common case when all doubles from *doubleFlags are free. */
        JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
        for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
            cell->link = cell + 1;
        lastcell->link = NULL;
        /*
         * Assemble the free list from free cells from *doubleFlags starting
         * from the tail. In the loop
         *   index + bit >= DOUBLES_PER_ARENA
         * when bit is one of the unused bits. We do not check for such bits
         * explicitly as they must be set and the "if" check filters them out.
         */
        JS_ASSERT(index + JS_BITS_PER_WORD <=
                  DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
        bit = JS_BITS_PER_WORD;
            if (!(((jsbitmap) 1 << bit) & usedBits)) {
                JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
                JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
    JS_COUNT_OPERATION(cx, JSOW_ALLOCATION * JS_BITS_PER_WORD);

    /*
     * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue as
     * it immediately consumes the head of the list.
     */
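/*
 * Illustrative note (not part of the original source): the index math above
 * maps a bitmap word back to the cells it describes.  With hypothetical
 * numbers, if doubleFlags sits 16 bytes past DOUBLES_ARENA_BITMAP_OFFSET
 * within its arena, then index = 16 * JS_BITS_PER_BYTE = 128, so this word
 * covers cells 128..128 + JS_BITS_PER_WORD - 1, and cell points at cell 128
 * because masking with ~GC_ARENA_MASK recovers the GC_ARENA_SIZE-aligned
 * arena start.
 */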
js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
    JSGCArenaStats *astats;
    JSGCDoubleCell *cell;

    /* Updates of metering counters here are not thread-safe. */
    METER(astats = &cx->runtime->gcStats.doubleArenaStats);
    METER(astats->alloc++);
    cell = cx->doubleFreeList;
        cell = RefillDoubleFreeList(cx);
            METER(astats->fail++);
    METER(astats->localalloc++);
    cx->doubleFreeList = cell->link;
    *vp = DOUBLE_TO_JSVAL(&cell->number);
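/*
 * Illustrative caller pattern (a sketch, not taken from this file): vp must
 * refer to a location that is already a GC root, e.g. a traced slot or a
 * rooted jsval:
 *
 *     jsval v;
 *     if (!js_NewDoubleInRootedValue(cx, 3.14, &v))
 *         return JS_FALSE;
 *     // v now holds a tagged pointer to a freshly allocated jsdouble cell.
 */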
js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
    if (!js_NewDoubleInRootedValue(cx, d, &v))

    JS_ASSERT(JSVAL_IS_DOUBLE(v));
    dp = JSVAL_TO_DOUBLE(v);
    if (cx->localRootStack) {
        if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
        cx->weakRoots.newborn[GCX_DOUBLE] = dp;
js_AddAsGCBytes(JSContext *cx, size_t sz)
    if (rt->gcBytes >= rt->gcMaxBytes ||
        sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)
        || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke)
        if (JS_ON_TRACE(cx)) {
        js_GC(cx, GC_LAST_DITCH);
        if (rt->gcBytes >= rt->gcMaxBytes ||
            sz > (size_t) (rt->gcMaxBytes - rt->gcBytes)) {
            JS_ReportOutOfMemory(cx);
    rt->gcBytes += (uint32) sz;
js_RemoveAsGCBytes(JSRuntime *rt, size_t sz)
    JS_ASSERT((size_t) rt->gcBytes >= sz);
    rt->gcBytes -= (uint32) sz;
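/*
 * Illustrative note (not part of the original source): these two helpers let
 * memory that is not itself a GC thing, e.g. a malloc'ed buffer owned by an
 * object, count against rt->gcBytes.  A sketch of the expected pairing,
 * assuming the add call reports failure the way its out-of-memory path above
 * suggests:
 *
 *     if (!js_AddAsGCBytes(cx, nbytes))    // may run a last-ditch GC
 *         return JS_FALSE;
 *     ...
 *     js_RemoveAsGCBytes(rt, nbytes);      // when the memory is released
 */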
/*
 * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
 * they have no descendants to mark during the GC. Currently the optimization
 * is only used for non-dependent strings.
 */
#define GC_THING_IS_SHALLOW(flagp, thing)                                     \
     ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING ||                     \
      ((*(flagp) & GCF_TYPEMASK) == GCX_STRING &&                             \
       !JSSTRING_IS_DEPENDENT((JSString *) (thing)))))

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCLockHashEntry {
    JSDHashEntryHdr hdr;
} JSGCLockHashEntry;
js_LockGCThingRT(JSRuntime *rt, void *thing)
    JSGCLockHashEntry *lhe;

    flagp = GetGCThingFlagsOrNull(thing);
    shallow = GC_THING_IS_SHALLOW(flagp, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     */
    if (shallow && !(*flagp & GCF_LOCK)) {
        METER(rt->gcStats.lock++);
    if (!rt->gcLocksHash) {
        rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                           sizeof(JSGCLockHashEntry),
        if (!rt->gcLocksHash) {
    lhe = (JSGCLockHashEntry *)
          JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
        JS_ASSERT(lhe->count >= 1);
    METER(rt->gcStats.lock++);
js_UnlockGCThingRT(JSRuntime *rt, void *thing)
    JSGCLockHashEntry *lhe;

    flagp = GetGCThingFlagsOrNull(thing);
    shallow = GC_THING_IS_SHALLOW(flagp, thing);

    if (shallow && !(*flagp & GCF_LOCK))
    if (!rt->gcLocksHash ||
        (lhe = (JSGCLockHashEntry *)
               JS_DHashTableOperate(rt->gcLocksHash, thing,
         JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
        /* Shallow entry is not in the hash -> clear its lock bit. */
        *flagp &= ~GCF_LOCK;
        if (--lhe->count != 0)
        JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);

    rt->gcPoke = JS_TRUE;
    METER(rt->gcStats.unlock++);
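/*
 * Illustrative note (not part of the original source): locks nest.  A sketch
 * of the intended pairing through the runtime-level entry points:
 *
 *     js_LockGCThingRT(rt, str);      // first lock on a shallow string just
 *     js_LockGCThingRT(rt, str);      // sets GCF_LOCK; a further lock adds a
 *     js_UnlockGCThingRT(rt, str);    // counted gcLocksHash entry, and each
 *     js_UnlockGCThingRT(rt, str);    // unlock undoes one level.
 */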
JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
      case JSTRACE_OBJECT:
        /* If obj has no map, it must be a newborn. */
        obj = (JSObject *) thing;
        if (obj->map->ops->trace) {
            obj->map->ops->trace(trc, obj);
            nslots = STOBJ_NSLOTS(obj);
            for (i = 0; i != nslots; ++i) {
                v = STOBJ_GET_SLOT(obj, i);
                if (JSVAL_IS_TRACEABLE(v)) {
                    JS_SET_TRACING_INDEX(trc, "slot", i);
                    JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v),
                                  JSVAL_TRACE_KIND(v));

      case JSTRACE_STRING:
        str = (JSString *)thing;
        if (JSSTRING_IS_DEPENDENT(str))
            JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base");

#if JS_HAS_XML_SUPPORT
        js_TraceXML(trc, (JSXML *)thing);
/*
 * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
 */
#define THINGS_PER_UNTRACED_BIT(thingSize)                                    \
    JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
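/*
 * Illustrative note (not part of the original source), with hypothetical
 * numbers: if an arena holds 256 things of a given size and jsuword is 32
 * bits wide, THINGS_PER_UNTRACED_BIT is JS_HOWMANY(256, 32) = 8, so one bit
 * of u.untracedThings stands for a run of 8 consecutive things; setting it is
 * conservative and TraceDelayedChildren rescans the whole run.
 */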
DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
    uint32 untracedBitIndex;

    /*
     * Things with children to be traced later are marked with
     * GCF_MARK | GCF_FINAL flags.
     */
    JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
    *flagp |= GCF_FINAL;

    METER(rt->gcStats.untraced++);
    ++rt->gcTraceLaterCount;
    METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);

    a = FLAGP_TO_ARENA(flagp);
    untracedBitIndex = FLAGP_TO_INDEX(flagp) /
                       THINGS_PER_UNTRACED_BIT(a->list->thingSize);
    JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
    bit = (jsuword)1 << untracedBitIndex;
    if (a->u.untracedThings != 0) {
        JS_ASSERT(rt->gcUntracedArenaStackTop);
        if (a->u.untracedThings & bit) {
            /* bit already covers things with children to trace later. */
        a->u.untracedThings |= bit;
        /*
         * The thing is the first thing with not yet traced children in the
         * whole arena, so push the arena on the stack of arenas with things
         * to be traced later unless the arena has already been pushed. We
         * detect that by checking prevUntracedPage, as the field is 0 only
         * for arenas that have not yet been pushed. To ensure that
         *   prevUntracedPage != 0
         * even when the stack contains one element, we make the bottom
         * arena's prevUntracedPage point to itself.
         *
         * See comments in TraceDelayedChildren.
         */
        a->u.untracedThings = bit;
        if (a->prevUntracedPage == 0) {
            if (!rt->gcUntracedArenaStackTop) {
                /* Stack was empty, mark the arena as the bottom element. */
                a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
                JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
                a->prevUntracedPage =
                    ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
            rt->gcUntracedArenaStackTop = a;

    JS_ASSERT(rt->gcUntracedArenaStackTop);
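/*
 * Illustrative note (not part of the original source): because the untraced
 * "stack" is threaded through JSGCArenaInfo.prevUntracedPage and the per-arena
 * untracedThings word, delaying a trace never allocates memory, which is what
 * makes it a safe fallback exactly when the C stack is about to run out.
 */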
TraceDelayedChildren(JSTracer *trc)
    JSGCArenaInfo *a, *aprev;
    uint32 thingsPerUntracedBit;
    uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;

    rt = trc->context->runtime;
    a = rt->gcUntracedArenaStackTop;
        JS_ASSERT(rt->gcTraceLaterCount == 0);

        /*
         * The following assert verifies that the current arena belongs to the
         * untraced stack, since DelayTracingChildren ensures that even for
         * the stack's bottom prevUntracedPage != 0 but rather points to
         * itself.
         */
        JS_ASSERT(a->prevUntracedPage != 0);
        JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
        thingSize = a->list->thingSize;
        indexLimit = (a == a->list->last)
                     ? a->list->lastCount
                     : THINGS_PER_ARENA(thingSize);
        thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);

        /*
         * We cannot use a do-while loop here as a->u.untracedThings can be
         * zero before the loop as a leftover from the previous iterations.
         * See comments after the loop.
         */
        while (a->u.untracedThings != 0) {
            untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
            a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
            thingIndex = untracedBitIndex * thingsPerUntracedBit;
            endIndex = thingIndex + thingsPerUntracedBit;

            /*
             * endIndex can go beyond the last allocated thing as the real
             * limit can be "inside" the bit.
             */
            if (endIndex > indexLimit)
                endIndex = indexLimit;
            JS_ASSERT(thingIndex < indexLimit);

                /*
                 * Skip free or already traced things that share the bit
                 * with untraced ones.
                 */
                flagp = THING_FLAGP(a, thingIndex);
                if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
                *flagp &= ~GCF_FINAL;
                JS_ASSERT(rt->gcTraceLaterCount != 0);
                --rt->gcTraceLaterCount;
                thing = FLAGP_TO_THING(flagp, thingSize);
                JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
            } while (++thingIndex != endIndex);

        /*
         * We finished tracing of all things in the arena but we can only
         * pop it from the stack if the arena is the stack's top.
         *
         * When JS_TraceChildren from the above calls JS_CallTracer that in
         * turn on low C stack calls DelayTracingChildren and the latter
         * pushes new arenas to the untraced stack, we have to skip popping
         * of this arena until it becomes the top of the stack again.
         */
        if (a == rt->gcUntracedArenaStackTop) {
            aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
            a->prevUntracedPage = 0;
                /*
                 * prevUntracedPage points to itself and we reached the
                 * bottom of the stack.
                 */
            rt->gcUntracedArenaStackTop = a = aprev;
            a = rt->gcUntracedArenaStackTop;

    JS_ASSERT(rt->gcUntracedArenaStackTop);
    JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
    rt->gcUntracedArenaStackTop = NULL;
    JS_ASSERT(rt->gcTraceLaterCount == 0);
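/*
 * Illustrative note (not part of the original source): JS_FLOOR_LOG2W picks
 * the highest set bit, so within one arena the delayed runs are drained from
 * the highest thing indexes downward, and an arena pushed during draining is
 * always processed before the arenas below it on the untraced stack.
 */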
JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
    JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
    JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);

    if (!IS_GC_MARKING_TRACER(trc)) {
        trc->callback(trc, thing, kind);

    JS_ASSERT(rt->gcMarkingTracer == trc);
    JS_ASSERT(rt->gcLevel > 0);

    /*
     * Optimize for string and double as their size is known and their tracing
     */
      case JSTRACE_DOUBLE:
        a = THING_TO_ARENA(thing);
        JS_ASSERT(!a->list);
        if (!a->u.hasMarkedDoubles) {
            ClearDoubleArenaFlags(a);
            a->u.hasMarkedDoubles = JS_TRUE;
        index = DOUBLE_THING_TO_INDEX(thing);
        JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);

      case JSTRACE_STRING:
        flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
        JS_ASSERT((*flagp & GCF_FINAL) == 0);
        JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
        if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) {
            if (*flagp & GCF_MARK)
            thing = JSSTRDEP_BASE((JSString *) thing);

    flagp = GetGCThingFlags(thing);
    JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
    if (*flagp & GCF_MARK)

    /*
     * We check for the non-final flag only if mark is unset as
     * DelayTracingChildren uses the flag. See comments in the function.
     */
    JS_ASSERT(*flagp != GCF_FINAL);

    if (!cx->insideGCMarkCallback) {
        /*
         * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
         * uses the non-recursive code that otherwise would be called only on
         * a low C stack condition.
         */
#ifdef JS_GC_ASSUME_LOW_C_STACK
# define RECURSION_TOO_DEEP() JS_TRUE
# define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
        if (RECURSION_TOO_DEEP())
            DelayTracingChildren(rt, flagp);
            JS_TraceChildren(trc, thing, kind);
        /*
         * For API compatibility we allow for the callback to assume that
         * after it calls JS_MarkGCThing for the last time, the callback can
         * start to finalize its own objects that are only referenced by
         * unmarked GC things.
         *
         * Since we do not know which call from inside the callback is the
         * last, we ensure that children of all marked things are traced and
         * call TraceDelayedChildren(trc) after tracing the thing.
         *
         * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
         * for the things with untraced children, calling DelayTracingChildren
         * is useless here. Hence we always trace the thing's children even
         * with a
         */
        cx->insideGCMarkCallback = JS_FALSE;
        JS_TraceChildren(trc, thing, kind);
        TraceDelayedChildren(trc);
        cx->insideGCMarkCallback = JS_TRUE;

    trc->debugPrinter = NULL;
    trc->debugPrintArg = NULL;
    return;     /* to avoid out: right_curl when DEBUG is not defined */
js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
    if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
        thing = JSVAL_TO_TRACEABLE(v);
        kind = JSVAL_TRACE_KIND(v);
        JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
    } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
        /* v can be an arbitrary GC thing reinterpreted as an object. */
        thing = JSVAL_TO_OBJECT(v);
        kind = js_GetGCThingTraceKind(thing);

    JS_CallTracer(trc, thing, kind);
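/*
 * Illustrative note (not part of the original source): this helper accepts
 * any jsval.  For double and string tags the kind follows from the tag; for
 * the object tag the referent may really be some other GC thing stored
 * without a precise tag, so the kind is recomputed from the thing's flags via
 * js_GetGCThingTraceKind, and non-GC values such as ints, booleans, and
 * JSVAL_NULL are simply not traced.
 */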
static JSDHashOperator
gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    JSTracer *trc = (JSTracer *)arg;
    jsval *rp = (jsval *)rhe->root;

    /* Ignore null object and scalar values. */
    if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
        JSBool root_points_to_gcArenaList = JS_FALSE;
        jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
        JSGCArenaList *arenaList;

        rt = trc->context->runtime;
        for (i = 0; i < GC_NUM_FREELISTS; i++) {
            arenaList = &rt->gcArenaList[i];
            thingSize = arenaList->thingSize;
            limit = (size_t) arenaList->lastCount * thingSize;
            for (a = arenaList->last; a; a = a->prev) {
                if (thing - ARENA_INFO_TO_START(a) < limit) {
                    root_points_to_gcArenaList = JS_TRUE;
                limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
        if (!root_points_to_gcArenaList) {
            for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
                if (thing - ARENA_INFO_TO_START(a) <
                    DOUBLES_PER_ARENA * sizeof(jsdouble)) {
                    root_points_to_gcArenaList = JS_TRUE;
        if (!root_points_to_gcArenaList && rhe->name) {
                "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
                "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
                "The root's name is \"%s\".\n",
        JS_ASSERT(root_points_to_gcArenaList);

    JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
    js_CallValueTracerIfGCThing(trc, v);

    return JS_DHASH_NEXT;
static JSDHashOperator
gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
    void *thing = (void *)lhe->thing;
    JSTracer *trc = (JSTracer *)arg;

    JS_ASSERT(lhe->count >= 1);
    traceKind = js_GetGCThingTraceKind(thing);
    JS_CALL_TRACER(trc, thing, traceKind, "locked object");
    return JS_DHASH_NEXT;
#define TRACE_JSVALS(trc, len, vec, name)                                     \
        jsval _v, *_vp, *_end;                                                \
        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
            if (JSVAL_IS_TRACEABLE(_v)) {                                     \
                JS_SET_TRACING_INDEX(trc, name, _vp - (vec));                 \
                JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v),                    \
                              JSVAL_TRACE_KIND(_v));                          \
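/*
 * Illustrative usage (taken from js_TraceStackFrame below): trace a vector of
 * nslots jsvals, labeling each traced slot by its index for debug tracers:
 *
 *     TRACE_JSVALS(trc, nslots, fp->slots, "slot");
 */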
js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
    uintN nslots, minargs, skip;

    JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
    JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
    JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
    js_TraceScript(trc, fp->script);

    /* fp->slots is null for watch pseudo-frames, see js_watch_set. */
        /*
         * Don't mark what has not been pushed yet, or what has been
         */
        nslots = (uintN) (fp->regs->sp - fp->slots);
        JS_ASSERT(nslots >= fp->script->nfixed);
        nslots = fp->script->nfixed;
        TRACE_JSVALS(trc, nslots, fp->slots, "slot");

        JS_ASSERT(!fp->slots);
        JS_ASSERT(!fp->regs);

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
    JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");

    JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");

        minargs = FUN_MINARGS(fp->fun);
        if (minargs > nslots)
        if (!FUN_INTERPRETED(fp->fun)) {
            JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
            nslots += fp->fun->u.n.extra;
        if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
            skip = 2 + fp->argc;
        TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");

    JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
    JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
    JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
    if (fp->xmlNamespace)
        JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
    static const char *weakRootNames[JSTRACE_LIMIT] = {
    for (i = 0; i != JSTRACE_LIMIT; i++) {
        thing = wr->newborn[i];
            JS_CALL_TRACER(trc, thing, i, weakRootNames[i]);

    JS_ASSERT(i == GCX_EXTERNAL_STRING);
    for (; i != GCX_NTYPES; ++i) {
        thing = wr->newborn[i];
            JS_SET_TRACING_INDEX(trc, "newborn external string",
                                 i - GCX_EXTERNAL_STRING);
            JS_CallTracer(trc, thing, JSTRACE_STRING);

    JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
    JS_SET_TRACING_NAME(trc, "lastInternalResult");
    js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
js_TraceContext(JSTracer *trc, JSContext *acx)
    JSStackFrame *fp, *nextChain;
    JSTempValueRooter *tvr;

    if (IS_GC_MARKING_TRACER(trc)) {

#define FREE_OLD_ARENAS(pool)                                                 \
            JSArena * _a = (pool).current;                                    \
            if (_a == (pool).first.next &&                                    \
                _a->avail == _a->base + sizeof(int64)) {                      \
                _age = JS_Now() - *(int64 *) _a->base;                        \
                if (_age > (int64) acx->runtime->gcEmptyArenaPoolLifespan *   \
                    JS_FreeArenaPool(&(pool));                                \

#ifdef JS_THREADSAFE
        js_RevokeGCLocalFreeLists(acx);

        /*
         * Release the stackPool's arenas if the stackPool has existed for
         * longer than the limit specified by gcEmptyArenaPoolLifespan.
         */
        FREE_OLD_ARENAS(acx->stackPool);

        /*
         * Release the regexpPool's arenas based on the same criterion as for
         */
        FREE_OLD_ARENAS(acx->regexpPool);

        /*
         * Clear the double free list to release all the pre-allocated doubles.
         */
        acx->doubleFreeList = NULL;

    /*
     * Iterate frame chain and dormant chains.
     *
     * (NB: see comment on this whole "dormant" thing in js_Execute.)
     */
    fp = js_GetTopStackFrame(acx);
    nextChain = acx->dormantFrameChain;

        /* The top frame must not be dormant. */
        JS_ASSERT(!fp->dormantNext);
            js_TraceStackFrame(trc, fp);
        } while ((fp = fp->down) != NULL);

        nextChain = nextChain->dormantNext;

    /* Mark other roots-by-definition in acx. */
    if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
        JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
    TraceWeakRoots(trc, &acx->weakRoots);
    if (acx->throwing) {
        JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
        /* Avoid keeping GC-ed junk stored in JSContext.exception. */
        acx->exception = JSVAL_NULL;
#if JS_HAS_LVALUE_RETURN
        JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");

    for (sh = acx->stackHeaders; sh; sh = sh->down) {
        METER(trc->context->runtime->gcStats.stackseg++);
        METER(trc->context->runtime->gcStats.segslots += sh->nslots);
        TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");

    if (acx->localRootStack)
        js_TraceLocalRoots(trc, acx->localRootStack);

    for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
        switch (tvr->count) {
            JS_SET_TRACING_NAME(trc, "tvr->u.value");
            js_CallValueTracerIfGCThing(trc, tvr->u.value);
            tvr->u.trace(trc, tvr);
            TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
          case JSTVU_WEAK_ROOTS:
            TraceWeakRoots(trc, tvr->u.weakRoots);
          case JSTVU_PARSE_CONTEXT:
            js_TraceParseContext(trc, tvr->u.parseContext);
            js_TraceScript(trc, tvr->u.script);
            JS_ASSERT(tvr->count >= 0);
            TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");

    if (acx->sharpObjectMap.depth > 0)
        js_TraceSharpMap(trc, &acx->sharpObjectMap);
js_TraceTraceMonitor(JSTracer *trc, JSTraceMonitor *tm)
    if (IS_GC_MARKING_TRACER(trc)) {
        tm->recoveryDoublePoolPtr = tm->recoveryDoublePool;
        /*
         * Make sure the global shape changes and will force a flush of the
         * code cache.
         */
        tm->globalShape = -1;
js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
    JSRuntime *rt = trc->context->runtime;
    JSContext *iter, *acx;

    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
    js_TraceAtomState(trc, allAtoms);
    js_TraceNativeEnumerators(trc);
    js_TraceRuntimeNumberState(trc);

    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
        js_TraceContext(trc, acx);

    if (rt->gcExtraRootsTraceOp)
        rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);

#ifdef JS_THREADSAFE
    /* Trace the loop table(s) which can contain pointers to code objects. */
    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        js_TraceTraceMonitor(trc, &acx->thread->traceMonitor);
    js_TraceTraceMonitor(trc, &rt->traceMonitor);
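/*
 * Illustrative sketch (not part of the original source): an embedding can
 * reuse this entry point to walk every reachable thing with a non-marking
 * tracer, mirroring how js_GC initializes its marking tracer below but with
 * a non-null callback.  CountThing is a hypothetical callback name.
 *
 *     JSTracer trc;
 *     JS_TRACER_INIT(&trc, cx, CountThing);  // callback != NULL, so
 *     js_TraceRuntime(&trc, JS_FALSE);       // JS_CallTracer just invokes it
 */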
ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
    JSObject *obj, *pobj;

        pobj = js_GetWrappedObject(cx, pobj);
            ssr->errnum = JSMSG_CYCLIC_VALUE;
        pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));

    if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
        JSScope *scope, *newscope;

        /* Check to see whether obj shares its prototype's scope. */
        scope = OBJ_SCOPE(obj);
        oldproto = STOBJ_GET_PROTO(obj);
        if (oldproto && OBJ_SCOPE(oldproto) == scope) {
            /* Either obj needs a new empty scope, or it should share pobj's. */
                !OBJ_IS_NATIVE(pobj) ||
                OBJ_GET_CLASS(cx, pobj) != STOBJ_GET_CLASS(oldproto)) {
                /*
                 * With no proto and no scope of its own, obj is truly empty.
                 *
                 * If pobj is not native, obj needs its own empty scope -- it
                 * should not continue to share oldproto's scope once oldproto
                 * is not on obj's prototype chain. That would put properties
                 * from oldproto's scope ahead of properties defined by pobj,
                 *
                 * If pobj's class differs from oldproto's, we may need a new
                 * scope to handle differences in private and reserved slots,
                 * so we suboptimally but safely make one.
                 */
                if (!js_GetMutableScope(cx, obj)) {
                    ssr->errnum = JSMSG_OUT_OF_MEMORY;
            } else if (OBJ_SCOPE(pobj) != scope) {
                newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
                obj->map = &newscope->map;
                js_DropObjectMap(cx, &scope->map, obj);
                JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);

        /*
         * Regenerate property cache shape ids for all of the scopes along the
         * old prototype chain, in case any property cache entries were filled
         * by looking up starting from obj.
         */
        while (oldproto && OBJ_IS_NATIVE(oldproto)) {
            scope = OBJ_SCOPE(oldproto);
            SCOPE_MAKE_UNIQUE_SHAPE(cx, scope);
            oldproto = STOBJ_GET_PROTO(scope->object);

    /* Finally, do the deed. */
    STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
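/*
 * Illustrative note (not part of the original source): proto and parent slot
 * writes are queued on rt->setSlotRequests rather than applied at the point
 * where the engine decides to change them; js_GC drains that queue under
 * GC_SET_SLOT_REQUEST below, after all requests have quiesced and with
 * rt->gcRunning set, so the slot update, scope rehoming, and shape
 * regeneration above happen while no other thread can run JS.
 */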
DestroyScriptsToGC(JSContext *cx, JSScript **listp)
    while ((script = *listp) != NULL) {
        *listp = script->u.nextToGC;
        script->u.nextToGC = NULL;
        js_DestroyScript(cx, script);
/*
 * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
 * rt->gcLock already held, so the lock should be kept on return.
 */
js_GC(JSContext *cx, JSGCInvocationKind gckind)
    JSGCCallback callback;
    uint32 thingSize, indexLimit;
    JSGCArenaInfo *a, **ap, *emptyArenas;
    uint8 flags, *flagp;
    JSGCThing *thing, *freeList;
    JSGCArenaList *arenaList;
#ifdef JS_THREADSAFE
    uint32 requestDebit;
    JSContext *acx, *iter;
    uint32 nlivearenas, nkilledarenas, nthings;

    JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));

#ifdef JS_THREADSAFE
    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));

    if (gckind & GC_KEEP_ATOMS) {
        /*
         * The set slot request and last ditch GC kinds preserve all atoms and
         */
        keepAtoms = JS_TRUE;
        /* Keep atoms when a suspended compile is running on another context. */
        keepAtoms = (rt->gcKeepAtoms != 0);
        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime. The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)

  restart_at_beginning:
    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context). Invoke the callback regardless. Sample the
     * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
     */
    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
        if (gckind & GC_LOCK_HELD)
        ok = callback(cx, JSGC_BEGIN);
        if (gckind & GC_LOCK_HELD)
        if (!ok && gckind != GC_LAST_CONTEXT) {
            /*
             * It's possible that we've looped back to this code from the
             * 'goto restart_at_beginning' below in the GC_SET_SLOT_REQUEST
             * code and that rt->gcLevel is now 0. Don't return without
             * notifying!
             */
            if (rt->gcLevel == 0 && (gckind & GC_LOCK_HELD))
                JS_NOTIFY_GC_DONE(rt);

    /* Lock out other GC allocator and collector invocations. */
    if (!(gckind & GC_LOCK_HELD))

    METER(rt->gcStats.poke++);
    rt->gcPoke = JS_FALSE;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->thread->id == js_CurrentThreadId());

    /* Bump gcLevel and return rather than nest on this thread. */
    if (rt->gcThread == cx->thread) {
        JS_ASSERT(rt->gcLevel > 0);
        METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
        if (!(gckind & GC_LOCK_HELD))

    /*
     * If we're in one or more requests (possibly on more than one context)
     * running on the current thread, indicate, temporarily, that all these
     * requests are inactive. If cx->thread is NULL, then cx is not using
     * the request model, and does not contribute to rt->requestCount.
     */
        JSCList *head, *link;

        /*
         * Check all contexts on cx->thread->contextList for active requests,
         * counting each such context against requestDebit.
         */
        head = &cx->thread->contextList;
        for (link = head->next; link != head; link = link->next) {
            acx = CX_FROM_THREAD_LINKS(link);
            JS_ASSERT(acx->thread == cx->thread);
            if (acx->requestDepth)

        /*
         * We assert, but check anyway, in case someone is misusing the API.
         * Avoiding the loop over all of rt's contexts is a win in the event
         * that the GC runs only on request-less contexts with null threads,
         * in a special thread such as might be used by the UI/DOM/Layout
         * "mozilla" or "main" thread in Mozilla-the-browser.
         */
        JS_ASSERT(cx->requestDepth == 0);
        if (cx->requestDepth)

        JS_ASSERT(requestDebit <= rt->requestCount);
        rt->requestCount -= requestDebit;
        if (rt->requestCount == 0)
            JS_NOTIFY_REQUEST_DONE(rt);

    /* If another thread is already in GC, don't attempt GC; wait instead. */
    if (rt->gcLevel > 0) {
        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);

        /* Wait for the other thread to finish, then resume our request. */
        while (rt->gcLevel > 0)
            JS_AWAIT_GC_DONE(rt);
        rt->requestCount += requestDebit;
        if (!(gckind & GC_LOCK_HELD))

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcThread = cx->thread;

    /* Wait for all other requests to finish. */
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);

#else  /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
    if (rt->gcLevel > 1)

#endif /* !JS_THREADSAFE */
    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;

    if (gckind == GC_SET_SLOT_REQUEST) {
        JSSetSlotRequest *ssr;

        while ((ssr = rt->setSlotRequests) != NULL) {
            rt->setSlotRequests = ssr->next;
            ProcessSetSlotRequest(cx, ssr);

        /*
         * We assume here that killing links to parent and prototype objects
         * does not create garbage (such objects typically are long-lived and
         * widely shared, e.g. global objects, Function.prototype, etc.). We
         * collect garbage only if a racing thread attempted GC and is waiting
         * for us to finish (gcLevel > 1) or if someone already poked us.
         */
        if (rt->gcLevel == 1 && !rt->gcPoke)

        rt->gcPoke = JS_FALSE;
        rt->gcRunning = JS_FALSE;
#ifdef JS_THREADSAFE
        rt->gcThread = NULL;
        rt->requestCount += requestDebit;
        gckind = GC_LOCK_HELD;
        goto restart_at_beginning;

    if (JS_ON_TRACE(cx))

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

#ifdef JS_DUMP_SCOPE_METERS
    { extern void js_DumpScopeMeters(JSRuntime *rt);
      js_DumpScopeMeters(rt);

    /* Clear property and JIT oracle caches (only for cx->thread if JS_THREADSAFE). */
    js_FlushPropertyCache(cx);
    js_FlushJITOracle(cx);

    /* Destroy eval'ed scripts. */
    DestroyScriptsToGC(cx, &JS_SCRIPTS_TO_GC(cx));

#ifdef JS_THREADSAFE
    /*
     * Clear thread-based caches. To avoid redundant clearing we unroll the
     * current thread's step.
     *
     * In case a JSScript wrapped within an object was finalized, we null
     * acx->thread->gsnCache.script and finish the cache's hashtable. Note
     * that js_DestroyScript, called from script_finalize, will have already
     * cleared cx->thread->gsnCache above during finalization, so we don't
     */
    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        if (!acx->thread || acx->thread == cx->thread)
        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
        js_FlushPropertyCache(acx);
        js_FlushJITOracle(acx);
        DestroyScriptsToGC(cx, &acx->thread->scriptsToGC);
    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
    GSN_CACHE_CLEAR(&rt->gsnCache);

    JS_ASSERT(!rt->gcUntracedArenaStackTop);
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    /* Reset the property cache's type id generator so we can compress ids. */

    JS_TRACER_INIT(&trc, cx, NULL);
    rt->gcMarkingTracer = &trc;
    JS_ASSERT(IS_GC_MARKING_TRACER(&trc));

    for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
        a->u.hasMarkedDoubles = JS_FALSE;

    js_TraceRuntime(&trc, keepAtoms);
    js_MarkScriptFilenames(rt, keepAtoms);

    /*
     * Mark children of things that caused too deep recursion during the above
     */
    TraceDelayedChildren(&trc);

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    rt->gcMarkingTracer = NULL;
    /*
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * We first sweep atom state so we can use js_IsAboutToBeFinalized on
     * JSString or jsdouble held in a hashtable to check if the hashtable
     * entry can be freed. Note that even after the entry is freed, JSObject
     * finalizers can continue to access the corresponding jsdouble* and
     * JSString* assuming that they are unique. This works since the
     * atomization API must not be called during GC.
     */
    js_SweepAtomState(cx);

    /* Finalize iterator states before the objects they iterate over. */
    CloseNativeIterators(cx);

    /* Finalize watch points associated with unreachable objects. */
    js_SweepWatchPoints(cx);

    /* Save the pre-sweep count of scope-mapped properties. */
    rt->liveScopePropsPreSweep = rt->liveScopeProps;

    /*
     * Here we need to ensure that JSObject instances are finalized before
     * GC-allocated JSString and jsdouble instances, so an object's finalizer
     * can access them even if they will be freed. For that we simply finalize
     * the list containing JSObject first, since the static assert at the
     * beginning of the file guarantees that JSString and jsdouble instances
     * are allocated from a different list.
     */
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i == 0
                                     ? GC_FREELIST_INDEX(sizeof(JSObject))
                                     : i == GC_FREELIST_INDEX(sizeof(JSObject))
        ap = &arenaList->last;

        JS_ASSERT(arenaList->lastCount > 0);
        arenaList->freeList = NULL;
        thingSize = arenaList->thingSize;
        indexLimit = THINGS_PER_ARENA(thingSize);
        flagp = THING_FLAGP(a, arenaList->lastCount - 1);
        METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
            JS_ASSERT(a->prevUntracedPage == 0);
            JS_ASSERT(a->u.untracedThings == 0);
                if (flags & (GCF_MARK | GCF_LOCK)) {
                    *flagp &= ~GCF_MARK;
                    allClear = JS_FALSE;
                    thing = FLAGP_TO_THING(flagp, thingSize);
                    if (!(flags & GCF_FINAL)) {
                        /*
                         * Call the finalizer with GCF_FINAL ORed into flags.
                         */
                        *flagp = (uint8)(flags | GCF_FINAL);
                        type = flags & GCF_TYPEMASK;
                            js_FinalizeObject(cx, (JSObject *) thing);
#if JS_HAS_XML_SUPPORT
                            js_FinalizeXML(cx, (JSXML *) thing);
                            JS_ASSERT(type == GCX_STRING ||
                                      type - GCX_EXTERNAL_STRING <
                                      GCX_NTYPES - GCX_EXTERNAL_STRING);
                            js_FinalizeStringRT(rt, (JSString *) thing,
                                                GCX_EXTERNAL_STRING),
                    memset(thing, JS_FREE_PATTERN, thingSize);
                    thing->flagp = flagp;
                    thing->next = freeList;
            } while (++flagp != THING_FLAGS_END(a));

                /*
                 * Forget just assembled free list head for the arena and
                 * add the arena itself to the destroy list.
                 */
                freeList = arenaList->freeList;
                if (a == arenaList->last)
                    arenaList->lastCount = (uint16) indexLimit;
                a->prev = emptyArenas;
                METER(nkilledarenas++);
                arenaList->freeList = freeList;
                METER(nlivearenas++);
            flagp = THING_FLAGP(a, indexLimit - 1);

        /*
         * We use arenaList - &rt->gcArenaList[0], not i, as the stat index
         * due to the enumeration reorder at the beginning of the loop.
         */
        METER(UpdateArenaStats(&rt->gcStats.arenaStats[arenaList -
                                                       &rt->gcArenaList[0]],
                               nlivearenas, nkilledarenas, nthings));

#ifdef JS_THREADSAFE
    /*
     * Release all but two free list sets to avoid allocating a new set in
     */
    TrimGCFreeListsPool(rt, 2);

    ap = &rt->gcDoubleArenaList.first;
    METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
    while ((a = *ap) != NULL) {
        if (!a->u.hasMarkedDoubles) {
            /* No marked double values in the arena. */
            a->prev = emptyArenas;
            METER(nkilledarenas++);
            for (i = 0; i != DOUBLES_PER_ARENA; ++i) {
                if (IsMarkedDouble(a, index))
            METER(nlivearenas++);

    METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
                           nlivearenas, nkilledarenas, nthings));
    rt->gcDoubleArenaList.nextDoubleFlags =
        rt->gcDoubleArenaList.first
        ? DOUBLE_ARENA_BITMAP(rt->gcDoubleArenaList.first)
        : DOUBLE_BITMAP_SENTINEL;
    /*
     * Sweep the runtime's property tree after finalizing objects, in case any
     * had watchpoints referencing tree nodes.
     */
    js_SweepScopeProperties(cx);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. In this way when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Destroy arenas after we have finished the sweeping so finalizers can
     * safely use js_IsAboutToBeFinalized().
     */
    DestroyGCArenas(rt, emptyArenas);

        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
    { extern void DumpSrcNoteSizeHist();
      DumpSrcNoteSizeHist();
      printf("GC HEAP SIZE %lu\n", (unsigned long) rt->gcBytes);

#ifdef JS_SCOPE_DEPTH_METER
        fp = fopen("/tmp/scopedepth.stats", "w");
        JS_DumpBasicStats(&rt->protoLookupDepthStats, "proto-lookup depth", fp);
        JS_DumpBasicStats(&rt->scopeSearchDepthStats, "scope-search depth", fp);
        JS_DumpBasicStats(&rt->hostenvScopeDepthStats, "hostenv scope depth", fp);
        JS_DumpBasicStats(&rt->lexicalScopeDepthStats, "lexical scope depth", fp);
#endif /* JS_SCOPE_DEPTH_METER */

#ifdef JS_DUMP_LOOP_STATS
    { static FILE *lsfp;
      lsfp = fopen("/tmp/loopstats", "w");
      JS_DumpBasicStats(&rt->loopStats, "loops", lsfp);
#endif /* JS_DUMP_LOOP_STATS */

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (rt->gcLevel > 1 || rt->gcPoke) {
        rt->gcPoke = JS_FALSE;

    if (rt->shapeGen >= SHAPE_OVERFLOW_BIT - 1) {
        /*
         * FIXME bug 440834: The shape id space has overflowed. Currently we
         * cope badly with this. Every call to js_GenerateShape does GC, and
         * we never re-enable the property cache.
         */
        js_DisablePropertyCache(cx);
#ifdef JS_THREADSAFE
        while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
            if (!acx->thread || acx->thread == cx->thread)
            js_DisablePropertyCache(acx);

    rt->gcLastBytes = rt->gcBytes;

    rt->gcRunning = JS_FALSE;
#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    rt->requestCount += requestDebit;
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);

    /*
     * Unlock unless we have GC_LOCK_HELD which requires locked GC on return.
     */
    if (!(gckind & GC_LOCK_HELD))

    /*
     * Execute the JSGC_END callback outside the lock. Again, sample the
     * callback pointer in case it changes, since we are outside of the GC
     * vs. requests interlock mechanism here.
     */
    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind & GC_KEEP_ATOMS) {
            /*
             * We allow the JSGC_END implementation to force a full GC or
             * allocate new GC things. Thus we must protect the weak roots
             * from garbage collection and overwrites.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);

        (void) callback(cx, JSGC_END);

        if (gckind & GC_KEEP_ATOMS) {
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown iterate until JSGC_END callback stops creating
             */
            goto restart_at_beginning;
js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
    uint32 *pbytes, bytes;

#ifdef JS_THREADSAFE
    pbytes = &cx->thread->gcMallocBytes;
    pbytes = &cx->runtime->gcMallocBytes;
    *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
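/*
 * Illustrative note (not part of the original source): the update saturates
 * instead of wrapping.  With hypothetical numbers, if bytes is 0xFFFFFFF0 and
 * nbytes is 0x20, then (uint32)-1 - bytes == 0xF <= 0x20, so the counter is
 * pinned at 0xFFFFFFFF rather than wrapping around to a small value that
 * would postpone the next malloc-triggered last-ditch GC.
 */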