/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena allocated
 * using malloc. It uses an ideally parallel array of flag bytes to hold the
 * mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */

#include <stdlib.h>     /* for free */
#include <string.h>     /* for memset used when DEBUG */
#include "jsutil.h" /* Added by JSIFY */
#include "jshash.h" /* Added by JSIFY */

#if JS_HAS_XML_SUPPORT
# include "jsxml.h"
#endif
/*
 * Deduce if mmap or similar is available.
 */
#ifndef JS_GC_USE_MMAP
# if defined(XP_WIN)
#  define JS_GC_USE_MMAP 1
# elif defined(XP_UNIX) || defined(XP_BEOS)
#  include <unistd.h>
#  if defined(_POSIX_MAPPED_FILES) && _POSIX_MAPPED_FILES > 0
#   define JS_GC_USE_MMAP 1
#  endif
# endif
#endif

#ifndef JS_GC_USE_MMAP
# define JS_GC_USE_MMAP 0
#endif

#if JS_GC_USE_MMAP
# if defined(XP_WIN)
#  include <windows.h>
# elif defined(XP_UNIX) || defined(XP_BEOS)
#  include <sys/mman.h>

/* On Mac OS X MAP_ANONYMOUS is not defined. */
#  if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#   define MAP_ANONYMOUS MAP_ANON
#  endif
# endif
#endif
/*
 * A GC arena contains one flag byte for each thing in its heap, and supports
 * O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we allocate things of the same size from a GC arena
 * containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
 * following picture shows the arena's layout:
 *
 *  +------------------------------+--------------------+---------------+
 *  | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
 *  +------------------------------+--------------------+---------------+
 *
 * For a GC thing of size thingSize the number of things that the arena can
 * hold is:
 *
 *   (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1)
 *
 * The address of a thing's flag is given by:
 *
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) -
 *   (thingAddress & GC_ARENA_MASK) / thingSize
 *
 * where
 *   (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
 * is the last byte of the flags' area and
 *   (thingAddress & GC_ARENA_MASK) / thingSize
 * is the thing's index counting from the arena's start.
 *
 * Things are allocated from the start of their area and flags are allocated
 * from the end of their area. This avoids calculating the location of the
 * boundary separating things and flags.
 *
 * The JS_GC_USE_MMAP macro governs the allocation of aligned arenas. When the
 * macro is true, platform-specific allocation code such as POSIX mmap is used
 * with no extra overhead. If the macro is false, the code uses malloc to
 * allocate a chunk of
 *
 *   GC_ARENA_SIZE * (js_gcArenasPerChunk + 1)
 *
 * bytes. The chunk contains at least js_gcArenasPerChunk aligned arenas, so
 * the overhead of this scheme is approximately 1/js_gcArenasPerChunk. See
 * NewGCChunk/DestroyGCChunk below for details.
 *
 * Note that even when JS_GC_USE_MMAP is true the code still allocates arenas
 * in chunks to minimize the overhead of mmap/munmap.
 */
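/*
 * Worked example (illustrative numbers only): with mmap-backed 4 KB arenas
 * (GC_ARENA_SHIFT == 12) on a 32-bit platform, sizeof(JSGCArenaInfo) is
 * 4 * sizeof(jsuword) == 16, so an arena of 16-byte things holds
 * (4096 - 16) / (16 + 1) == 240 things. The thing at offset 32 from the
 * arena start has index 32 / 16 == 2, and its flag byte lives at
 * arenaStart + 4095 - 16 - 2 == arenaStart + 4077, two bytes below the
 * last flag byte at arenaStart + 4079.
 */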
/*
 * When mmap is available, use the minimal known CPU page size as the size of
 * GC arena. Otherwise use 1K arenas to minimize the overhead of the aligned
 * allocation.
 */
#if JS_GC_USE_MMAP
# define GC_ARENA_SHIFT             12
#else
# define GC_ARENA_SHIFT             10
#endif

#define GC_ARENA_MASK               ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
#define GC_ARENA_SIZE               JS_BIT(GC_ARENA_SHIFT)
struct JSGCArenaInfo {
    /*
     * Allocation list for the arena.
     */
    JSGCArenaList   *list;

    /*
     * Pointer to the previous arena in a linked list. The arena can either
     * belong to one of JSRuntime.gcArenaList lists or, when it does not have
     * any allocated GC things, to the list of free arenas in the chunk with
     * head stored in JSGCChunkInfo.lastFreeArena.
     */
    JSGCArenaInfo   *prev;

    /*
     * A link field for the list of arenas with marked but not yet traced
     * things. The field is encoded as arena's page to share the space with
     * firstArena and arenaIndex fields.
     */
    jsuword         prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;

    /*
     * When firstArena is false, the index of arena in the chunk. When
     * firstArena is true, the index of a free arena holding JSGCChunkInfo or
     * NO_FREE_ARENAS if there are no free arenas in the chunk.
     *
     * GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
     * access either of the indexes.
     */
    jsuword         arenaIndex : GC_ARENA_SHIFT - 1;

    /* Flag indicating if the arena is the first in the chunk. */
    jsuword         firstArena : 1;

    /* Bitset for fast search of marked but not yet traced things. */
    jsuword         untracedThings;
};

/*
 * Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
 * as possible. The code does not rely on this check so if on a particular
 * platform this does not compile, then, as a workaround, comment the assert
 * out and submit a bug report.
 */
JS_STATIC_ASSERT(sizeof(JSGCArenaInfo) == 4 * sizeof(jsuword));
#define NO_FREE_ARENAS              JS_BITMASK(GC_ARENA_SHIFT - 1)

/*
 * All chunks that have at least one free arena are put on the doubly-linked
 * list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
 * the head of the chunk's free arena list together with the link fields for
 * the doubly-linked list.
 *
 * The structure is stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX
 * macro gives the index of this arena. When all arenas in the chunk are used,
 * it is removed from the list and the index is set to NO_FREE_ARENAS
 * indicating that the chunk is not on gcChunkList and has no JSGCChunkInfo
 * available.
 */
struct JSGCChunkInfo {
    JSGCChunkInfo   **prevp;
    JSGCChunkInfo   *next;
    JSGCArenaInfo   *lastFreeArena;
    uint32          numFreeArenas;
};

#if JS_GC_USE_MMAP
/*
 * Even when mmap is available, its overhead may be too big so the final
 * decision to use it is done at runtime.
 */
static uint32 js_gcArenasPerChunk = 0;
static JSBool js_gcUseMmap = JS_FALSE;
#else
# define js_gcArenasPerChunk 31
#endif
/*
 * Macros to convert between JSGCArenaInfo, the start address of the arena and
 * arena's page defined as (start address) >> GC_ARENA_SHIFT.
 */
#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))

#define IS_ARENA_INFO_ADDRESS(arena)                                          \
    (((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)

#define ARENA_START_TO_INFO(arenaStart)                                       \
    (JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0),                \
     (JSGCArenaInfo *) ((arenaStart) + ARENA_INFO_OFFSET))

#define ARENA_INFO_TO_START(arena)                                            \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     (jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)

#define ARENA_PAGE_TO_INFO(arenaPage)                                         \
    (JS_ASSERT(arenaPage != 0),                                               \
     JS_ASSERT(((arenaPage) >> (JS_BITS_PER_WORD - GC_ARENA_SHIFT)) == 0),    \
     ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))

#define ARENA_INFO_TO_PAGE(arena)                                             \
    (JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)),                                 \
     ((jsuword) (arena) >> GC_ARENA_SHIFT))

#define GET_ARENA_INFO(chunk, index)                                          \
    (JS_ASSERT((index) < js_gcArenasPerChunk),                                \
     ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
/*
 * Macros to access/modify information about the chunk of GC arenas.
 */
#define GET_ARENA_CHUNK(arena, index)                                         \
    (JS_ASSERT(GET_ARENA_INDEX(arena) == index),                              \
     ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))

#define GET_ARENA_INDEX(arena)                                                \
    ((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)

#define GET_CHUNK_INFO_INDEX(chunk)                                           \
    ((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)

#define SET_CHUNK_INFO_INDEX(chunk, index)                                    \
    (JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS),   \
     (void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))

#define GET_CHUNK_INFO(chunk, infoIndex)                                      \
    (JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)),                   \
     JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk),                   \
     (JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))

#define CHUNK_INFO_TO_INDEX(ci)                                               \
    GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
/*
 * Macros for GC-thing operations.
 */
#define THINGS_PER_ARENA(thingSize)                                           \
    ((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))

#define THING_TO_ARENA(thing)                                                 \
    ((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) +                  \
                       1 - sizeof(JSGCArenaInfo)))

#define THING_TO_INDEX(thing, thingSize)                                      \
    ((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))

#define THING_FLAGS_END(arena) ((uint8 *)(arena))

#define THING_FLAGP(arena, thingIndex)                                        \
    (JS_ASSERT((jsuword) (thingIndex)                                         \
               < (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)),       \
     (uint8 *)(arena) - 1 - (thingIndex))

#define THING_TO_FLAGP(thing, thingSize)                                      \
    THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))

#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)

#define FLAGP_TO_INDEX(flagp)                                                 \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET),      \
     (ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))

#define FLAGP_TO_THING(flagp, thingSize)                                      \
    (JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >=                         \
               (ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))),            \
     (JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) +                     \
                   (thingSize) * FLAGP_TO_INDEX(flagp)))
/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena to amortize the cost of rt->gcLock.
 *
 * We use number 8 based on benchmarks from bug 312238.
 */
#define MAX_THREAD_LOCAL_THINGS 8
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));

/* We want to use all the available GC thing space for object's slots. */
JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
static uint8 GCTypeToTraceKindMap[GCX_NTYPES] = {
    JSTRACE_OBJECT,     /* GCX_OBJECT */
    JSTRACE_STRING,     /* GCX_STRING */
    JSTRACE_DOUBLE,     /* GCX_DOUBLE */
    JSTRACE_FUNCTION,   /* GCX_FUNCTION */
    JSTRACE_NAMESPACE,  /* GCX_NAMESPACE */
    JSTRACE_QNAME,      /* GCX_QNAME */
    JSTRACE_XML,        /* GCX_XML */
    (uint8) -1,         /* unused */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 0 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 1 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 2 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 3 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 4 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 5 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 6 */
    JSTRACE_STRING,     /* GCX_EXTERNAL_STRING + 7 */
};
/*
 * Ensure that JSObject is allocated from a different GC-list than jsdouble
 * and JSString so we can easily finalize JSObject before these two types of
 * GC things. See comments in js_GC.
 */
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
                 GC_FREELIST_INDEX(sizeof(JSObject)));
/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};
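/*
 * Illustrative growth sequence with the constants above: as iterators are
 * registered, PtrTableCapacity below yields 4, 8, 16, ... up to 1024 while
 * the count stays below GC_ITERATOR_TABLE_LINEAR, and then grows in
 * 1024-entry steps (2048, 3072, ...) after that.
 */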
/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}
static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}
static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming pointer is bigger
             * than a byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;
    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}
static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}
#ifdef JS_GCMETER
# define METER(x) ((void) (x))
#else
# define METER(x) ((void) 0)
#endif

/*
 * For chunks allocated via over-sized malloc, get a pointer to store the gap
 * between the malloc's result and the first arena in the chunk.
 */
static uint32 *
GetMallocedChunkGapPtr(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

    /* Use the memory after the chunk, see NewGCChunk for details. */
    return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
}
static jsuword
NewGCChunk(void)
{
    void *p;
    jsuword chunk;

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                         MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
        return (jsuword) p;
# elif defined(XP_UNIX) || defined(XP_BEOS)
        p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return (p == MAP_FAILED) ? 0 : (jsuword) p;
# else
#  error "Not implemented"
# endif
    }
#endif

    /*
     * Implement the chunk allocation using over-sized malloc if mmap cannot
     * be used. FIXME bug 396007: the code should use posix_memalign when it
     * is available.
     *
     * Since malloc allocates pointers aligned on the word boundary, to get
     * js_gcArenasPerChunk aligned arenas, we need to malloc only
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
     * bytes. But since we store the gap between the malloced pointer and the
     * first arena in the chunk after the chunk, we need to ask for
     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
     * bytes to ensure that we always have room to store the gap.
     */
    p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
    if (!p)
        return 0;
    chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
    *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
    return chunk;
}
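/*
 * Worked example (illustrative numbers only): with js_gcArenasPerChunk == 7
 * and 4 KB arenas, the malloc above asks for 8 * 4096 == 32768 bytes.
 * Rounding the result up to the next 4 KB boundary wastes at most
 * GC_ARENA_MASK bytes, the seven aligned arenas occupy 28672 bytes, and
 * because malloc returns a word-aligned pointer the uint32 gap stored right
 * after the last arena still fits inside the malloced block.
 */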
static void
DestroyGCChunk(jsuword chunk)
{
    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);

#if JS_GC_USE_MMAP
    if (js_gcUseMmap) {
# if defined(XP_WIN)
        VirtualFree((void *) chunk, 0, MEM_RELEASE);
# elif defined(XP_UNIX) || defined(XP_BEOS)
        munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
# else
#  error "Not implemented"
# endif
        return;
    }
#endif

    /* See comments in NewGCChunk. */
    JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
    free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
}
static void
AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    ci->prevp = &rt->gcChunkList;
    ci->next = rt->gcChunkList;
    if (rt->gcChunkList) {
        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
        rt->gcChunkList->prevp = &ci->next;
    }
    rt->gcChunkList = ci;
}
static void
RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
{
    *ci->prevp = ci->next;
    if (ci->next) {
        JS_ASSERT(ci->next->prevp == &ci->next);
        ci->next->prevp = ci->prevp;
    }
}
633 static JSGCArenaInfo
*
634 NewGCArena(JSRuntime
*rt
)
639 JSGCArenaInfo
*a
, *aprev
;
641 if (js_gcArenasPerChunk
== 1) {
642 chunk
= NewGCChunk();
643 return (chunk
== 0) ? NULL
: ARENA_START_TO_INFO(chunk
);
646 ci
= rt
->gcChunkList
;
648 chunk
= NewGCChunk();
651 JS_ASSERT((chunk
& GC_ARENA_MASK
) == 0);
652 a
= GET_ARENA_INFO(chunk
, 0);
653 a
->firstArena
= JS_TRUE
;
661 a
= GET_ARENA_INFO(chunk
, i
);
662 a
->firstArena
= JS_FALSE
;
664 } while (i
!= js_gcArenasPerChunk
- 1);
665 ci
= GET_CHUNK_INFO(chunk
, 0);
666 ci
->lastFreeArena
= aprev
;
667 ci
->numFreeArenas
= js_gcArenasPerChunk
- 1;
668 AddChunkToList(rt
, ci
);
670 JS_ASSERT(ci
->prevp
== &rt
->gcChunkList
);
671 a
= ci
->lastFreeArena
;
674 JS_ASSERT(ci
->numFreeArenas
== 1);
675 JS_ASSERT(ARENA_INFO_TO_START(a
) == (jsuword
) ci
);
676 RemoveChunkFromList(rt
, ci
);
677 chunk
= GET_ARENA_CHUNK(a
, GET_ARENA_INDEX(a
));
678 SET_CHUNK_INFO_INDEX(chunk
, NO_FREE_ARENAS
);
680 JS_ASSERT(ci
->numFreeArenas
>= 2);
681 JS_ASSERT(ARENA_INFO_TO_START(a
) != (jsuword
) ci
);
682 ci
->lastFreeArena
= aprev
;
691 DestroyGCArena(JSRuntime
*rt
, JSGCArenaInfo
*a
)
695 uint32 chunkInfoIndex
;
698 METER(rt
->gcStats
.afree
++);
700 if (js_gcArenasPerChunk
== 1) {
701 DestroyGCChunk(ARENA_INFO_TO_START(a
));
707 jsuword firstArena
, arenaIndex
;
709 firstArena
= a
->firstArena
;
710 arenaIndex
= a
->arenaIndex
;
711 memset((void *) ARENA_INFO_TO_START(a
), JS_FREE_PATTERN
,
713 a
->firstArena
= firstArena
;
714 a
->arenaIndex
= arenaIndex
;
718 arenaIndex
= GET_ARENA_INDEX(a
);
719 chunk
= GET_ARENA_CHUNK(a
, arenaIndex
);
720 chunkInfoIndex
= GET_CHUNK_INFO_INDEX(chunk
);
721 if (chunkInfoIndex
== NO_FREE_ARENAS
) {
722 chunkInfoIndex
= arenaIndex
;
723 SET_CHUNK_INFO_INDEX(chunk
, arenaIndex
);
724 ci
= GET_CHUNK_INFO(chunk
, chunkInfoIndex
);
726 ci
->lastFreeArena
= a
;
727 ci
->numFreeArenas
= 1;
728 AddChunkToList(rt
, ci
);
730 JS_ASSERT(chunkInfoIndex
!= arenaIndex
);
731 ci
= GET_CHUNK_INFO(chunk
, chunkInfoIndex
);
732 JS_ASSERT(ci
->numFreeArenas
!= 0);
733 JS_ASSERT(ci
->lastFreeArena
);
734 JS_ASSERT(a
!= ci
->lastFreeArena
);
735 if (ci
->numFreeArenas
== js_gcArenasPerChunk
- 1) {
736 RemoveChunkFromList(rt
, ci
);
737 DestroyGCChunk(chunk
);
740 a
->prev
= ci
->lastFreeArena
;
741 ci
->lastFreeArena
= a
;
747 InitGCArenaLists(JSRuntime
*rt
)
750 JSGCArenaList
*arenaList
;
752 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
753 arenaList
= &rt
->gcArenaList
[i
];
754 thingSize
= GC_FREELIST_NBYTES(i
);
755 JS_ASSERT((size_t)(uint16
)thingSize
== thingSize
);
756 arenaList
->last
= NULL
;
757 arenaList
->lastCount
= THINGS_PER_ARENA(thingSize
);
758 arenaList
->thingSize
= (uint16
)thingSize
;
759 arenaList
->freeList
= NULL
;
760 METER(memset(&arenaList
->stats
, 0, sizeof arenaList
->stats
));
765 FinishGCArenaLists(JSRuntime
*rt
)
768 JSGCArenaList
*arenaList
;
769 JSGCArenaInfo
*a
, *aprev
;
771 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
772 arenaList
= &rt
->gcArenaList
[i
];
774 for (a
= arenaList
->last
; a
; a
= aprev
) {
776 DestroyGCArena(rt
, a
);
778 arenaList
->last
= NULL
;
779 arenaList
->lastCount
= THINGS_PER_ARENA(arenaList
->thingSize
);
780 arenaList
->freeList
= NULL
;
781 METER(arenaList
->stats
.narenas
= 0);
784 JS_ASSERT(rt
->gcChunkList
== 0);
787 JS_FRIEND_API(uint8
*)
788 js_GetGCThingFlags(void *thing
)
793 a
= THING_TO_ARENA(thing
);
794 index
= THING_TO_INDEX(thing
, a
->list
->thingSize
);
795 return THING_FLAGP(a
, index
);
799 js_GetGCStringRuntime(JSString
*str
)
803 list
= THING_TO_ARENA(str
)->list
;
805 JS_ASSERT(list
->thingSize
== sizeof(JSGCThing
));
806 JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing
)) == 0);
808 return (JSRuntime
*)((uint8
*)list
- offsetof(JSRuntime
, gcArenaList
));
812 js_IsAboutToBeFinalized(JSContext
*cx
, void *thing
)
814 uint8 flags
= *js_GetGCThingFlags(thing
);
816 return !(flags
& (GCF_MARK
| GCF_LOCK
| GCF_FINAL
));
820 static const char newborn_external_string
[] = "newborn external string";
822 static const char *gc_typenames
[GCX_NTYPES
] = {
826 "newborn mutable string",
831 newborn_external_string
,
832 newborn_external_string
,
833 newborn_external_string
,
834 newborn_external_string
,
835 newborn_external_string
,
836 newborn_external_string
,
837 newborn_external_string
,
838 newborn_external_string
/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;
    const char      *name;
} JSGCRootHashEntry;

/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE 256
/*
 * For a CPU with extremely large pages using them for GC things wastes
 * too much memory.
 */
#define GC_ARENAS_PER_CPU_PAGE_LIMIT    JS_BIT(18 - GC_ARENA_SHIFT)

JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
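/*
 * Illustratively, with mmap-backed arenas (GC_ARENA_SHIFT == 12) the limit
 * is JS_BIT(6) == 64 arenas, so CPU pages up to 256 KB still use the paged
 * chunk allocation; larger pages fall back to the malloc path.
 */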
861 js_InitGC(JSRuntime
*rt
, uint32 maxbytes
)
864 if (js_gcArenasPerChunk
== 0) {
865 size_t cpuPageSize
, arenasPerPage
;
870 cpuPageSize
= si
.dwPageSize
;
872 # elif defined(XP_UNIX) || defined(XP_BEOS)
873 cpuPageSize
= (size_t) sysconf(_SC_PAGESIZE
);
875 # error "Not implemented"
877 /* cpuPageSize is a power of 2. */
878 JS_ASSERT((cpuPageSize
& (cpuPageSize
- 1)) == 0);
879 arenasPerPage
= cpuPageSize
>> GC_ARENA_SHIFT
;
881 if (arenasPerPage
== 0) {
883 "JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
884 "paged allocation for the garbage collector. Please report this.\n",
885 (unsigned) cpuPageSize
);
888 if (arenasPerPage
- 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT
- 1)) {
890 * Use at least 4 GC arenas per paged allocation chunk to minimize
891 * the overhead of mmap/VirtualAlloc.
893 js_gcUseMmap
= JS_TRUE
;
894 js_gcArenasPerChunk
= JS_MAX((uint32
) arenasPerPage
, 4);
896 js_gcUseMmap
= JS_FALSE
;
897 js_gcArenasPerChunk
= 7;
901 JS_ASSERT(1 <= js_gcArenasPerChunk
&&
902 js_gcArenasPerChunk
<= NO_FREE_ARENAS
);
904 InitGCArenaLists(rt
);
905 if (!JS_DHashTableInit(&rt
->gcRootsHash
, JS_DHashGetStubOps(), NULL
,
906 sizeof(JSGCRootHashEntry
), GC_ROOTS_SIZE
)) {
907 rt
->gcRootsHash
.ops
= NULL
;
910 rt
->gcLocksHash
= NULL
; /* create lazily */
913 * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
914 * for default backward API compatibility.
916 rt
->gcMaxBytes
= rt
->gcMaxMallocBytes
= maxbytes
;
923 js_DumpGCStats(JSRuntime
*rt
, FILE *fp
)
926 size_t thingsPerArena
;
927 size_t totalThings
, totalMaxThings
, totalBytes
;
928 size_t sumArenas
, sumTotalArenas
;
929 size_t sumFreeSize
, sumTotalFreeSize
;
931 fprintf(fp
, "\nGC allocation statistics:\n");
933 #define UL(x) ((unsigned long)(x))
934 #define ULSTAT(x) UL(rt->gcStats.x)
941 sumTotalFreeSize
= 0;
942 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
943 JSGCArenaList
*list
= &rt
->gcArenaList
[i
];
944 JSGCArenaStats
*stats
= &list
->stats
;
945 if (stats
->maxarenas
== 0) {
946 fprintf(fp
, "ARENA LIST %u (thing size %lu): NEVER USED\n",
947 i
, UL(GC_FREELIST_NBYTES(i
)));
950 thingsPerArena
= THINGS_PER_ARENA(list
->thingSize
);
951 fprintf(fp
, "ARENA LIST %u (thing size %lu):\n",
952 i
, UL(GC_FREELIST_NBYTES(i
)));
953 fprintf(fp
, " arenas: %lu\n", UL(stats
->narenas
));
954 fprintf(fp
, " max arenas: %lu\n", UL(stats
->maxarenas
));
955 fprintf(fp
, " things: %lu\n", UL(stats
->nthings
));
956 fprintf(fp
, " max things: %lu\n", UL(stats
->maxthings
));
957 fprintf(fp
, " free list: %lu\n", UL(stats
->freelen
));
958 fprintf(fp
, " free list density: %.1f%%\n",
961 : 100.0 * stats
->freelen
/ (thingsPerArena
* stats
->narenas
));
962 fprintf(fp
, " average free list density: %.1f%%\n",
963 stats
->totalarenas
== 0
965 : 100.0 * stats
->totalfreelen
/
966 (thingsPerArena
* stats
->totalarenas
));
967 fprintf(fp
, " recycles: %lu\n", UL(stats
->recycle
));
968 fprintf(fp
, " recycle/alloc ratio: %.2f\n",
969 (double) stats
->recycle
/ (stats
->totalnew
- stats
->recycle
));
970 totalThings
+= stats
->nthings
;
971 totalMaxThings
+= stats
->maxthings
;
972 totalBytes
+= list
->thingSize
* stats
->nthings
;
973 sumArenas
+= stats
->narenas
;
974 sumTotalArenas
+= stats
->totalarenas
;
975 sumFreeSize
+= list
->thingSize
* stats
->freelen
;
976 sumTotalFreeSize
+= list
->thingSize
* stats
->totalfreelen
;
978 fprintf(fp
, "TOTAL STATS:\n");
979 fprintf(fp
, " bytes allocated: %lu\n", UL(rt
->gcBytes
));
980 fprintf(fp
, " alloc attempts: %lu\n", ULSTAT(alloc
));
982 fprintf(fp
, " alloc without locks: %1u\n", ULSTAT(localalloc
));
984 fprintf(fp
, " total GC things: %lu\n", UL(totalThings
));
985 fprintf(fp
, " max total GC things: %lu\n", UL(totalMaxThings
));
986 fprintf(fp
, " GC things size: %lu\n", UL(totalBytes
));
987 fprintf(fp
, " total GC arenas: %lu\n", UL(sumArenas
));
988 fprintf(fp
, " total free list density: %.1f%%\n",
991 : 100.0 * sumFreeSize
/ (sumArenas
<< GC_ARENA_SHIFT
));
992 fprintf(fp
, " average free list density: %.1f%%\n",
993 sumTotalFreeSize
== 0
995 : 100.0 * sumTotalFreeSize
/ (sumTotalArenas
<< GC_ARENA_SHIFT
));
996 fprintf(fp
, "allocation retries after GC: %lu\n", ULSTAT(retry
));
997 fprintf(fp
, " allocation failures: %lu\n", ULSTAT(fail
));
998 fprintf(fp
, " things born locked: %lu\n", ULSTAT(lockborn
));
999 fprintf(fp
, " valid lock calls: %lu\n", ULSTAT(lock
));
1000 fprintf(fp
, " valid unlock calls: %lu\n", ULSTAT(unlock
));
1001 fprintf(fp
, " mark recursion depth: %lu\n", ULSTAT(depth
));
1002 fprintf(fp
, " maximum mark recursion: %lu\n", ULSTAT(maxdepth
));
1003 fprintf(fp
, " mark C recursion depth: %lu\n", ULSTAT(cdepth
));
1004 fprintf(fp
, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth
));
1005 fprintf(fp
, " delayed tracing calls: %lu\n", ULSTAT(untraced
));
1007 fprintf(fp
, " max trace later count: %lu\n", ULSTAT(maxuntraced
));
1009 fprintf(fp
, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel
));
1010 fprintf(fp
, "potentially useful GC calls: %lu\n", ULSTAT(poke
));
1011 fprintf(fp
, " thing arenas freed so far: %lu\n", ULSTAT(afree
));
1012 fprintf(fp
, " stack segments scanned: %lu\n", ULSTAT(stackseg
));
1013 fprintf(fp
, "stack segment slots scanned: %lu\n", ULSTAT(segslots
));
1014 fprintf(fp
, "reachable closeable objects: %lu\n", ULSTAT(nclose
));
1015 fprintf(fp
, " max reachable closeable: %lu\n", ULSTAT(maxnclose
));
1016 fprintf(fp
, " scheduled close hooks: %lu\n", ULSTAT(closelater
));
1017 fprintf(fp
, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater
));
1021 #ifdef JS_ARENAMETER
1022 JS_DumpArenaStats(fp
);
1029 CheckLeakedRoots(JSRuntime
*rt
);
1033 js_FinishGC(JSRuntime
*rt
)
1035 #ifdef JS_ARENAMETER
1036 JS_DumpArenaStats(stdout
);
1039 js_DumpGCStats(rt
, stdout
);
1042 FreePtrTable(&rt
->gcIteratorTable
, &iteratorTableInfo
);
1043 FinishGCArenaLists(rt
);
1045 if (rt
->gcRootsHash
.ops
) {
1047 CheckLeakedRoots(rt
);
1049 JS_DHashTableFinish(&rt
->gcRootsHash
);
1050 rt
->gcRootsHash
.ops
= NULL
;
1052 if (rt
->gcLocksHash
) {
1053 JS_DHashTableDestroy(rt
->gcLocksHash
);
1054 rt
->gcLocksHash
= NULL
;
1059 js_AddRoot(JSContext
*cx
, void *rp
, const char *name
)
1061 JSBool ok
= js_AddRootRT(cx
->runtime
, rp
, name
);
1063 JS_ReportOutOfMemory(cx
);
1068 js_AddRootRT(JSRuntime
*rt
, void *rp
, const char *name
)
1071 JSGCRootHashEntry
*rhe
;
1074 * Due to the long-standing, but now removed, use of rt->gcLock across the
1075 * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
1076 * properly with a racing GC, without calling JS_AddRoot from a request.
1077 * We have to preserve API compatibility here, now that we avoid holding
1078 * rt->gcLock across the mark phase (including the root hashtable mark).
1080 * If the GC is running and we're called on another thread, wait for this
1081 * GC activation to finish. We can safely wait here (in the case where we
1082 * are called within a request on another thread's context) without fear
1083 * of deadlock because the GC doesn't set rt->gcRunning until after it has
1084 * waited for all active requests to end.
1087 #ifdef JS_THREADSAFE
1088 JS_ASSERT(!rt
->gcRunning
|| rt
->gcLevel
> 0);
1089 if (rt
->gcRunning
&& rt
->gcThread
->id
!= js_CurrentThreadId()) {
1091 JS_AWAIT_GC_DONE(rt
);
1092 } while (rt
->gcLevel
> 0);
1095 rhe
= (JSGCRootHashEntry
*)
1096 JS_DHashTableOperate(&rt
->gcRootsHash
, rp
, JS_DHASH_ADD
);
1109 js_RemoveRoot(JSRuntime
*rt
, void *rp
)
1112 * Due to the JS_RemoveRootRT API, we may be called outside of a request.
1113 * Same synchronization drill as above in js_AddRoot.
1116 #ifdef JS_THREADSAFE
1117 JS_ASSERT(!rt
->gcRunning
|| rt
->gcLevel
> 0);
1118 if (rt
->gcRunning
&& rt
->gcThread
->id
!= js_CurrentThreadId()) {
1120 JS_AWAIT_GC_DONE(rt
);
1121 } while (rt
->gcLevel
> 0);
1124 (void) JS_DHashTableOperate(&rt
->gcRootsHash
, rp
, JS_DHASH_REMOVE
);
1125 rt
->gcPoke
= JS_TRUE
;
1132 JS_STATIC_DLL_CALLBACK(JSDHashOperator
)
1133 js_root_printer(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 i
, void *arg
)
1135 uint32
*leakedroots
= (uint32
*)arg
;
1136 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1140 "JS engine warning: leaking GC root \'%s\' at %p\n",
1141 rhe
->name
? (char *)rhe
->name
: "", rhe
->root
);
1143 return JS_DHASH_NEXT
;
1147 CheckLeakedRoots(JSRuntime
*rt
)
1149 uint32 leakedroots
= 0;
1151 /* Warn (but don't assert) debug builds of any remaining roots. */
1152 JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_root_printer
,
1154 if (leakedroots
> 0) {
1155 if (leakedroots
== 1) {
1157 "JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
1158 " This root may point to freed memory. Objects reachable\n"
1159 " through it have not been finalized.\n");
1162 "JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
1163 " These roots may point to freed memory. Objects reachable\n"
1164 " through them have not been finalized.\n",
1165 (unsigned long) leakedroots
);
1170 typedef struct NamedRootDumpArgs
{
1171 void (*dump
)(const char *name
, void *rp
, void *data
);
1173 } NamedRootDumpArgs
;
1175 JS_STATIC_DLL_CALLBACK(JSDHashOperator
)
1176 js_named_root_dumper(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 number
,
1179 NamedRootDumpArgs
*args
= (NamedRootDumpArgs
*) arg
;
1180 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1183 args
->dump(rhe
->name
, rhe
->root
, args
->data
);
1184 return JS_DHASH_NEXT
;
1188 js_DumpNamedRoots(JSRuntime
*rt
,
1189 void (*dump
)(const char *name
, void *rp
, void *data
),
1192 NamedRootDumpArgs args
;
1196 JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_named_root_dumper
, &args
);
1201 typedef struct GCRootMapArgs
{
1206 JS_STATIC_DLL_CALLBACK(JSDHashOperator
)
1207 js_gcroot_mapper(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 number
,
1210 GCRootMapArgs
*args
= (GCRootMapArgs
*) arg
;
1211 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
1215 mapflags
= args
->map(rhe
->root
, rhe
->name
, args
->data
);
1217 #if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
1218 JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
1219 JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
1220 op
= (JSDHashOperator
)mapflags
;
1223 if (mapflags
& JS_MAP_GCROOT_STOP
)
1224 op
|= JS_DHASH_STOP
;
1225 if (mapflags
& JS_MAP_GCROOT_REMOVE
)
1226 op
|= JS_DHASH_REMOVE
;
1229 return (JSDHashOperator
) op
;
1233 js_MapGCRoots(JSRuntime
*rt
, JSGCRootMapFun map
, void *data
)
1241 rv
= JS_DHashTableEnumerate(&rt
->gcRootsHash
, js_gcroot_mapper
, &args
);
1247 js_RegisterCloseableIterator(JSContext
*cx
, JSObject
*obj
)
1253 JS_ASSERT(!rt
->gcRunning
);
1256 ok
= AddToPtrTable(cx
, &rt
->gcIteratorTable
, &iteratorTableInfo
, obj
);
1262 CloseNativeIterators(JSContext
*cx
)
1265 size_t count
, newCount
, i
;
1270 count
= rt
->gcIteratorTable
.count
;
1271 array
= rt
->gcIteratorTable
.array
;
1274 for (i
= 0; i
!= count
; ++i
) {
1275 obj
= (JSObject
*)array
[i
];
1276 if (js_IsAboutToBeFinalized(cx
, obj
))
1277 js_CloseNativeIterator(cx
, obj
);
1279 array
[newCount
++] = obj
;
1281 ShrinkPtrTable(&rt
->gcIteratorTable
, &iteratorTableInfo
, newCount
);
1284 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
1285 #define DEBUG_gchist
1291 static struct GCHist
{
1293 JSGCThing
*freeList
;
1300 js_NewGCThing(JSContext
*cx
, uintN flags
, size_t nbytes
)
1307 JSGCArenaList
*arenaList
;
1310 JSLocalRootStack
*lrs
;
1311 #ifdef JS_THREADSAFE
1313 uintN localMallocBytes
;
1314 JSGCThing
**flbase
, **lastptr
;
1315 JSGCThing
*tmpthing
;
1317 uintN maxFreeThings
; /* max to take from the global free list */
1318 METER(size_t nfree
);
1322 METER(rt
->gcStats
.alloc
++); /* this is not thread-safe */
1323 nbytes
= JS_ROUNDUP(nbytes
, sizeof(JSGCThing
));
1324 flindex
= GC_FREELIST_INDEX(nbytes
);
1326 #ifdef JS_THREADSAFE
1327 gcLocked
= JS_FALSE
;
1328 JS_ASSERT(cx
->thread
);
1329 flbase
= cx
->thread
->gcFreeLists
;
1331 thing
= flbase
[flindex
];
1332 localMallocBytes
= cx
->thread
->gcMallocBytes
;
1333 if (thing
&& rt
->gcMaxMallocBytes
- rt
->gcMallocBytes
> localMallocBytes
) {
1334 flagp
= thing
->flagp
;
1335 flbase
[flindex
] = thing
->next
;
1336 METER(rt
->gcStats
.localalloc
++); /* this is not thread-safe */
1343 /* Transfer thread-local counter to global one. */
1344 if (localMallocBytes
!= 0) {
1345 cx
->thread
->gcMallocBytes
= 0;
1346 if (rt
->gcMaxMallocBytes
- rt
->gcMallocBytes
< localMallocBytes
)
1347 rt
->gcMallocBytes
= rt
->gcMaxMallocBytes
;
1349 rt
->gcMallocBytes
+= localMallocBytes
;
1352 JS_ASSERT(!rt
->gcRunning
);
1353 if (rt
->gcRunning
) {
1354 METER(rt
->gcStats
.finalfail
++);
1359 doGC
= (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
&& rt
->gcPoke
);
1361 doGC
= doGC
|| rt
->gcZeal
>= 2 || (rt
->gcZeal
>= 1 && rt
->gcPoke
);
1364 arenaList
= &rt
->gcArenaList
[flindex
];
1368 * Keep rt->gcLock across the call into js_GC so we don't starve
1369 * and lose to racing threads who deplete the heap just after
1370 * js_GC has replenished it (or has synchronized with a racing
1371 * GC that collected a bunch of garbage). This unfair scheduling
1372 * can happen on certain operating systems. For the gory details,
1373 * see bug 162779 at https://bugzilla.mozilla.org/.
1375 js_GC(cx
, GC_LAST_DITCH
);
1376 METER(rt
->gcStats
.retry
++);
1379 /* Try to get thing from the free list. */
1380 thing
= arenaList
->freeList
;
1382 arenaList
->freeList
= thing
->next
;
1383 flagp
= thing
->flagp
;
1384 JS_ASSERT(*flagp
& GCF_FINAL
);
1385 METER(arenaList
->stats
.freelen
--);
1386 METER(arenaList
->stats
.recycle
++);
1388 #ifdef JS_THREADSAFE
1390 * Refill the local free list by taking several things from the
1391 * global free list unless we are still at rt->gcMaxMallocBytes
1392 * barrier or the free list is already populated. The former
1393 * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
1394 * or no gcPoke. The latter is caused via allocating new things
1395 * in gcCallback(cx, JSGC_END).
1397 if (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
|| flbase
[flindex
])
1399 tmpthing
= arenaList
->freeList
;
1401 maxFreeThings
= MAX_THREAD_LOCAL_THINGS
;
1403 if (!tmpthing
->next
)
1405 tmpthing
= tmpthing
->next
;
1406 } while (--maxFreeThings
!= 0);
1408 flbase
[flindex
] = arenaList
->freeList
;
1409 arenaList
->freeList
= tmpthing
->next
;
1410 tmpthing
->next
= NULL
;
1417 * Try to allocate things from the last arena. If it is fully used,
1418 * check if we can allocate a new one and, if we cannot, consider
1419 * doing a "last ditch" GC unless already tried.
1421 thingsLimit
= THINGS_PER_ARENA(nbytes
);
1422 if (arenaList
->lastCount
!= thingsLimit
) {
1423 JS_ASSERT(arenaList
->lastCount
< thingsLimit
);
1424 a
= arenaList
->last
;
1426 if (rt
->gcBytes
>= rt
->gcMaxBytes
|| !(a
= NewGCArena(rt
))) {
1433 rt
->gcBytes
+= GC_ARENA_SIZE
;
1434 METER(++arenaList
->stats
.narenas
);
1435 METER(arenaList
->stats
.maxarenas
1436 = JS_MAX(arenaList
->stats
.maxarenas
,
1437 arenaList
->stats
.narenas
));
1439 a
->list
= arenaList
;
1440 a
->prev
= arenaList
->last
;
1441 a
->prevUntracedPage
= 0;
1442 a
->untracedThings
= 0;
1443 arenaList
->last
= a
;
1444 arenaList
->lastCount
= 0;
1447 flagp
= THING_FLAGP(a
, arenaList
->lastCount
);
1448 thing
= FLAGP_TO_THING(flagp
, nbytes
);
1449 arenaList
->lastCount
++;
1451 #ifdef JS_THREADSAFE
1453 * Refill the local free list by taking free things from the last
1454 * arena. Prefer to order free things by ascending address in the
1455 * (unscientific) hope of better cache locality.
1457 if (rt
->gcMallocBytes
>= rt
->gcMaxMallocBytes
|| flbase
[flindex
])
1460 lastptr
= &flbase
[flindex
];
1461 maxFreeThings
= thingsLimit
- arenaList
->lastCount
;
1462 if (maxFreeThings
> MAX_THREAD_LOCAL_THINGS
)
1463 maxFreeThings
= MAX_THREAD_LOCAL_THINGS
;
1464 METER(arenaList
->stats
.freelen
+= maxFreeThings
);
1465 while (maxFreeThings
!= 0) {
1468 tmpflagp
= THING_FLAGP(a
, arenaList
->lastCount
);
1469 tmpthing
= FLAGP_TO_THING(tmpflagp
, nbytes
);
1470 arenaList
->lastCount
++;
1471 tmpthing
->flagp
= tmpflagp
;
1472 *tmpflagp
= GCF_FINAL
; /* signifying that thing is free */
1474 *lastptr
= tmpthing
;
1475 lastptr
= &tmpthing
->next
;
1482 /* We successfully allocated the thing. */
1483 #ifdef JS_THREADSAFE
1486 lrs
= cx
->localRootStack
;
1489 * If we're in a local root scope, don't set newborn[type] at all, to
1490 * avoid entraining garbage from it for an unbounded amount of time
1491 * on this context. A caller will leave the local root scope and pop
1492 * this reference, allowing thing to be GC'd if it has no other refs.
1493 * See JS_EnterLocalRootScope and related APIs.
1495 if (js_PushLocalRoot(cx
, lrs
, (jsval
) thing
) < 0) {
1497 * When we fail for a thing allocated through the tail of the last
1498 * arena, thing's flag byte is not initialized. So to prevent GC
1499 * accessing the uninitialized flags during the finalization, we
1500 * always mark the thing as final. See bug 337407.
1507 * No local root scope, so we're stuck with the old, fragile model of
1508 * depending on a pigeon-hole newborn per type per context.
1510 cx
->weakRoots
.newborn
[flags
& GCF_TYPEMASK
] = thing
;
1513 /* We can't fail now, so update flags. */
1514 *flagp
= (uint8
)flags
;
1517 gchist
[gchpos
].lastDitch
= doGC
;
1518 gchist
[gchpos
].freeList
= rt
->gcArenaList
[flindex
].freeList
;
1519 if (++gchpos
== NGCHIST
)
1524 JSGCArenaStats
*stats
= &rt
->gcArenaList
[flindex
].stats
;
1526 /* This is not thread-safe for thread-local allocations. */
1527 if (flags
& GCF_LOCK
)
1528 rt
->gcStats
.lockborn
++;
1531 if (stats
->nthings
> stats
->maxthings
)
1532 stats
->maxthings
= stats
->nthings
;
1535 #ifdef JS_THREADSAFE
1539 JS_COUNT_OPERATION(cx
, JSOW_ALLOCATION
);
1543 #ifdef JS_THREADSAFE
1547 METER(rt
->gcStats
.fail
++);
1548 JS_ReportOutOfMemory(cx
);
1553 js_LockGCThing(JSContext
*cx
, void *thing
)
1555 JSBool ok
= js_LockGCThingRT(cx
->runtime
, thing
);
1557 JS_ReportOutOfMemory(cx
);
/*
 * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
 * their descendants must be marked by the GC. To find them during the mark
 * phase, they are added to rt->gcLocksHash, which is created lazily.
 *
 * NB: we depend on the order of GC-thing type indexes here!
 */
#define GC_TYPE_IS_STRING(t)    ((t) == GCX_STRING ||                         \
                                 (t) >= GCX_EXTERNAL_STRING)
#define GC_TYPE_IS_XML(t)       ((unsigned)((t) - GCX_NAMESPACE) <=           \
                                 (unsigned)(GCX_XML - GCX_NAMESPACE))
#define GC_TYPE_IS_DEEP(t)      ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))

#define IS_DEEP_STRING(t,o)     (GC_TYPE_IS_STRING(t) &&                      \
                                 JSSTRING_IS_DEPENDENT((JSString *)(o)))

#define GC_THING_IS_DEEP(t,o)   (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
1579 /* This is compatible with JSDHashEntryStub. */
1580 typedef struct JSGCLockHashEntry
{
1581 JSDHashEntryHdr hdr
;
1582 const JSGCThing
*thing
;
1584 } JSGCLockHashEntry
;
1587 js_LockGCThingRT(JSRuntime
*rt
, void *thing
)
1591 uintN flags
, lock
, type
;
1592 JSGCLockHashEntry
*lhe
;
1598 flagp
= js_GetGCThingFlags(thing
);
1602 lock
= (flags
& GCF_LOCK
);
1603 type
= (flags
& GCF_TYPEMASK
);
1604 deep
= GC_THING_IS_DEEP(type
, thing
);
1607 * Avoid adding a rt->gcLocksHash entry for shallow things until someone
1608 * nests a lock -- then start such an entry with a count of 2, not 1.
1611 if (!rt
->gcLocksHash
) {
1613 JS_NewDHashTable(JS_DHashGetStubOps(), NULL
,
1614 sizeof(JSGCLockHashEntry
),
1616 if (!rt
->gcLocksHash
) {
1620 } else if (lock
== 0) {
1622 JSDHashEntryHdr
*hdr
=
1623 JS_DHashTableOperate(rt
->gcLocksHash
, thing
,
1625 JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr
));
1629 lhe
= (JSGCLockHashEntry
*)
1630 JS_DHashTableOperate(rt
->gcLocksHash
, thing
, JS_DHASH_ADD
);
1636 lhe
->thing
= (JSGCThing
*) thing
;
1637 lhe
->count
= deep
? 1 : 2;
1639 JS_ASSERT(lhe
->count
>= 1);
1644 *flagp
= (uint8
)(flags
| GCF_LOCK
);
1645 METER(rt
->gcStats
.lock
++);
1653 js_UnlockGCThingRT(JSRuntime
*rt
, void *thing
)
1655 uint8
*flagp
, flags
;
1656 JSGCLockHashEntry
*lhe
;
1661 flagp
= js_GetGCThingFlags(thing
);
1665 if (flags
& GCF_LOCK
) {
1666 if (!rt
->gcLocksHash
||
1667 (lhe
= (JSGCLockHashEntry
*)
1668 JS_DHashTableOperate(rt
->gcLocksHash
, thing
,
1670 JS_DHASH_ENTRY_IS_FREE(&lhe
->hdr
))) {
1671 /* Shallow GC-thing with an implicit lock count of 1. */
1672 JS_ASSERT(!GC_THING_IS_DEEP(flags
& GCF_TYPEMASK
, thing
));
1674 /* Basis or nested unlock of a deep thing, or nested of shallow. */
1675 if (--lhe
->count
!= 0)
1677 JS_DHashTableOperate(rt
->gcLocksHash
, thing
, JS_DHASH_REMOVE
);
1679 *flagp
= (uint8
)(flags
& ~GCF_LOCK
);
1682 rt
->gcPoke
= JS_TRUE
;
1684 METER(rt
->gcStats
.unlock
++);
1690 JS_TraceChildren(JSTracer
*trc
, void *thing
, uint32 kind
)
1698 case JSTRACE_OBJECT
:
1699 /* If obj has no map, it must be a newborn. */
1700 obj
= (JSObject
*) thing
;
1703 if (obj
->map
->ops
->trace
) {
1704 obj
->map
->ops
->trace(trc
, obj
);
1706 nslots
= STOBJ_NSLOTS(obj
);
1707 for (i
= 0; i
!= nslots
; ++i
) {
1708 v
= STOBJ_GET_SLOT(obj
, i
);
1709 if (JSVAL_IS_TRACEABLE(v
)) {
1710 JS_SET_TRACING_INDEX(trc
, "slot", i
);
1711 JS_CallTracer(trc
, JSVAL_TO_TRACEABLE(v
),
1712 JSVAL_TRACE_KIND(v
));
1718 case JSTRACE_STRING
:
1719 str
= (JSString
*)thing
;
1720 if (JSSTRING_IS_DEPENDENT(str
))
1721 JS_CALL_STRING_TRACER(trc
, JSSTRDEP_BASE(str
), "base");
1724 case JSTRACE_FUNCTION
:
1726 * No tracing of JSFunction* instance is done for now. See bug 375808.
1730 #if JS_HAS_XML_SUPPORT
1731 case JSTRACE_NAMESPACE
:
1732 js_TraceXMLNamespace(trc
, (JSXMLNamespace
*)thing
);
1736 js_TraceXMLQName(trc
, (JSXMLQName
*)thing
);
1740 js_TraceXML(trc
, (JSXML
*)thing
);
/*
 * Number of things covered by a single bit of JSGCArenaInfo.untracedThings.
 */
#define THINGS_PER_UNTRACED_BIT(thingSize)                                    \
    JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
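/*
 * Worked example (illustrative numbers only): with 4 KB arenas, 16-byte
 * things (240 per arena) and a 32-bit jsuword, each bit of untracedThings
 * covers JS_HOWMANY(240, 32) == 8 consecutive things, so 30 of the 32 bits
 * suffice to cover the whole arena.
 */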
1753 DelayTracingChildren(JSRuntime
*rt
, uint8
*flagp
)
1756 uint32 untracedBitIndex
;
1760 * Things with children to be traced later are marked with
1761 * GCF_MARK | GCF_FINAL flags.
1763 JS_ASSERT((*flagp
& (GCF_MARK
| GCF_FINAL
)) == GCF_MARK
);
1764 *flagp
|= GCF_FINAL
;
1766 METER(rt
->gcStats
.untraced
++);
1768 ++rt
->gcTraceLaterCount
;
1769 METER(if (rt
->gcTraceLaterCount
> rt
->gcStats
.maxuntraced
)
1770 rt
->gcStats
.maxuntraced
= rt
->gcTraceLaterCount
);
1773 a
= FLAGP_TO_ARENA(flagp
);
1774 untracedBitIndex
= FLAGP_TO_INDEX(flagp
) /
1775 THINGS_PER_UNTRACED_BIT(a
->list
->thingSize
);
1776 JS_ASSERT(untracedBitIndex
< JS_BITS_PER_WORD
);
1777 bit
= (jsuword
)1 << untracedBitIndex
;
1778 if (a
->untracedThings
!= 0) {
1779 JS_ASSERT(rt
->gcUntracedArenaStackTop
);
1780 if (a
->untracedThings
& bit
) {
1781 /* bit already covers things with children to trace later. */
1784 a
->untracedThings
|= bit
;
1787 * The thing is the first thing with not yet traced children in the
1788 * whole arena, so push the arena on the stack of arenas with things
1789 * to be traced later unless the arena has already been pushed. We
1790 * detect that through checking prevUntracedPage as the field is 0
1791 * only for not yet pushed arenas. To ensure that
1792 * prevUntracedPage != 0
1793 * even when the stack contains one element, we make prevUntracedPage
1794 * for the arena at the bottom to point to itself.
1796 * See comments in TraceDelayedChildren.
1798 a
->untracedThings
= bit
;
1799 if (a
->prevUntracedPage
== 0) {
1800 if (!rt
->gcUntracedArenaStackTop
) {
1801 /* Stack was empty, mark the arena as the bottom element. */
1802 a
->prevUntracedPage
= ARENA_INFO_TO_PAGE(a
);
1804 JS_ASSERT(rt
->gcUntracedArenaStackTop
->prevUntracedPage
!= 0);
1805 a
->prevUntracedPage
=
1806 ARENA_INFO_TO_PAGE(rt
->gcUntracedArenaStackTop
);
1808 rt
->gcUntracedArenaStackTop
= a
;
1811 JS_ASSERT(rt
->gcUntracedArenaStackTop
);
1815 TraceDelayedChildren(JSTracer
*trc
)
1818 JSGCArenaInfo
*a
, *aprev
;
1820 uint32 thingsPerUntracedBit
;
1821 uint32 untracedBitIndex
, thingIndex
, indexLimit
, endIndex
;
1825 rt
= trc
->context
->runtime
;
1826 a
= rt
->gcUntracedArenaStackTop
;
1828 JS_ASSERT(rt
->gcTraceLaterCount
== 0);
1834 * The following assert verifies that the current arena belongs to the
1835 * untraced stack, since DelayTracingChildren ensures that even for
1836 * stack's bottom prevUntracedPage != 0 but rather points to itself.
1838 JS_ASSERT(a
->prevUntracedPage
!= 0);
1839 JS_ASSERT(rt
->gcUntracedArenaStackTop
->prevUntracedPage
!= 0);
1840 thingSize
= a
->list
->thingSize
;
1841 indexLimit
= (a
== a
->list
->last
)
1842 ? a
->list
->lastCount
1843 : THINGS_PER_ARENA(thingSize
);
1844 thingsPerUntracedBit
= THINGS_PER_UNTRACED_BIT(thingSize
);
    /*
     * We cannot use a do-while loop here because a->untracedThings can
     * already be zero before the loop, as a leftover from previous
     * iterations. See the comments after the loop.
     */
1851 while (a
->untracedThings
!= 0) {
1852 untracedBitIndex
= JS_FLOOR_LOG2W(a
->untracedThings
);
1853 a
->untracedThings
&= ~((jsuword
)1 << untracedBitIndex
);
1854 thingIndex
= untracedBitIndex
* thingsPerUntracedBit
;
1855 endIndex
= thingIndex
+ thingsPerUntracedBit
;
1858 * endIndex can go beyond the last allocated thing as the real
1859 * limit can be "inside" the bit.
1861 if (endIndex
> indexLimit
)
1862 endIndex
= indexLimit
;
1863 JS_ASSERT(thingIndex
< indexLimit
);
1867 * Skip free or already traced things that share the bit
1868 * with untraced ones.
1870 flagp
= THING_FLAGP(a
, thingIndex
);
1871 if ((*flagp
& (GCF_MARK
|GCF_FINAL
)) != (GCF_MARK
|GCF_FINAL
))
1873 *flagp
&= ~GCF_FINAL
;
1875 JS_ASSERT(rt
->gcTraceLaterCount
!= 0);
1876 --rt
->gcTraceLaterCount
;
1878 thing
= FLAGP_TO_THING(flagp
, thingSize
);
1879 JS_TraceChildren(trc
, thing
,
1880 GCTypeToTraceKindMap
[*flagp
& GCF_TYPEMASK
]);
1881 } while (++thingIndex
!= endIndex
);
        /*
         * We finished tracing all things in the arena, but we can pop it
         * from the stack only if the arena is the stack's top.
         *
         * When JS_TraceChildren above calls JS_CallTracer, which on a low C
         * stack calls DelayTracingChildren, and the latter pushes new arenas
         * onto the untraced stack, we have to skip popping this arena until
         * it becomes the top of the stack again.
         */
1893 if (a
== rt
->gcUntracedArenaStackTop
) {
1894 aprev
= ARENA_PAGE_TO_INFO(a
->prevUntracedPage
);
1895 a
->prevUntracedPage
= 0;
1898 * prevUntracedPage points to itself and we reached the
1899 * bottom of the stack.
1903 rt
->gcUntracedArenaStackTop
= a
= aprev
;
1905 a
= rt
->gcUntracedArenaStackTop
;
1908 JS_ASSERT(rt
->gcUntracedArenaStackTop
);
1909 JS_ASSERT(rt
->gcUntracedArenaStackTop
->prevUntracedPage
== 0);
1910 rt
->gcUntracedArenaStackTop
= NULL
;
1911 JS_ASSERT(rt
->gcTraceLaterCount
== 0);
1915 JS_CallTracer(JSTracer
*trc
, void *thing
, uint32 kind
)
1922 JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind
));
1923 JS_ASSERT(trc
->debugPrinter
|| trc
->debugPrintArg
);
1925 if (!IS_GC_MARKING_TRACER(trc
)) {
1926 trc
->callback(trc
, thing
, kind
);
1932 JS_ASSERT(rt
->gcMarkingTracer
== trc
);
1933 JS_ASSERT(rt
->gcLevel
> 0);
1936 * Optimize for string and double as their size is known and their tracing
1940 case JSTRACE_DOUBLE
:
1941 flagp
= THING_TO_FLAGP(thing
, sizeof(JSGCThing
));
1942 JS_ASSERT((*flagp
& GCF_FINAL
) == 0);
1943 JS_ASSERT(GCTypeToTraceKindMap
[*flagp
& GCF_TYPEMASK
] == kind
);
1944 if (rt
->gcThingCallback
)
1945 rt
->gcThingCallback(thing
, *flagp
, rt
->gcThingCallbackClosure
);
1950 case JSTRACE_STRING
:
1952 flagp
= THING_TO_FLAGP(thing
, sizeof(JSGCThing
));
1953 JS_ASSERT((*flagp
& GCF_FINAL
) == 0);
1954 JS_ASSERT(GCTypeToTraceKindMap
[*flagp
& GCF_TYPEMASK
] == kind
);
1955 if (rt
->gcThingCallback
)
1956 rt
->gcThingCallback(thing
, *flagp
, rt
->gcThingCallbackClosure
);
1958 if (!JSSTRING_IS_DEPENDENT((JSString
*) thing
)) {
1962 if (*flagp
& GCF_MARK
)
1965 thing
= JSSTRDEP_BASE((JSString
*) thing
);
1970 flagp
= js_GetGCThingFlags(thing
);
1971 JS_ASSERT(GCTypeToTraceKindMap
[*flagp
& GCF_TYPEMASK
] == kind
);
1973 if (rt
->gcThingCallback
)
1974 rt
->gcThingCallback(thing
, *flagp
, rt
->gcThingCallbackClosure
);
1976 if (*flagp
& GCF_MARK
)
1980 * We check for non-final flag only if mark is unset as
1981 * DelayTracingChildren uses the flag. See comments in the function.
1983 JS_ASSERT(*flagp
!= GCF_FINAL
);
1985 if (!cx
->insideGCMarkCallback
) {
1987 * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
1988 * uses the non-recursive code that otherwise would be called only on
1989 * a low C stack condition.
1991 #ifdef JS_GC_ASSUME_LOW_C_STACK
1992 # define RECURSION_TOO_DEEP() JS_TRUE
1995 # define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
1997 if (RECURSION_TOO_DEEP())
1998 DelayTracingChildren(rt
, flagp
);
2000 JS_TraceChildren(trc
, thing
, kind
);
2003 * For API compatibility we allow for the callback to assume that
2004 * after it calls JS_MarkGCThing for the last time, the callback can
2005 * start to finalize its own objects that are only referenced by
2006 * unmarked GC things.
2008 * Since we do not know which call from inside the callback is the
2009 * last, we ensure that children of all marked things are traced and
2010 * call TraceDelayedChildren(trc) after tracing the thing.
2012 * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
2013 * for the things with untraced children, calling DelayTracingChildren
2014 * is useless here. Hence we always trace thing's children even with a
2017 cx
->insideGCMarkCallback
= JS_FALSE
;
2018 JS_TraceChildren(trc
, thing
, kind
);
2019 TraceDelayedChildren(trc
);
2020 cx
->insideGCMarkCallback
= JS_TRUE
;
2025 trc
->debugPrinter
= NULL
;
2026 trc
->debugPrintArg
= NULL
;
2028 return; /* to avoid out: right_curl when DEBUG is not defined */
2032 js_CallValueTracerIfGCThing(JSTracer
*trc
, jsval v
)
2037 if (JSVAL_IS_DOUBLE(v
) || JSVAL_IS_STRING(v
)) {
2038 thing
= JSVAL_TO_TRACEABLE(v
);
2039 kind
= JSVAL_TRACE_KIND(v
);
2040 } else if (JSVAL_IS_OBJECT(v
) && v
!= JSVAL_NULL
) {
2041 /* v can be an arbitrary GC thing reinterpreted as an object. */
2042 thing
= JSVAL_TO_OBJECT(v
);
2043 kind
= GCTypeToTraceKindMap
[*js_GetGCThingFlags(thing
) & GCF_TYPEMASK
];
2047 JS_CallTracer(trc
, thing
, kind
);
2050 JS_STATIC_DLL_CALLBACK(JSDHashOperator
)
2051 gc_root_traversal(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 num
,
2054 JSGCRootHashEntry
*rhe
= (JSGCRootHashEntry
*)hdr
;
2055 JSTracer
*trc
= (JSTracer
*)arg
;
2056 jsval
*rp
= (jsval
*)rhe
->root
;
2059 /* Ignore null object and scalar values. */
2060 if (!JSVAL_IS_NULL(v
) && JSVAL_IS_GCTHING(v
)) {
2062 JSBool root_points_to_gcArenaList
= JS_FALSE
;
2063 jsuword thing
= (jsuword
) JSVAL_TO_GCTHING(v
);
2065 JSGCArenaList
*arenaList
;
2070 for (i
= 0; i
< GC_NUM_FREELISTS
; i
++) {
2071 arenaList
= &trc
->context
->runtime
->gcArenaList
[i
];
2072 thingSize
= arenaList
->thingSize
;
2073 limit
= (size_t) arenaList
->lastCount
* thingSize
;
2074 for (a
= arenaList
->last
; a
; a
= a
->prev
) {
2075 if (thing
- ARENA_INFO_TO_START(a
) < limit
) {
2076 root_points_to_gcArenaList
= JS_TRUE
;
2079 limit
= (size_t) THINGS_PER_ARENA(thingSize
) * thingSize
;
2082 if (!root_points_to_gcArenaList
&& rhe
->name
) {
2084 "JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
2085 "invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
2086 "The root's name is \"%s\".\n",
2089 JS_ASSERT(root_points_to_gcArenaList
);
2091 JS_SET_TRACING_NAME(trc
, rhe
->name
? rhe
->name
: "root");
2092 js_CallValueTracerIfGCThing(trc
, v
);
2095 return JS_DHASH_NEXT
;
2098 JS_STATIC_DLL_CALLBACK(JSDHashOperator
)
2099 gc_lock_traversal(JSDHashTable
*table
, JSDHashEntryHdr
*hdr
, uint32 num
,
2102 JSGCLockHashEntry
*lhe
= (JSGCLockHashEntry
*)hdr
;
2103 void *thing
= (void *)lhe
->thing
;
2104 JSTracer
*trc
= (JSTracer
*)arg
;
2110 JS_ASSERT(lhe
->count
>= 1);
2111 flags
= *js_GetGCThingFlags(thing
);
2112 traceKind
= GCTypeToTraceKindMap
[flags
& GCF_TYPEMASK
];
2113 JS_CALL_TRACER(trc
, thing
, traceKind
, "locked object");
2116 * Bug 379455: we called the tracer once, but to communicate the value of
2117 * thing's lock count to the tracer, or to gcThingCallback when the tracer
2118 * is the GC marking phase, we need to call an extra lhe->count - 1 times.
2122 if (IS_GC_MARKING_TRACER(trc
)) {
2123 rt
= trc
->context
->runtime
;
2124 if (rt
->gcThingCallback
) {
2126 rt
->gcThingCallback(thing
, flags
,
2127 rt
->gcThingCallbackClosure
);
2132 JS_CALL_TRACER(trc
, thing
, traceKind
, "locked object");
2136 return JS_DHASH_NEXT
;
2139 #define TRACE_JSVALS(trc, len, vec, name) \
2141 jsval _v, *_vp, *_end; \
2143 for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
2145 if (JSVAL_IS_TRACEABLE(_v)) { \
2146 JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \
2147 JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v), \
2148 JSVAL_TRACE_KIND(_v)); \
void
js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
{
    uintN nslots, minargs, skip;

    if (fp->callobj)
        JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
    if (fp->argsobj)
        JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
    if (fp->varobj)
        JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
    if (fp->script) {
        js_TraceScript(trc, fp->script);
        if (fp->spbase) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            JS_ASSERT(JS_UPTRDIFF(fp->sp, fp->spbase) <=
                      fp->script->depth * sizeof(jsval));
            nslots = (uintN) (fp->sp - fp->spbase);
            TRACE_JSVALS(trc, nslots, fp->spbase, "operand");
        }
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
    JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");

    if (fp->callee)
        JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");

    if (fp->argv) {
        nslots = fp->argc;
        skip = 0;
        if (fp->fun) {
            minargs = FUN_MINARGS(fp->fun);
            if (minargs > nslots)
                nslots = minargs;
            if (!FUN_INTERPRETED(fp->fun)) {
                JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
                nslots += fp->fun->u.n.extra;
            }
            if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
                skip = 2 + fp->argc;
        }
        TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
    }
    JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
    if (fp->vars)
        TRACE_JSVALS(trc, fp->nvars, fp->vars, "var");

    JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
    if (fp->sharpArray)
        JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");

    if (fp->xmlNamespace)
        JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
}
static void
TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
{
    uintN i;
    void *thing;

    for (i = 0; i < GCX_NTYPES; i++) {
        thing = wr->newborn[i];
        if (thing)
            JS_CALL_TRACER(trc, thing, GCTypeToTraceKindMap[i],
                           "newborn");
    }

    JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
    JS_SET_TRACING_NAME(trc, "lastInternalResult");
    js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
}
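/*
 * Trace everything reachable from a single context: its active and dormant
 * frame chains, global object, weak roots, pending exception, stack headers,
 * local root stack, temp value rooters, and sharp object map.
 */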
JS_FRIEND_API(void)
js_TraceContext(JSTracer *trc, JSContext *acx)
{
    JSStackFrame *fp, *nextChain;
    JSStackHeader *sh;
    JSTempValueRooter *tvr;

    /*
     * Iterate frame chain and dormant chains.
     *
     * (NB: see comment on this whole "dormant" thing in js_Execute.)
     */
    fp = acx->fp;
    nextChain = acx->dormantFrameChain;
    if (!fp)
        goto next_chain;

    /* The top frame must not be dormant. */
    JS_ASSERT(!fp->dormantNext);
    for (;;) {
        do {
            js_TraceStackFrame(trc, fp);
        } while ((fp = fp->down) != NULL);

      next_chain:
        if (!nextChain)
            break;
        fp = nextChain;
        nextChain = nextChain->dormantNext;
    }

    /* Mark other roots-by-definition in acx. */
    if (acx->globalObject)
        JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
    TraceWeakRoots(trc, &acx->weakRoots);
    if (acx->throwing) {
        JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
    } else {
        /* Avoid keeping GC-ed junk stored in JSContext.exception. */
        acx->exception = JSVAL_NULL;
    }
#if JS_HAS_LVALUE_RETURN
    if (acx->rval2set)
        JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
#endif

    for (sh = acx->stackHeaders; sh; sh = sh->down) {
        METER(trc->context->runtime->gcStats.stackseg++);
        METER(trc->context->runtime->gcStats.segslots += sh->nslots);
        TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
    }

    if (acx->localRootStack)
        js_TraceLocalRoots(trc, acx->localRootStack);

    for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
        switch (tvr->count) {
          case JSTVU_SINGLE:
            JS_SET_TRACING_NAME(trc, "tvr->u.value");
            js_CallValueTracerIfGCThing(trc, tvr->u.value);
            break;
          case JSTVU_TRACE:
            tvr->u.trace(trc, tvr);
            break;
          case JSTVU_SPROP:
            TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
            break;
          case JSTVU_WEAK_ROOTS:
            TraceWeakRoots(trc, tvr->u.weakRoots);
            break;
          case JSTVU_PARSE_CONTEXT:
            js_TraceParseContext(trc, tvr->u.parseContext);
            break;
          case JSTVU_SCRIPT:
            js_TraceScript(trc, tvr->u.script);
            break;
          default:
            JS_ASSERT(tvr->count >= 0);
            TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
        }
    }

    if (acx->sharpObjectMap.depth > 0)
        js_TraceSharpMap(trc, &acx->sharpObjectMap);
}
JS_FRIEND_API(void)
js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
{
    JSRuntime *rt = trc->context->runtime;
    JSContext *iter, *acx;

    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
    js_TraceAtomState(trc, allAtoms);
    js_TraceNativeIteratorStates(trc);

    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
        js_TraceContext(trc, acx);

    if (rt->gcExtraRootsTraceOp)
        rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
}
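/*
 * js_GC marks everything reachable from the roots via js_TraceRuntime, then
 * sweeps each arena list, finalizing unmarked things and rebuilding the
 * per-list free lists, and finally destroys arenas that became empty.
 */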
/*
 * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
 * rt->gcLock already held and when the lock should be kept on return.
 */
void
js_GC(JSContext *cx, JSGCInvocationKind gckind)
{
    JSRuntime *rt;
    JSBool keepAtoms;
    JSTracer trc;
    uintN i, type;
    uint32 thingSize, indexLimit;
    JSGCArenaInfo *a, **ap, *emptyArenas;
    uint8 flags, *flagp;
    JSGCThing *thing, *freeList;
    JSGCArenaList *arenaList;
    JSBool allClear;
#ifdef JS_THREADSAFE
    uint32 requestDebit;
    JSContext *acx, *iter;
#endif

    rt = cx->runtime;
#ifdef JS_THREADSAFE
    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif
    if (gckind == GC_LAST_DITCH) {
        /* The last ditch GC preserves all atoms and weak roots. */
        keepAtoms = JS_TRUE;
    } else {
        /* Keep atoms when a suspended compile is running on another context. */
        keepAtoms = (rt->gcKeepAtoms != 0);
        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
    }

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime. The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
        return;

  restart_after_callback:
    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context). Invoke the callback regardless.
     */
    if (rt->gcCallback &&
        !rt->gcCallback(cx, JSGC_BEGIN) &&
        gckind != GC_LAST_CONTEXT) {
        return;
    }
    /* Lock out other GC allocator and collector invocations. */
    if (gckind != GC_LAST_DITCH)
        JS_LOCK_GC(rt);

    METER(rt->gcStats.poke++);
    rt->gcPoke = JS_FALSE;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->thread->id == js_CurrentThreadId());

    /* Bump gcLevel and return rather than nest on this thread. */
    if (rt->gcThread == cx->thread) {
        JS_ASSERT(rt->gcLevel > 0);
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /*
     * If we're in one or more requests (possibly on more than one context)
     * running on the current thread, indicate, temporarily, that all these
     * requests are inactive. If cx->thread is NULL, then cx is not using
     * the request model, and does not contribute to rt->requestCount.
     */
    requestDebit = 0;
    if (cx->thread) {
        JSCList *head, *link;

        /*
         * Check all contexts on cx->thread->contextList for active requests,
         * counting each such context against requestDebit.
         */
        head = &cx->thread->contextList;
        for (link = head->next; link != head; link = link->next) {
            acx = CX_FROM_THREAD_LINKS(link);
            JS_ASSERT(acx->thread == cx->thread);
            if (acx->requestDepth)
                requestDebit++;
        }
    } else {
        /*
         * We assert, but check anyway, in case someone is misusing the API.
         * Avoiding the loop over all of rt's contexts is a win in the event
         * that the GC runs only on request-less contexts with null threads,
         * in a special thread such as might be used by the UI/DOM/Layout
         * "mozilla" or "main" thread in Mozilla-the-browser.
         */
        JS_ASSERT(cx->requestDepth == 0);
        if (cx->requestDepth)
            requestDebit = 1;
    }
    if (requestDebit) {
        JS_ASSERT(requestDebit <= rt->requestCount);
        rt->requestCount -= requestDebit;
        if (rt->requestCount == 0)
            JS_NOTIFY_REQUEST_DONE(rt);
    }
    /* If another thread is already in GC, don't attempt GC; wait instead. */
    if (rt->gcLevel > 0) {
        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);

        /* Wait for the other thread to finish, then resume our request. */
        while (rt->gcLevel > 0)
            JS_AWAIT_GC_DONE(rt);
        if (requestDebit)
            rt->requestCount += requestDebit;
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcLevel = 1;
    rt->gcThread = cx->thread;

    /* Wait for all other requests to finish. */
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);

#else /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    rt->gcLevel++;
    METER(if (rt->gcLevel > rt->gcStats.maxlevel)
              rt->gcStats.maxlevel = rt->gcLevel);
    if (rt->gcLevel > 1)
        return;

#endif /* !JS_THREADSAFE */
    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

#ifdef DEBUG_scopemeters
  { extern void js_DumpScopeMeters(JSRuntime *rt);
    js_DumpScopeMeters(rt);
  }
#endif

#ifdef JS_THREADSAFE
    /*
     * Set all thread local freelists to NULL. We may visit a thread's
     * freelist more than once. To avoid redundant clearing we unroll the
     * current thread's step.
     *
     * Also, in case a JSScript wrapped within an object was finalized, we
     * null acx->thread->gsnCache.script and finish the cache's hashtable.
     * Note that js_DestroyScript, called from script_finalize, will have
     * already cleared cx->thread->gsnCache above during finalization, so we
     * don't have to here.
     */
    memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        if (!acx->thread || acx->thread == cx->thread)
            continue;
        memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
    }
#else
    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
    GSN_CACHE_CLEAR(&rt->gsnCache);
#endif

  restart:
    JS_ASSERT(!rt->gcUntracedArenaStackTop);
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    /*
     * Mark phase.
     */
    JS_TRACER_INIT(&trc, cx, NULL);
    rt->gcMarkingTracer = &trc;
    JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
    js_TraceRuntime(&trc, keepAtoms);
    js_MarkScriptFilenames(rt, keepAtoms);

    /*
     * Mark children of things that caused too deep recursion during the above
     * marking phase.
     */
    TraceDelayedChildren(&trc);

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    }
    JS_ASSERT(rt->gcTraceLaterCount == 0);

    rt->gcMarkingTracer = NULL;
    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * We first sweep atom state so we can use js_IsAboutToBeFinalized on
     * JSString or jsdouble held in a hashtable to check if the hashtable
     * entry can be freed. Note that even after the entry is freed, JSObject
     * finalizers can continue to access the corresponding jsdouble* and
     * JSString* assuming that they are unique. This works since the
     * atomization API must not be called during GC.
     */
    js_SweepAtomState(cx);

    /* Finalize iterator states before the objects they iterate over. */
    CloseNativeIterators(cx);

    /* Finalize watch points associated with unreachable objects. */
    js_SweepWatchPoints(cx);

#ifdef DUMP_CALL_TABLE
    /*
     * Call js_DumpCallTable here so it can meter and then clear weak refs to
     * GC-things that are about to be finalized.
     */
    js_DumpCallTable(cx);
#endif
    /*
     * Here we need to ensure that JSObject instances are finalized before GC-
     * allocated JSString and jsdouble instances so object's finalizer can
     * access them even if they will be freed. For that we simply finalize the
     * list containing JSObject first since the static assert at the beginning
     * of the file guarantees that JSString and jsdouble instances are
     * allocated from a different list.
     */
    emptyArenas = NULL;
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i == 0
                                     ? GC_FREELIST_INDEX(sizeof(JSObject))
                                     : i == GC_FREELIST_INDEX(sizeof(JSObject))
                                     ? 0
                                     : i];
        ap = &arenaList->last;
        if (!(a = *ap))
            continue;

        JS_ASSERT(arenaList->lastCount > 0);
        arenaList->freeList = NULL;
        freeList = NULL;
        METER(arenaList->stats.nthings = 0);
        METER(arenaList->stats.freelen = 0);
        thingSize = arenaList->thingSize;
        indexLimit = THINGS_PER_ARENA(thingSize);
        flagp = THING_FLAGP(a, arenaList->lastCount - 1);
        for (;;) {
            METER(size_t nfree = 0);

            JS_ASSERT(a->prevUntracedPage == 0);
            JS_ASSERT(a->untracedThings == 0);
            allClear = JS_TRUE;
            do {
                flags = *flagp;
                if (flags & (GCF_MARK | GCF_LOCK)) {
                    *flagp &= ~GCF_MARK;
                    allClear = JS_FALSE;
                    METER(++arenaList->stats.nthings);
                } else if (!(flags & GCF_FINAL)) {
                    /* Call the finalizer with GCF_FINAL ORed into flags. */
                    thing = FLAGP_TO_THING(flagp, thingSize);
                    *flagp = (uint8)(flags | GCF_FINAL);
                    type = flags & GCF_TYPEMASK;
                    switch (type) {
                      case GCX_OBJECT:
                        js_FinalizeObject(cx, (JSObject *) thing);
                        break;
                      case GCX_FUNCTION:
                        js_FinalizeFunction(cx, (JSFunction *) thing);
                        break;
#if JS_HAS_XML_SUPPORT
                      case GCX_NAMESPACE:
                        js_FinalizeXMLNamespace(cx, (JSXMLNamespace *) thing);
                        break;
                      case GCX_QNAME:
                        js_FinalizeXMLQName(cx, (JSXMLQName *) thing);
                        break;
                      case GCX_XML:
                        js_FinalizeXML(cx, (JSXML *) thing);
                        break;
#endif
                      default:
                        JS_ASSERT(type == GCX_STRING ||
                                  type - GCX_EXTERNAL_STRING <
                                  GCX_NTYPES - GCX_EXTERNAL_STRING);
                        js_FinalizeStringRT(rt, (JSString *) thing, type, cx);
                        break;
                    }

                    /* Thread the now-free thing onto the free list. */
                    thing->flagp = flagp;
                    thing->next = freeList;
                    freeList = thing;
                    METER(++nfree);
                }
            } while (++flagp != THING_FLAGS_END(a));

            if (allClear) {
                /*
                 * Forget just assembled free list head for the arena and
                 * add the arena itself to the destroy list.
                 */
                freeList = arenaList->freeList;
                if (a == arenaList->last)
                    arenaList->lastCount = indexLimit;
                *ap = a->prev;
                JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
                rt->gcBytes -= GC_ARENA_SIZE;
                a->prev = emptyArenas;
                emptyArenas = a;
            } else {
                arenaList->freeList = freeList;
                ap = &a->prev;
                METER(arenaList->stats.freelen += nfree);
                METER(arenaList->stats.totalfreelen += nfree);
                METER(++arenaList->stats.totalarenas);
            }
            if (!(a = *ap))
                break;
            flagp = THING_FLAGP(a, indexLimit - 1);
        }
    }
    /*
     * Sweep the runtime's property tree after finalizing objects, in case any
     * had watchpoints referencing tree nodes.
     */
    js_SweepScopeProperties(cx);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. In this way when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Destroy arenas after we finished the sweeping so finalizers can safely
     * use js_IsAboutToBeFinalized().
     */
    while (emptyArenas) {
        a = emptyArenas;
        emptyArenas = emptyArenas->prev;
        DestroyGCArena(rt, a);
    }
    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
    printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
  }
#endif

    JS_LOCK_GC(rt);

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (rt->gcLevel > 1 || rt->gcPoke) {
        rt->gcLevel = 1;
        rt->gcPoke = JS_FALSE;
        JS_UNLOCK_GC(rt);
        goto restart;
    }
    rt->gcLevel = 0;
    rt->gcLastBytes = rt->gcBytes;
    rt->gcRunning = JS_FALSE;
#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    if (requestDebit)
        rt->requestCount += requestDebit;
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);

    /*
     * Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
     */
    if (gckind != GC_LAST_DITCH)
        JS_UNLOCK_GC(rt);
#endif

    /* Execute JSGC_END callback outside the lock. */
    if (rt->gcCallback) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind == GC_LAST_DITCH) {
            /*
             * We allow JSGC_END implementation to force a full GC or allocate
             * new GC things. Thus we must protect the weak roots from GC or
             * overwriting.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
            JS_KEEP_ATOMS(rt);
            JS_UNLOCK_GC(rt);
        }

        (void) rt->gcCallback(cx, JSGC_END);

        if (gckind == GC_LAST_DITCH) {
            JS_LOCK_GC(rt);
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown iterate until JSGC_END callback stops creating
             * garbage.
             */
            goto restart_after_callback;
        }
    }
}
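/*
 * Add nbytes to the thread's (or, without JS_THREADSAFE, the runtime's)
 * GC malloc counter, saturating at the maximum uint32 value instead of
 * wrapping around.
 */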
void
js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
{
    uint32 *pbytes, bytes;

#ifdef JS_THREADSAFE
    pbytes = &cx->thread->gcMallocBytes;
#else
    pbytes = &cx->runtime->gcMallocBytes;
#endif
    bytes = *pbytes;

    /* Saturate rather than overflow the counter. */
    *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;