// -*- mode: c++; c-basic-offset: 4 -*-
/*
 * This file is part of the KDE libraries
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Computer, Inc.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 * Copyright (C) 2007 Maksim Orlovich <maksim@kde.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "collector.h"

#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>

// Needed below: setjmp() in markCurrentThreadConservatively(), std::max in BlockList
#include <setjmp.h>
#include <algorithm>
#if PLATFORM(DARWIN)

#include <pthread.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)

#include <windows.h>

#else

#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <pthread.h> //krazy:exclude=includes (yes, it's duplicate, but in a different #if branch)

#if PLATFORM(SOLARIS_OS)
#include <thread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0
#if HAVE(VALGRIND_MEMCHECK_H) && !defined(NDEBUG)
#include <valgrind/memcheck.h>
#if defined(VALGRIND_MAKE_MEM_DEFINED)
#define VG_DEFINED(p) VALGRIND_MAKE_MEM_DEFINED(&p, sizeof(void*))
#else
#define VG_DEFINED(p)
#endif
#else
#define VG_DEFINED(p)
#endif

using std::max;

namespace KJS {

// tunable parameters
const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t MIN_ARRAY_SIZE = 14;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// A wee bit like a WTF::Vector, but perfectly POD, so it can be used in a global context.
struct BlockList {
    CollectorBlock** m_data;
    size_t m_used;
    size_t m_capacity;

    CollectorBlock* operator[](int pos) {
        return m_data[pos];
    }

    size_t used() const {
        return m_used;
    }

    void append(CollectorBlock* block) {
        if (m_used == m_capacity) {
            m_capacity = max(MIN_ARRAY_SIZE, m_capacity * GROWTH_FACTOR);
            m_data = static_cast<CollectorBlock**>(fastRealloc(m_data, m_capacity * sizeof(CollectorBlock*)));
        }
        m_data[m_used] = block;
        ++m_used;
    }

    void remove(CollectorBlock* block) {
        size_t c;
        for (c = 0; c < m_used; ++c)
            if (m_data[c] == block)
                break;

        if (c == m_used)
            return;

        // Move things up, and shrink..
        --m_used;
        for (; c < m_used; ++c)
            m_data[c] = m_data[c+1];
    }
};
struct CollectorHeap {
    // This contains the list of both normal and oversize blocks
    BlockList allBlocks;

    // Just the normal blocks
    BlockList blocks;

    size_t firstBlockWithPossibleSpace;

    // The oversize block handling is a bit tricky, since we do not wish to slow down
    // the usual paths. Hence, we do the following:
    // 1) We set the marked bits for any extension portions of the block.
    //    This way the stack GC doesn't have to do anything special if a pointer
    //    happens to go that way.
    // 2) We keep track of an allBlocks list to help the stack GC test things.
    //
    // The oversize heap itself represents blocks as follows:
    // 1) free: marked = false, allocd = false, trailer = false, zeroIfFree = 0
    // 2) alloc'd, head: marked = depends, allocd = true, trailer = false, zeroIfFree is uncertain
    // 3) alloc'd, trailer: marked = true (see above), allocd = true, trailer = true, zeroIfFree is uncertain
    //
    // For now, we don't use a freelist, so performance may be quite bad if
    // this is used heavily; this is just because the cell does not have
    // back and forward links, which we need since we can pick a non-first cell.
    // ### actually, it may be possible to shuffle the list. Not now, though.
    BlockList oversizeBlocks;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;
};
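// Illustrative example (not from the original source): an oversize allocation
// spanning cells 4..6 of a block is recorded as
//   cell 4 (head):     allocd = 1, trailer = 0, marked = set/cleared by the GC
//   cells 5, 6 (tail): allocd = 1, trailer = 1, marked = 1 (kept set, per rule 1 above)
// so a conservative stack scan that happens to land on cell 5 or 6 sees "marked"
// and needs no special handling.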
static CollectorHeap heap;

bool Collector::memoryFull = false;
static CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE); // note: takes the address of the out-pointer
    memset(address, 0, BLOCK_SIZE);
#else
    // No aligned allocator available: grab a page-aligned chunk with mmap and
    // trim it to BLOCK_SIZE alignment by hand.
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    // Unmap the misaligned head of the mapping...
    if (adjust > 0)
        munmap(reinterpret_cast<void*>(address), adjust);

    // ...and whatever is left over past the aligned block.
    if (adjust < extra)
        munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif

    CollectorBlock* ptr = reinterpret_cast<CollectorBlock*>(address);
    heap.allBlocks.append(ptr); // Register with the heap
    return ptr;
}
static void freeBlock(CollectorBlock* block)
{
    // Unregister the block first
    heap.allBlocks.remove(block);

#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    VirtualFree(block, 0, MEM_RELEASE); // MEM_RELEASE requires dwSize == 0
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}
void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    heap.extraCost += cost;
}
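// Worked example (illustrative; the numbers are made up, only the mechanism is
// taken from the code): with ALLOCATIONS_PER_COLLECTION == 4000, an object that
// pins, say, 8000 bytes of external memory and reports it via recordExtraCost()
// pushes newCost in Collector::allocate() past the threshold long before 4000
// cells have been handed out, so a collection happens sooner (subject to the
// second numLiveObjectsAtLastCollect guard there).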
static void* allocOversize(size_t s)
{
    size_t cellsNeeded = (s + (CELL_SIZE - 1)) / CELL_SIZE;

    // printf("allocOversize, size:%d, cellsNeeded:%d\n", s, cellsNeeded);

    // We are not oversize enough to deal with things close to 64K in size
    assert(cellsNeeded <= CELLS_PER_BLOCK);

    // Look through the blocks, see if one has enough, and where.
    CollectorBlock* sufficientBlock = 0;
    size_t startOffset = -1;
    for (size_t b = 0; b < heap.oversizeBlocks.used() && !sufficientBlock; ++b) {
        CollectorBlock* candidate = heap.oversizeBlocks[b];
        if (cellsNeeded <= CELLS_PER_BLOCK - candidate->usedCells) {
            // Well, there is a chance we will fit.. Let's see if we can find a batch long enough..
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (i % 32 == 0 && candidate->allocd.bits[i/32] == 0xFFFFFFFF) {
                    // Can skip this and 31 other cells
                    i += 31;
                    continue;
                }

                if (candidate->allocd.get(i))
                    continue;

                // This cell is free -- are there enough free cells after it?
                size_t last = i + cellsNeeded - 1;
                if (last >= CELLS_PER_BLOCK) // No way it will fit
                    break;

                while (i <= last && !candidate->allocd.get(i))
                    ++i;

                // Did we get through enough?
                if (i == last + 1) {
                    sufficientBlock = candidate;
                    startOffset = last + 1 - cellsNeeded;
                    break;
                }

                // Not enough room -- and the current entry has a non-zero zeroIfFree,
                // so we should go on at i + 1 on next iteration
            } // for each cell per block.
        } // if enough room in block
    }

    if (!sufficientBlock) {
        sufficientBlock = allocateBlock();
        startOffset = 0;
        heap.oversizeBlocks.append(sufficientBlock);
    }

    sufficientBlock->usedCells += cellsNeeded;

    // Set proper bits for trailers and the head
    sufficientBlock->allocd.set(startOffset);
    for (size_t t = startOffset + 1; t < startOffset + cellsNeeded; ++t) {
        sufficientBlock->trailer.set(t);
        sufficientBlock->marked.set(t);
        sufficientBlock->allocd.set(t);
    }

    void* result = sufficientBlock->cells + startOffset;
    memset(result, 0, s);
    heap.numLiveObjects = heap.numLiveObjects + 1;
    return result;
}
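// Worked example (illustrative, assuming CELL_SIZE == 64; the real value comes
// from collector.h): allocOversize(200) needs (200 + 63) / 64 == 4 cells. The
// first cell becomes the head (allocd only); the remaining three become
// trailers with allocd, trailer, and marked all set, matching the layout
// rules documented on CollectorHeap above.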
void* Collector::allocate(size_t s)
{
    assert(JSLock::lockCount() > 0);

    // collect if needed
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    size_t newCost = numNewObjects + heap.extraCost;

    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        collect();
        numLiveObjects = heap.numLiveObjects;
    }

    if (s > CELL_SIZE)
        return allocOversize(s);

    // find a block with a free spot
    size_t usedBlocks = heap.blocks.used();

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks)
                goto allocateNewBlock;
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
allocateNewBlock:
        // didn't find one, need to allocate a new block
        targetBlock = allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks.append(targetBlock);
        heap.firstBlockWithPossibleSpace = usedBlocks; // previous usedBlocks -> new one's index
    }

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);

    targetBlock->usedCells = targetBlockUsedCells + 1;
    heap.numLiveObjects = numLiveObjects + 1;

    return newCell;
}
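// Worked example (illustrative): freeCell.next stores a byte offset relative to
// the cell *after* the current one, so in a freshly zeroed block next == 0 and
// the free list implicitly chains cell 0 -> cell 1 -> cell 2 -> ... with no
// initialization pass. After a sweep, a free cell at address A whose successor
// on the free list is at address B stores next == B - (A + sizeof(CollectorCell)).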
#if USE(MULTIPLE_THREADS)

struct Collector::Thread {
    Thread(pthread_t pthread, mach_port_t mthread) : posixThread(pthread), machThread(mthread) {}
    Thread* next;
    pthread_t posixThread;
    mach_port_t machThread;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;

Collector::Thread* registeredThreads;
static void destroyRegisteredThread(void *data)
{
    Collector::Thread* thread = (Collector::Thread*)data;

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        // Unlink the thread from the middle of the list
        Collector::Thread* last = registeredThreads;
        for (Collector::Thread* t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
    }

    delete thread;
}
static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}
void Collector::registerThread()
{
    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        pthread_t pthread = pthread_self();
        WTF::fastMallocRegisterThread(pthread);
        Collector::Thread* thread = new Collector::Thread(pthread, pthread_mach_thread_np(pthread));
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif
#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)
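// Example (illustrative, assuming CELL_SIZE == 64 and hence CELL_MASK == 0x3f):
// a value like 0x...0080 passes IS_CELL_ALIGNED while 0x...0084 fails, so the
// conservative scanner below can reject most non-pointers with a single AND.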
void Collector::markStackObjectsConservatively(void *start, void *end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    assert(((char *)end - (char *)start) < 0x1000000);
    assert(IS_POINTER_ALIGNED(start));
    assert(IS_POINTER_ALIGNED(end));

    char **p = (char **)start;
    char **e = (char **)end;

    // We use allBlocks here since we want to mark oversize cells as well.
    // Their trailers will have the mark bit set already, to avoid trouble.
    size_t usedBlocks = heap.allBlocks.used();
    CollectorBlock **blocks = heap.allBlocks.m_data;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char *x = *p++;
        VG_DEFINED(x);
        if (IS_CELL_ALIGNED(x) && x) {
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(x - offset);
            for (size_t block = 0; block < usedBlocks; block++) {
                // The bitwise & below is presumably intentional: both comparisons
                // are cheap, and skipping the short-circuit avoids a branch in
                // this very hot loop.
                if ((blocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell *)x)->u.freeCell.zeroIfFree != 0) {
                        JSCell *imp = reinterpret_cast<JSCell *>(x);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        }
    } // for each pointer
}
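// Note on the conservatism above: any stack word that merely looks like a cell
// pointer pins that cell for this cycle. That can over-retain (e.g. a stale
// integer that happens to match a cell address), but it can never under-retain,
// which is the safe direction for a collector without precise stack maps.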
static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    void *stackBase = pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    void *stackBase = (void *)pTib->StackBase;
#elif (PLATFORM(WIN_OS) || COMPILER(CYGWIN)) && PLATFORM(X86) && COMPILER(GCC)
    NT_TIB* pTib;
    __asm__("movl %%fs:0x18,%0"
            : "=r" (pTib)
    );
    void *stackBase = (void *)pTib->StackBase;
#elif PLATFORM(SOLARIS_OS)
    stack_t s;
    thr_stksegment(&s);
    void *stackBase = s.ss_sp;
#elif PLATFORM(UNIX)
    static void *stackBase = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
#if HAVE(PTHREAD_NP_H) || defined(__NetBSD__)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        // also on NetBSD 3 and 4, raphael.langerhorst@kdemail.net
        // HIGHLY RECOMMENDED by manpage to allocate storage, avoids
        // crashing in JS immediately in FreeBSD.
        pthread_attr_init(&sattr);
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif // picking the _np function to use --- Linux or BSD
        size_t stackSize = 0;
        pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        stackBase = (char *)stackBase + stackSize; // a matter of interpretation, apparently...
        assert(stackBase);
        stackThread = thread;
    }
#else
#error Need a way to get the stack base on this platform
#endif
    return stackBase;
}
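// Note: every branch above reports the *highest* address of the stack, since
// these architectures grow the stack downward; on the generic UNIX path that
// is why stackSize is added to the address returned by pthread_attr_getstack().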
void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
}
#if USE(MULTIPLE_THREADS)

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
void Collector::markOtherThreadConservatively(Thread *thread)
{
    thread_suspend(thread->machThread);

#if PLATFORM(X86)
    i386_thread_state_t regs;
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    x86_thread_state64_t regs;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    ppc_thread_state_t regs;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    ppc_thread_state64_t regs;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif

    // get the thread register state
    thread_get_state(thread->machThread, flavor, (thread_state_t)&regs, &user_count);

    // scan the registers
    markStackObjectsConservatively((void *)&regs, (void *)((char *)&regs + (user_count * sizeof(usword_t))));

    // scan the stack
#if PLATFORM(X86) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86)
    markStackObjectsConservatively((void *)regs.esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64)
    markStackObjectsConservatively((void *)regs.rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif (PLATFORM(PPC) || PLATFORM(PPC64)) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__r1, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    markStackObjectsConservatively((void *)regs.r1, pthread_get_stackaddr_np(thread->posixThread));
#else
#error Unknown Architecture
#endif

    thread_resume(thread->machThread);
}

#endif
void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (thread->posixThread != pthread_self()) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}
typedef HashCountedSet<JSCell *> ProtectCounts;

static ProtectCounts& protectedValues()
{
    static ProtectCounts pv;
    return pv;
}
void Collector::protect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
}
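// Usage sketch (illustrative): protection is counted rather than boolean,
// because ProtectCounts is a HashCountedSet. Protecting a value twice means
// it must also be unprotected twice before it can be collected again:
//
//   Collector::protect(v);   // count == 1
//   Collector::protect(v);   // count == 2
//   Collector::unprotect(v); // count == 1 -- still a GC root
//   Collector::unprotect(v); // count == 0 -- collectable again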
void Collector::markProtectedObjects()
{
    ProtectCounts& pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        JSCell *val = it->first;
        if (!val->marked())
            val->mark();
    }
}
bool Collector::collect()
{
    assert(JSLock::lockCount() > 0);

#if USE(MULTIPLE_THREADS)
    bool currentThreadIsMainThread = pthread_main_np();
#else
    bool currentThreadIsMainThread = true;
#endif

    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            scr->mark(currentThreadIsMainThread);
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }

    // MARK: first mark all referenced objects recursively starting out from the set of root objects
    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else

    // Note: if a cell has zeroIfFree == 0, it is either free,
    // or in the middle of being constructed and has not yet
    // had its vtable filled. Hence, such cells should not be cleaned up

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.blocks.used(); block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                CollectorCell *cell = curBlock->cells + i;
                JSCell *imp = reinterpret_cast<JSCell *>(cell);
                if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior this point assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == 0)
                        continue;

                    imp->~JSCell();
                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            // the bitwise & below is presumably intentional, as in
            // markStackObjectsConservatively: it saves a branch in a hot loop
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    JSCell *imp = reinterpret_cast<JSCell *>(cell);
                    if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                        imp->~JSCell();
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->marked.clearAll();
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks.m_data[block] = heap.blocks[heap.blocks.used() - 1];
                heap.blocks.m_used--;
                block--; // Don't move forward a step in this case
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    // Now sweep oversize blocks.
    emptyBlocks = 0;
    for (size_t ob = 0; ob < heap.oversizeBlocks.used(); ++ob) {
        CollectorBlock* curBlock = heap.oversizeBlocks[ob];
        CollectorCell *freeList = curBlock->freeList;
        size_t usedCells = curBlock->usedCells;

        // Go through the cells
        for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
            if (i % 32 == 0 && curBlock->allocd.bits[i/32] == 0) {
                // Nothing used around here, skip this and 31 next cells
                i += 31;
                continue;
            }

            CollectorCell *cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == 0)
                continue;

            if (!curBlock->allocd.get(i))
                continue;

            JSCell *imp = reinterpret_cast<JSCell *>(cell);

            // Live and trailer cells will all have the mark set,
            // so we only have to collect with it clear --
            // and we already took care of those that are
            // already free (or being initialized) above
            if (!curBlock->marked.get(i)) {
                // Free this cell and its trailers...
                imp->~JSCell();
                --numLiveObjects;
                --usedCells;

                // Mark the cell and the trailers as available
                cell->u.freeCell.zeroIfFree = 0;
                curBlock->allocd.clear(i);

                ++i; // Go to the potential trailer..
                while (i < CELLS_PER_BLOCK && curBlock->trailer.get(i)) { // bounds check first
                    --usedCells;
                    curBlock->allocd.clear(i);
                    curBlock->trailer.clear(i);
                    curBlock->marked.clear(i);
                    curBlock->cells[i].u.freeCell.zeroIfFree = 0;
                    ++i;
                }
                --i; // Last item we processed.
            } else {
                // If this is not a trailer cell, clear the mark
                if (!curBlock->trailer.get(i))
                    curBlock->marked.clear(i);
            }
        } // for each cell

        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;
        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.oversizeBlocks.m_data[ob] = heap.oversizeBlocks[heap.oversizeBlocks.used() - 1];
                heap.oversizeBlocks.m_used--;
                ob--; // Don't move forward a step in this case
            }
        }
    } // each oversize block

    bool deleted = heap.numLiveObjects != numLiveObjects;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
    if (newMemoryFull && newMemoryFull != memoryFull)
        reportOutOfMemoryToAllInterpreters();
    memoryFull = newMemoryFull;

    return deleted;
}
size_t Collector::size()
{
    return heap.numLiveObjects;
}
void Collector::finalCheck()
{
}

size_t Collector::numInterpreters()
{
    size_t count = 0;
    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            ++count;
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }
    return count;
}
size_t Collector::numProtectedObjects()
{
    return protectedValues().size();
}
static const char *typeName(JSCell *val)
{
    const char *name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}
<const char*>* Collector::rootObjectTypeCounts()
914 HashCountedSet
<const char*>* counts
= new HashCountedSet
<const char*>;
916 ProtectCounts
& pv
= protectedValues();
917 ProtectCounts::iterator end
= pv
.end();
918 for (ProtectCounts::iterator it
= pv
.begin(); it
!= end
; ++it
)
919 counts
->add(typeName(it
->first
));
void Collector::reportOutOfMemoryToAllInterpreters()
{
    if (!Interpreter::s_hook)
        return;

    Interpreter* interpreter = Interpreter::s_hook;
    do {
        ExecState* exec = interpreter->execState();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while (interpreter != Interpreter::s_hook);
}

} // namespace KJS