// -*- mode: c++; c-basic-offset: 4 -*-
/*
 *  This file is part of the KDE libraries
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Computer, Inc.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *  Copyright (C) 2007 Maksim Orlovich <maksim@kde.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "collector.h"
#include <config.h>

#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include "internal.h"
#include "list.h"
#include "value.h"

#include <setjmp.h>
#include <algorithm>

#if PLATFORM(DARWIN)

#include <pthread.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)

#include <windows.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h> //krazy:exclude=includes (yes, it's duplicate, but in a different #if branch)
#include <unistd.h>

#if PLATFORM(SOLARIS_OS)
#include <thread.h>
#include <signal.h>
using std::memset;
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0

#if HAVE(VALGRIND_MEMCHECK_H) && !defined(NDEBUG)
#include <valgrind/memcheck.h>
#if defined(VALGRIND_MAKE_MEM_DEFINED)
#define VG_DEFINED(p) VALGRIND_MAKE_MEM_DEFINED(&p, sizeof(void*))
#else
#define VG_DEFINED(p)
#endif
#else
#define VG_DEFINED(p)
#endif

using std::max;

namespace KJS {

// tunable parameters
const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t MIN_ARRAY_SIZE = 14;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
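
// How these tunables are used below: Collector::allocate() only considers a
// collection once ALLOCATIONS_PER_COLLECTION new cells (plus any recorded extra
// cost) have been allocated since the last collect, and collect() keeps up to
// SPARE_EMPTY_BLOCKS completely empty blocks around instead of returning them
// to the system. MIN_ARRAY_SIZE and GROWTH_FACTOR control BlockList growth.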

// A wee bit like a WTF::Vector, but perfectly POD, so can be used in global context
// w/o worries.
struct BlockList {
    CollectorBlock** m_data;
    size_t m_used;
    size_t m_capacity;

    CollectorBlock* operator[](int pos) {
        return m_data[pos];
    }

    size_t used() const {
        return m_used;
    }

    void append(CollectorBlock* block) {
        if (m_used == m_capacity) {
            m_capacity = max(MIN_ARRAY_SIZE, m_capacity * GROWTH_FACTOR);
            m_data = static_cast<CollectorBlock **>(fastRealloc(m_data, m_capacity * sizeof(CollectorBlock *)));
        }
        m_data[m_used] = block;
        ++m_used;
    }

    void remove(CollectorBlock* block) {
        size_t c;
        for (c = 0; c < m_used; ++c)
            if (m_data[c] == block)
                break;

        if (c == m_used)
            return;

        // Move things up, and shrink..
        --m_used;
        for (; c < m_used; ++c)
            m_data[c] = m_data[c+1];
    }
};

struct CollectorHeap {
    // This contains the list of both normal and oversize blocks
    BlockList allBlocks;

    // Just the normal blocks
    BlockList blocks;

    size_t firstBlockWithPossibleSpace;

    // The oversize block handling is a bit tricky, since we do not wish to slow down
    // the usual paths. Hence, we do the following:
    // 1) We set the marked bits for any extension portions of the block.
    //    This way the stack GC doesn't have to do anything special if a pointer
    //    happens to go that way.
    // 2) We keep track of an allBlocks list to help the stack GC test things.
    //
    // The oversize heap itself represents blocks as follows:
    // 1) free: marked = false, allocd = false, trailer = false, zeroIfFree = 0
    // 2) alloc'd, head: marked = depends, allocd = true, trailer = false, zeroIfFree is uncertain
    // 3) alloc'd, trailer: marked = true (see above), allocd = true, trailer = true, zeroIfFree is uncertain
    //
    // For now, we don't use a freelist, so performance may be quite bad if
    // this is used heavily; this is just because the cell does not have
    // back and forward links; which we need since we can pick a non-first cell
    // ### actually, it may be possible to shuffle the list. Not now, though
    BlockList oversizeBlocks;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;
};

static CollectorHeap heap;

bool Collector::memoryFull = false;
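
// Collector blocks must start on a BLOCK_SIZE-aligned boundary: the conservative
// marker recovers a cell's owning block by masking a candidate pointer with
// BLOCK_OFFSET_MASK (see markStackObjectsConservatively below), so every platform
// branch here has to hand back BLOCK_SIZE-aligned memory, not merely page-aligned
// memory.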
static CollectorBlock* allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<void*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif

    CollectorBlock* ptr = reinterpret_cast<CollectorBlock*>(address);
    heap.allBlocks.append(ptr); // Register with the heap
    return ptr;
}

static void freeBlock(CollectorBlock* block)
{
    // Unregister the block first
    heap.allBlocks.remove(block);

#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS) || COMPILER(CYGWIN)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}

void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    heap.extraCost += cost;
}
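
// Oversize allocation: objects bigger than CELL_SIZE are carved out of ordinary
// collector blocks as a run of contiguous cells. The search below is a simple
// first fit across the existing oversize blocks; if no block contains a long
// enough run of free cells, a brand new block is allocated for the object.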
static void* allocOversize(size_t s)
{
    size_t cellsNeeded = (s + (CELL_SIZE - 1)) / CELL_SIZE;

    // printf("allocOversize, size:%d, cellsNeeded:%d\n", s, cellsNeeded);

    // We are not oversize enough to deal with things close to 64K in size
    assert(cellsNeeded <= CELLS_PER_BLOCK);

    // Look through the blocks, see if one has enough, and where.
    CollectorBlock* sufficientBlock = 0;
    size_t startOffset = -1;
    for (size_t b = 0; b < heap.oversizeBlocks.used() && !sufficientBlock; ++b) {
        CollectorBlock* candidate = heap.oversizeBlocks[b];
        if (cellsNeeded <= CELLS_PER_BLOCK - candidate->usedCells) {
            // Well, there is a chance we will fit.. Let's see if we can find a batch long enough..
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (i % 32 == 0 && candidate->allocd.bits[i/32] == 0xFFFFFFFF) {
                    // Can skip this and 31 other cells
                    i += 31;
                    continue;
                }

                if (candidate->allocd.get(i))
                    continue;

                // This cell is free -- are there enough free cells after it?
                startOffset = i;
                size_t last = i + cellsNeeded - 1;

                if (last >= CELLS_PER_BLOCK) // No way it will fit
                    break;

                ++i;
                while (i <= last && !candidate->allocd.get(i))
                    ++i;

                // Did we get through enough?
                if (i == last + 1) {
                    sufficientBlock = candidate;
                    break;
                }

                // Not enough room -- and the current entry has a non-zero zeroIfFree,
                // so we should go on at i + 1 on next iteration
            } // for each cell per block.
        } // if enough room in block
    } // for each block

    if (!sufficientBlock) {
        sufficientBlock = allocateBlock();
        startOffset = 0;
        heap.oversizeBlocks.append(sufficientBlock);
    }

    sufficientBlock->usedCells += cellsNeeded;

    // Set proper bits for trailers and the head
    sufficientBlock->allocd.set(startOffset);
    for (size_t t = startOffset + 1; t < startOffset + cellsNeeded; ++t) {
        sufficientBlock->trailer.set(t);
        sufficientBlock->marked.set(t);
        sufficientBlock->allocd.set(t);
    }

    void* result = sufficientBlock->cells + startOffset;
    memset(result, 0, s);
    heap.numLiveObjects = heap.numLiveObjects + 1;
    return result;
}
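
// Fast-path allocator for ordinary single-cell objects. A collection is only
// triggered when the allocation count since the last collect (plus recorded
// extra cost) reaches ALLOCATIONS_PER_COLLECTION *and* is at least as large as
// the number of objects that survived that collect, so a big, stable heap is
// not rescanned for every handful of new allocations.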
void* Collector::allocate(size_t s)
{
    assert(JSLock::lockCount() > 0);

    // collect if needed
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    size_t newCost = numNewObjects + heap.extraCost;

    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        collect();
        numLiveObjects = heap.numLiveObjects;
    }

    if (s > CELL_SIZE) {
        return allocOversize(s);
    }

    // slab allocator

    size_t usedBlocks = heap.blocks.used();

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks)
                goto allocateNewBlock;
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
allocateNewBlock:
        // didn't find one, need to allocate a new block
        targetBlock = allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks.append(targetBlock);
        heap.firstBlockWithPossibleSpace = usedBlocks; // previous usedBlocks -> new one's index
    }
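
    // The per-block free list is threaded through the free cells themselves:
    // each free cell stores in u.freeCell.next the byte distance from the end of
    // that cell to the next free cell, so a freshly zeroed block already forms a
    // valid list in which every cell points at its immediate neighbour.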

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);

    targetBlock->usedCells = targetBlockUsedCells + 1;
    heap.numLiveObjects = numLiveObjects + 1;

    return newCell;
}

#if USE(MULTIPLE_THREADS)

struct Collector::Thread {
    Thread(pthread_t pthread, mach_port_t mthread) : posixThread(pthread), machThread(mthread) {}
    Thread *next;
    pthread_t posixThread;
    mach_port_t machThread;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;

Collector::Thread *registeredThreads;

static void destroyRegisteredThread(void *data)
{
    Collector::Thread *thread = (Collector::Thread *)data;

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread *last = registeredThreads;
        for (Collector::Thread *t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
    }

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        pthread_t pthread = pthread_self();
        WTF::fastMallocRegisterThread(pthread);
        Collector::Thread *thread = new Collector::Thread(pthread, pthread_mach_thread_np(pthread));
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)
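
// Conservative root scanning: every pointer-aligned word in [start, end) is
// treated as a potential JSCell*. A word only marks a cell if it is cell-aligned,
// falls inside one of the heap's registered blocks, and names a cell that is not
// sitting on a free list (zeroIfFree != 0). False positives merely keep dead
// objects around longer; they can never cause a live object to be freed.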
void Collector::markStackObjectsConservatively(void *start, void *end)
{
    if (start > end) {
        void *tmp = start;
        start = end;
        end = tmp;
    }

    assert(((char *)end - (char *)start) < 0x1000000);
    assert(IS_POINTER_ALIGNED(start));
    assert(IS_POINTER_ALIGNED(end));

    char **p = (char **)start;
    char **e = (char **)end;

    // We use allBlocks here since we want to mark oversize cells as well.
    // Their trailers will have the mark bit set already, to avoid trouble.
    size_t usedBlocks = heap.allBlocks.used();
    CollectorBlock **blocks = heap.allBlocks.m_data;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char *x = *p++;
        VG_DEFINED(x);
        if (IS_CELL_ALIGNED(x) && x) {
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(x - offset);
            for (size_t block = 0; block < usedBlocks; block++) {
                if ((blocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell *)x)->u.freeCell.zeroIfFree != 0) {
                        JSCell *imp = reinterpret_cast<JSCell *>(x);
                        if (!imp->marked())
                            imp->mark();
                    }
                } // if valid block
            } // for each block
        } // if cell-aligned
    } // for each pointer
}

static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    void *stackBase = pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    NT_TIB *pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    void *stackBase = (void *)pTib->StackBase;
#elif (PLATFORM(WIN_OS) || COMPILER(CYGWIN)) && PLATFORM(X86) && COMPILER(GCC)
    NT_TIB *pTib;
    __asm__("movl %%fs:0x18,%0"
            : "=r" (pTib)
    );
    void *stackBase = (void *)pTib->StackBase;
#elif PLATFORM(SOLARIS_OS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
    // NOTREACHED
    void *stackBase = 0;
#elif PLATFORM(UNIX)
    static void *stackBase = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
#if HAVE(PTHREAD_NP_H) || defined(__NetBSD__)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        // also on NetBSD 3 and 4, raphael.langerhorst@kdemail.net
        // HIGHLY RECOMMENDED by manpage to allocate storage, avoids
        // crashing in JS immediately in FreeBSD.
        pthread_attr_init(&sattr);
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif // picking the _np function to use --- Linux or BSD

        size_t stackSize;
        pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        stackBase = (char *)stackBase + stackSize; // a matter of interpretation, apparently...
        assert(stackBase);
        stackThread = thread;
    }
#else
#error Need a way to get the stack base on this platform
#endif
    return stackBase;
}
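
// Marking the current thread: the address of a local variable approximates the
// current stack pointer, and everything between it and currentThreadStackBase()
// is scanned. The setjmp() call only exists to spill callee-saved registers into
// a jmp_buf on the stack so that pointers held in registers are scanned as well.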
void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
}

#if USE(MULTIPLE_THREADS)

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

void Collector::markOtherThreadConservatively(Thread *thread)
{
    thread_suspend(thread->machThread);

#if PLATFORM(X86)
    i386_thread_state_t regs;
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    x86_thread_state64_t regs;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    ppc_thread_state_t regs;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    ppc_thread_state64_t regs;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif
    // get the thread register state
    thread_get_state(thread->machThread, flavor, (thread_state_t)&regs, &user_count);

    // scan the registers
    markStackObjectsConservatively((void *)&regs, (void *)((char *)&regs + (user_count * sizeof(usword_t))));

    // scan the stack
#if PLATFORM(X86) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86)
    markStackObjectsConservatively((void *)regs.esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64)
    markStackObjectsConservatively((void *)regs.rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif (PLATFORM(PPC) || PLATFORM(PPC64)) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__r1, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    markStackObjectsConservatively((void *)regs.r1, pthread_get_stackaddr_np(thread->posixThread));
#else
#error Unknown Architecture
#endif

    thread_resume(thread->machThread);
}

#endif

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (thread->posixThread != pthread_self()) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}
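
// Explicit roots: protect()/unprotect() maintain a counted set of cells, so a
// value protected N times stays rooted until it has been unprotected N times;
// markProtectedObjects() marks every cell currently in that set during collect().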
typedef HashCountedSet<JSCell *> ProtectCounts;

static ProtectCounts& protectedValues()
{
    static ProtectCounts pv;
    return pv;
}

void Collector::protect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
}

void Collector::markProtectedObjects()
{
    ProtectCounts& pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        JSCell *val = it->first;
        if (!val->marked())
            val->mark();
    }
}
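
// A full, stop-the-world mark-and-sweep pass. The roots are each interpreter's
// own marked objects, the stacks and registers of all registered threads, the
// explicitly protected values, and protected Lists. Afterwards every unmarked,
// initialized cell is destructed and returned to its block's free list, and
// blocks that end up completely empty (beyond SPARE_EMPTY_BLOCKS) are released.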
bool Collector::collect()
{
    assert(JSLock::lockCount() > 0);

#if USE(MULTIPLE_THREADS)
    bool currentThreadIsMainThread = pthread_main_np();
#else
    bool currentThreadIsMainThread = true;
#endif

    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            scr->mark(currentThreadIsMainThread);
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }

    // MARK: first mark all referenced objects recursively starting out from the set of root objects
    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else

    // Note: if a cell has zeroIfFree == 0, it is either free,
    // or in the middle of being constructed and has not yet
    // had its vtable filled. Hence, such cells should not be cleaned up

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.blocks.used(); block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                CollectorCell *cell = curBlock->cells + i;
                JSCell *imp = reinterpret_cast<JSCell *>(cell);
                if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior to this point assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == 0)
                        continue;
                    imp->~JSCell();
                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    JSCell *imp = reinterpret_cast<JSCell *>(cell);
                    if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                        imp->~JSCell();
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->marked.clearAll();
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks.m_data[block] = heap.blocks[heap.blocks.used() - 1];
                heap.blocks.m_used--;
                block--; // Don't move forward a step in this case
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    // Now sweep oversize blocks.
    emptyBlocks = 0;
    for (size_t ob = 0; ob < heap.oversizeBlocks.used(); ++ob) {
        CollectorBlock* curBlock = heap.oversizeBlocks[ob];
        CollectorCell *freeList = curBlock->freeList;
        size_t usedCells = curBlock->usedCells;

        // Go through the cells
        for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
            if (i % 32 == 0 && curBlock->allocd.bits[i/32] == 0) {
                // Nothing used around here, skip this and 31 next cells
                i += 31;
                continue;
            }

            CollectorCell *cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == 0)
                continue;

            if (!curBlock->allocd.get(i))
                continue;

            JSCell *imp = reinterpret_cast<JSCell *>(cell);

            // Live and trailer cells will all have the mark set,
            // so we only have to collect with it clear --
            // and we already took care of those that are
            // already free (or being initialized) above
            if (!curBlock->marked.get(i)) {
                // Free this block...
                imp->~JSCell();
                --numLiveObjects;
                --usedCells;

                // Mark the block and the trailers as available
                cell->u.freeCell.zeroIfFree = 0;
                curBlock->allocd.clear(i);

                ++i; // Go to the potential trailer..
                // (check the bound before reading the trailer bitmap)
                while (i < CELLS_PER_BLOCK && curBlock->trailer.get(i)) {
                    --usedCells;
                    curBlock->allocd.clear(i);
                    curBlock->trailer.clear(i);
                    curBlock->marked.clear(i);
                    curBlock->cells[i].u.freeCell.zeroIfFree = 0;
                    ++i;
                }
                --i; // Last item we processed.
            } else {
                // If this is not a trailer cell, clear the mark
                if (!curBlock->trailer.get(i))
                    curBlock->marked.clear(i);
            }
        } // each cell
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;
        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                freeBlock(curBlock);

                // swap with the last block so we compact as we go
                heap.oversizeBlocks.m_data[ob] = heap.oversizeBlocks[heap.oversizeBlocks.used() - 1];
                heap.oversizeBlocks.m_used--;
                ob--; // Don't move forward a step in this case
            }
        }
    } // each oversize block

    bool deleted = heap.numLiveObjects != numLiveObjects;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
    if (newMemoryFull && newMemoryFull != memoryFull)
        reportOutOfMemoryToAllInterpreters();
    memoryFull = newMemoryFull;

    return deleted;
}

size_t Collector::size()
{
    return heap.numLiveObjects;
}

#ifdef KJS_DEBUG_MEM
void Collector::finalCheck()
{
}
#endif

size_t Collector::numInterpreters()
{
    size_t count = 0;
    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        do {
            ++count;
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }
    return count;
}

size_t Collector::numProtectedObjects()
{
    return protectedValues().size();
}

static const char *typeName(JSCell *val)
{
    const char *name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}

HashCountedSet<const char*>* Collector::rootObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCounts& pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

void Collector::reportOutOfMemoryToAllInterpreters()
{
    if (!Interpreter::s_hook)
        return;

    Interpreter* interpreter = Interpreter::s_hook;
    do {
        ExecState* exec = interpreter->execState();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while (interpreter != Interpreter::s_hook);
}

} // namespace KJS