1 // -*- mode: c++; c-basic-offset: 4 -*-
3 * This file is part of the KDE libraries
4 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.
5 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "collector.h"
26 #include "ExecState.h"
33 #include <wtf/FastMalloc.h>
34 #include <wtf/HashCountedSet.h>
35 #include <wtf/UnusedParam.h>
37 #if USE(MULTIPLE_THREADS)
43 #include <mach/mach_port.h>
44 #include <mach/mach_init.h>
45 #include <mach/task.h>
46 #include <mach/thread_act.h>
47 #include <mach/vm_map.h>
49 #include "CollectorHeapIntrospector.h"
51 #elif PLATFORM(WIN_OS)
61 #if HAVE(PTHREAD_NP_H)
62 #include <pthread_np.h>
69 #define DEBUG_COLLECTOR 0
// Heap tuning constants.
const size_t SPARE_EMPTY_BLOCKS = 2; // completely empty blocks kept in reserve after a sweep before extras are freed
const size_t MIN_ARRAY_SIZE = 14; // smallest capacity of a heap's block-pointer array
const size_t GROWTH_FACTOR = 2; // factor by which the block-pointer array grows (and shrinks)
const size_t LOW_WATER_FACTOR = 4; // shrink the block array once usedBlocks drops below numBlocks / LOW_WATER_FACTOR
const size_t ALLOCATIONS_PER_COLLECTION = 4000; // minimum number of new allocations before an automatic collect
83 enum OperationInProgress { NoOperation, Allocation, Collection };
// Bookkeeping for one garbage-collected heap (two instances are defined below).
// NOTE(review): this excerpt omits some fields used elsewhere in this file
// (e.g. usedBlocks, numBlocks, extraCost) -- the aggregate initializers below
// supply eight values; confirm against the full source.
struct CollectorHeap {
    CollectorBlock** blocks; // array of pointers to the heap's fixed-size, aligned blocks
    size_t firstBlockWithPossibleSpace; // index where the allocator starts scanning for a free cell
    size_t numLiveObjects; // cells currently in use across all blocks
    size_t numLiveObjectsAtLastCollect; // live-cell count right after the previous collection; drives the collect trigger
    OperationInProgress operationInProgress; // guards against allocate/collect re-entry
// The two collector heaps: primaryHeap holds general JS cells; numberHeap
// holds number cells, which get cheaper treatment during marking (no virtual
// calls -- see markStackObjectsConservatively) and sweeping.
static CollectorHeap primaryHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };
static CollectorHeap numberHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };

// FIXME: I don't think this needs to be a static data member of the Collector class.
// Just a private global like "heap" above would be fine.
size_t Collector::mainThreadOnlyObjectCount = 0; // live objects flagged via collectOnMainThreadOnly()

bool Collector::memoryFull = false; // true once live objects reached KJS_MEM_LIMIT at the last collect
// Allocates one zeroed CollectorBlock aligned on a BLOCK_SIZE boundary, so a
// cell address can be mapped back to its block by masking with BLOCK_OFFSET_MASK.
static CollectorBlock* allocateBlock()
    // Darwin: Mach vm_map honors the alignment mask argument directly.
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    // posix_memalign returns aligned but uninitialized memory; zero it explicitly.
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
    // Fallback: over-allocate with mmap by "extra" bytes, then unmap the
    // misaligned head and the unused tail so exactly one aligned BLOCK_SIZE
    // region remains mapped.
    static size_t pagesize = getpagesize();
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
    // trim the unaligned prefix...
    munmap(reinterpret_cast<void*>(address), adjust);
    // ...and whatever extra remains past the aligned block
    munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
    return reinterpret_cast<CollectorBlock*>(address);
// Returns a block's memory to the OS using the inverse of the corresponding
// allocateBlock() strategy for the platform.
static void freeBlock(CollectorBlock* block)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    // NOTE(review): the POSIX_MEMALIGN branch body is not visible in this
    // excerpt; the mmap fallback below releases the block with munmap.
    munmap(block, BLOCK_SIZE);
// Accounts for out-of-heap memory retained by a newly allocated value so that
// the allocation-triggered collection in heapAllocate() fires sooner.
void Collector::recordExtraCost(size_t cost)
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
// Allocates one cell of at most CELL_SIZE bytes from the requested heap,
// possibly running a collection first once enough new objects have piled up.
// Caller must hold the JSLock. Returns a pointer to an uninitialized cell.
template <Collector::HeapType heapType> void* Collector::heapAllocate(size_t s)
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(s <= CELL_SIZE);
    UNUSED_PARAM(s); // s is now only used for the above assert

    ASSERT(heap.operationInProgress == NoOperation);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could abort() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

    // Collect only when the objects created since the last collection (plus any
    // recorded extra cost -- primary heap only) reach both the absolute
    // threshold and the previous live count.
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    const size_t newCost = heapType == PrimaryHeap ? numNewObjects + heap.extraCost : numNewObjects;

    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        // NOTE(review): the collect() call sits in lines omitted from this
        // excerpt; re-read the live count it changed.
        numLiveObjects = heap.numLiveObjects;

    ASSERT(heap.operationInProgress == NoOperation);
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;

    // Scan forward from the first block that might have a free cell.
    size_t usedBlocks = heap.usedBlocks;

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= CELLS_PER_BLOCK);
        // skip completely full blocks, remembering the new scan start
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks)
                goto allocateNewBlock;
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= CELLS_PER_BLOCK);
        heap.firstBlockWithPossibleSpace = i;

    // didn't find one, need to allocate a new block
    size_t numBlocks = heap.numBlocks;
    if (usedBlocks == numBlocks) {
        // grow the block-pointer array geometrically
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        heap.numBlocks = numBlocks;
        heap.blocks = static_cast<CollectorBlock **>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock *)));
    targetBlock = allocateBlock();
    targetBlock->freeList = targetBlock->cells; // fresh block: free list starts at the first cell
    targetBlockUsedCells = 0;
    heap.blocks[usedBlocks] = targetBlock;
    heap.usedBlocks = usedBlocks + 1;
    heap.firstBlockWithPossibleSpace = usedBlocks;

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
// Allocates a cell for an ordinary JS object from the primary heap.
void* Collector::allocate(size_t s)
    return heapAllocate<PrimaryHeap>(s);
// Allocates a cell for a JS number from the dedicated number heap.
void* Collector::allocateNumber(size_t s)
    return heapAllocate<NumberHeap>(s);
// Returns the highest address of the current thread's stack so conservative
// marking can scan from the live stack pointer up to this base.
static inline void* currentThreadStackBase()
    // Darwin offers a direct query for the stack top.
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    return (void*)pTib->StackBase;
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    asm ( "movl %%fs:0x18, %0\n"
    return (void*)pTib->StackBase;
    // Generic pthreads: query the stack's base address and size and cache the
    // result per thread.
    static void *stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: deal with error code somehow? seems fatal...
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    // pthread_attr_getstack reports the lowest address; the base is at the top.
    return (void*)(size_t(stackBase) + stackSize);
#error Need a way to get the stack base on this platform
#if USE(MULTIPLE_THREADS)
static pthread_t mainThread; // identity of the thread that registered as "main"

// Remembers the calling thread as the main thread; consulted by onMainThread()
// below. A no-op when multi-threading support is compiled out.
void Collector::registerAsMainThread()
#if USE(MULTIPLE_THREADS)
    mainThread = pthread_self();
// Returns true when called from the main thread. On Darwin this asks the OS
// directly; otherwise it compares against the thread saved by
// registerAsMainThread().
static inline bool onMainThread()
#if USE(MULTIPLE_THREADS)
    return pthread_main_np();
    return !!pthread_equal(pthread_self(), mainThread);
#if USE(MULTIPLE_THREADS)

// Platform-specific handle used to suspend/resume and inspect another thread.
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
// Returns the PlatformThread handle for the calling thread.
static inline PlatformThread getCurrentPlatformThread()
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
// One node in the singly linked list of threads registered with the collector;
// stores both the pthread identity and the platform-specific handle.
class Collector::Thread {
    Thread(pthread_t pthread, const PlatformThread& platThread) : posixThread(pthread), platformThread(platThread) {}
    pthread_t posixThread;
    PlatformThread platformThread;

// TLS key whose destructor unregisters an exiting thread, plus the head of
// the registered-thread list.
pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;
Collector::Thread* registeredThreads;
// TLS destructor: removes the exiting thread's node from the registeredThreads
// list so the collector stops scanning its stack.
static void destroyRegisteredThread(void* data)
    Collector::Thread* thread = (Collector::Thread*)data;

    // Can't use JSLock convenience object here because we don't want to re-register
    // an exiting thread.

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
        // not the head: walk the list to unlink this thread's node
        Collector::Thread *last = registeredThreads;
        Collector::Thread *t;
        for (t = registeredThreads->next; t != NULL; t = t->next) {
                last->next = t->next;
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
// pthread_once initializer: creates the TLS key whose destructor detects
// thread exit (see destroyRegisteredThread above).
static void initializeRegisteredThreadKey()
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
// Registers the calling thread with the collector (idempotent per thread) so
// its stack and registers are scanned during collections. Caller holds JSLock.
void Collector::registerThread()
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        // (Darwin) expose both heaps to memory-analysis tools
        CollectorHeapIntrospector::init(&primaryHeap, &numberHeap);

        Collector::Thread *thread = new Collector::Thread(pthread_self(), getCurrentPlatformThread());

        // push onto the head of the list and mark this thread as registered
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
// True when p is aligned to the pointer size.
#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)
// Conservative marking: treats every pointer-aligned word in [start, end) as a
// potential cell pointer; a word that points into a live collector block gets
// its cell marked so the sweep keeps it.
void Collector::markStackObjectsConservatively(void *start, void *end)
    ASSERT(((char*)end - (char*)start) < 0x1000000); // sanity bound on scan size
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = (char**)start;
    char** e = (char**)end;

    // Snapshot heap state so the hot scan loop reads locals, not globals.
    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock **primaryBlocks = primaryHeap.blocks;
    CollectorBlock **numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

        // NOTE(review): the loop over [p, e) that loads candidate 'x' is in
        // lines omitted from this excerpt.
        if (IS_CELL_ALIGNED(x) && x) {
            // map the candidate back to its would-be block via the alignment mask
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(x - offset);
            // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell*)x)->u.freeCell.zeroIfFree != 0)
                        // Don't check whether we need to mark as we're only setting a bit in this case
                        Collector::markCell(reinterpret_cast<JSCell*>(x));
            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell*)x)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(x);
// Marks everything reachable from the current thread's stack, scanning from
// the current stack position up to the thread's stack base.
void Collector::markCurrentThreadConservatively()
    // setjmp forces volatile registers onto the stack
#pragma warning(push)
#pragma warning(disable: 4611) // interaction between setjmp and C++ object destruction is non-portable

    // 'dummy' (declared in omitted lines) marks the current stack extent
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
#if USE(MULTIPLE_THREADS)

// Stops another registered thread so its stack/registers can be scanned safely.
static inline void suspendThread(const PlatformThread& platformThread)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#error Need a way to suspend threads on this platform
// Restarts a thread previously stopped by suspendThread().
static inline void resumeThread(const PlatformThread& platformThread)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#error Need a way to resume threads on this platform
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

// Architecture-specific register-file type captured from a suspended thread.
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#error Unknown Architecture
#elif PLATFORM(WIN_OS)&& PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#error Need a thread register struct for this platform
// Fetches the register state of a (suspended) thread into regs and returns the
// number of bytes of regs that are valid for conservative scanning.
size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
    // Pick the Mach thread-state flavor and word count for this architecture.
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#error Unknown Architecture

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
    // Mach reports the state size in words; convert to bytes.
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)
#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#error Need a way to get thread registers on this platform
// Extracts the stack pointer from another thread's captured register state.
// Darwin's UNIX03 conformance renames register fields with a "__" prefix,
// hence the duplicated branches.
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
    return (void*)regs.__esp;
#elif PLATFORM(X86_64)
    return (void*)regs.__rsp;
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return (void*)regs.__r1;
#error Unknown Architecture
#else // !__DARWIN_UNIX03
    return (void*)regs.esp;
#elif PLATFORM(X86_64)
    return (void*)regs.rsp;
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return (void*)regs.r1;
#error Unknown Architecture
#endif // __DARWIN_UNIX03
// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return (void*)(uintptr_t)regs.Esp;
#error Need a way to get the stack pointer for another thread on this platform
// Finds the stack base (highest stack address) of another registered thread.
static inline void* otherThreadStackBase(const PlatformThreadRegisters& regs, Collector::Thread* thread)
    return pthread_get_stackaddr_np(thread->posixThread);
// end PLATFORM(DARWIN);
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    // Windows: recover the thread's TIB from its FS selector to read StackBase.
    GetThreadSelectorEntry(thread->platformThread.handle, regs.SegFs, &desc);
    tib = (NT_TIB*)(uintptr_t)(desc.BaseLow | desc.HighWord.Bytes.BaseMid << 16 | desc.HighWord.Bytes.BaseHi << 24);
    ASSERT(tib == tib->Self);
    return tib->StackBase;
#error Need a way to get the stack pointer for another thread on this platform
// Suspends another registered thread, conservatively marks its register file
// and its stack, then resumes it. Marking must not malloc while the thread is
// suspended (it may hold the malloc lock) -- see the note in collect().
void Collector::markOtherThreadConservatively(Thread* thread)
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markStackObjectsConservatively((void*)&regs, (void*)((char*)&regs + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    void* stackBase = otherThreadStackBase(regs, thread);
    markStackObjectsConservatively(stackPointer, stackBase);

    resumeThread(thread->platformThread);
// Marks the stacks of every thread: the current one directly, plus each other
// registered thread via suspend/scan/resume.
void Collector::markStackObjectsConservatively()
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (!pthread_equal(thread->posixThread, pthread_self())) {
            markOtherThreadConservatively(thread);
// Reference-counted set of explicitly protected cells (extra GC roots).
typedef HashCountedSet<JSCell*> ProtectCountSet;

// Lazily constructed singleton holding all protect() roots.
static ProtectCountSet& protectedValues()
    static ProtectCountSet staticProtectCountSet;
    return staticProtectCountSet;
// Adds a value to the protected-root set so it survives collections even when
// otherwise unreachable. Calls nest (the set is counted). Immediate values are
// not heap-allocated and are ignored.
void Collector::protect(JSValue *k)
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(k))

    protectedValues().add(k->asCell());
// Removes one level of protection added by protect(); immediates are ignored.
void Collector::unprotect(JSValue *k)
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(k))

    protectedValues().remove(k->asCell());
// Flags a heap cell (via its block's collectOnMainThreadOnly bit) so that only
// a collection running on the main thread may destroy it; immediates ignored.
void Collector::collectOnMainThreadOnly(JSValue* value)
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (JSImmediate::isImmediate(value))

    JSCell* cell = value->asCell();
    cellBlock(cell)->collectOnMainThreadOnly.set(cellOffset(cell));
    ++mainThreadOnlyObjectCount;
// Marks every cell in the protected-root set (the per-root mark call is in
// lines omitted from this excerpt).
void Collector::markProtectedObjects()
    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it) {
        JSCell *val = it->first;
// Called when collecting from a non-main thread: marks every live cell flagged
// "main thread only" so the sweep cannot destroy it; only the main thread is
// allowed to run those objects' destruction.
void Collector::markMainThreadOnlyObjects()
#if USE(MULTIPLE_THREADS)
    ASSERT(!onMainThread());

    // Optimization for clients that never register "main thread only" objects.
    if (!mainThreadOnlyObjectCount)

    // FIXME: We can optimize this marking algorithm by keeping an exact set of
    // "main thread only" objects when the "main thread only" object count is
    // small. We don't want to keep an exact set all the time, because WebCore
    // tends to create lots of "main thread only" objects, and all that set
    // thrashing can be expensive.

    // We don't look at the numberHeap as primitive values can never be marked as main thread only
    for (size_t block = 0; block < primaryHeap.usedBlocks; block++) {
        ASSERT(count < mainThreadOnlyObjectCount);

        CollectorBlock* curBlock = primaryHeap.blocks[block];
        size_t minimumCellsToProcess = curBlock->usedCells;
        for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
            CollectorCell* cell = curBlock->cells + i;
            // free cells don't count toward usedCells, so widen the scan bound
            if (cell->u.freeCell.zeroIfFree == 0)
                ++minimumCellsToProcess;
                if (curBlock->collectOnMainThreadOnly.get(i)) {
                    if (!curBlock->marked.get(i)) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                    // stop early once every flagged object has been visited
                    if (++count == mainThreadOnlyObjectCount)
// Sweeps one heap: destroys unmarked cells, rebuilds per-block free lists,
// clears all mark bits, releases surplus empty blocks, and returns the new
// live-object count.
template <Collector::HeapType heapType> size_t Collector::sweep(bool currentThreadIsMainThread)
    UNUSED_PARAM(currentThreadIsMainThread); // currentThreadIsMainThread is only used in ASSERTs
    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == Collector::PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (!curBlock->marked.get(i)) {
                    CollectorCell* cell = curBlock->cells + i;

                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior this point
                    // assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == 0)

                    JSCell* imp = reinterpret_cast<JSCell*>(cell);
                    // Only the primary heap tracks main-thread-only objects
                    // (primitives are never flagged), so only it maintains the
                    // collectOnMainThreadOnly bit and global count here.
                    if (heapType != Collector::NumberHeap) {
                        ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
                        if (curBlock->collectOnMainThreadOnly.get(i)) {
                            curBlock->collectOnMainThreadOnly.clear(i);
                            --Collector::mainThreadOnlyObjectCount;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
            // Partially used block: scan used cells, widening the bound as
            // free cells are met (free cells don't count toward usedCells).
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                    if (!curBlock->marked.get(i)) {
                        JSCell *imp = reinterpret_cast<JSCell *>(cell);
                        if (heapType != Collector::NumberHeap) {
                            ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
                            if (curBlock->collectOnMainThreadOnly.get(i)) {
                                curBlock->collectOnMainThreadOnly.clear(i);
                                --Collector::mainThreadOnlyObjectCount;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);

        // publish the rebuilt per-block state and reset marks for the next GC
        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (usedCells == 0) {
            // keep a few empty blocks as allocation slack; free the rest
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                block--; // Don't move forward a step in this case

                // shrink the block-pointer array when it is mostly unused
                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = (CollectorBlock **)fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock *));

    // anything freed means earlier blocks may have space again
    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;

    return numLiveObjects;
892 bool Collector::collect()
894 ASSERT(JSLock::lockCount() > 0);
895 ASSERT(JSLock::currentThreadIsHoldingLock());
897 ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
898 if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
901 primaryHeap.operationInProgress = Collection;
902 numberHeap.operationInProgress = Collection;
904 bool currentThreadIsMainThread = onMainThread();
906 // MARK: first mark all referenced objects recursively starting out from the set of root objects
909 // Forbid malloc during the mark phase. Marking a thread suspends it, so
910 // a malloc inside mark() would risk a deadlock with a thread that had been
911 // suspended while holding the malloc lock.
915 if (Interpreter::s_hook) {
916 Interpreter* scr = Interpreter::s_hook;
920 } while (scr != Interpreter::s_hook);
923 markStackObjectsConservatively();
924 markProtectedObjects();
925 List::markProtectedLists();
926 #if USE(MULTIPLE_THREADS)
927 if (!currentThreadIsMainThread)
928 markMainThreadOnlyObjects();
935 size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
936 size_t numLiveObjects = sweep<PrimaryHeap>(currentThreadIsMainThread);
937 numLiveObjects += sweep<NumberHeap>(currentThreadIsMainThread);
939 primaryHeap.operationInProgress = NoOperation;
940 numberHeap.operationInProgress = NoOperation;
942 bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
943 if (newMemoryFull && newMemoryFull != memoryFull)
944 reportOutOfMemoryToAllInterpreters();
945 memoryFull = newMemoryFull;
947 return originalLiveObjects < numLiveObjects;
// Total number of live cells across both heaps.
size_t Collector::size()
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
// Counts live interpreters by walking their circular list (the counter
// increment and return are in lines omitted from this excerpt).
size_t Collector::numInterpreters()
    if (Interpreter::s_hook) {
        Interpreter* scr = Interpreter::s_hook;
        } while (scr != Interpreter::s_hook);
// Number of distinct cells currently in the protected-root set.
size_t Collector::numProtectedObjects()
    return protectedValues().size();
// Maps a cell's JSType to a human-readable name for debugging/statistics;
// objects report their ClassInfo class name when available.
static const char *typeName(JSCell *val)
    const char *name = "???";
    switch (val->type()) {
        case UnspecifiedType:
            // (ObjectType case) prefer the classInfo name when present
            const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
            name = info ? info->className : "Object";
        case GetterSetterType:
            name = "gettersetter";
// Builds a histogram of protected-root types keyed by type name.
// The caller owns (and must delete) the returned set.
HashCountedSet<const char*>* Collector::rootObjectTypeCounts()
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));
// True while either heap is inside an allocation or a collection.
bool Collector::isBusy()
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
// Sets an "Out of memory" exception on every live interpreter's active (or
// else global) exec state; invoked by collect() when the live count first
// reaches KJS_MEM_LIMIT.
void Collector::reportOutOfMemoryToAllInterpreters()
    if (!Interpreter::s_hook)

    Interpreter* interpreter = Interpreter::s_hook;
    // walk the circular interpreter list exactly once
        ExecState* exec = interpreter->currentExec() ? interpreter->currentExec() : interpreter->globalExec();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while(interpreter != Interpreter::s_hook);