1 // -*- mode: c++; c-basic-offset: 4 -*-
3 * This file is part of the KDE libraries
4 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.
5 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "collector.h"
26 #include "ExecState.h"
33 #include <wtf/FastMalloc.h>
34 #include <wtf/HashCountedSet.h>
35 #include <wtf/UnusedParam.h>
37 #if USE(MULTIPLE_THREADS)
43 #include <mach/mach_port.h>
44 #include <mach/mach_init.h>
45 #include <mach/task.h>
46 #include <mach/thread_act.h>
47 #include <mach/vm_map.h>
49 #include "CollectorHeapIntrospector.h"
51 #elif PLATFORM(WIN_OS)
61 #if HAVE(PTHREAD_NP_H)
62 #include <pthread_np.h>
69 #define DEBUG_COLLECTOR 0
// Heap tuning constants (NOTE(review): this extraction elides some original
// lines — gaps in the embedded numbering — so surrounding context may be missing).
// SPARE_EMPTY_BLOCKS: empty blocks kept around after sweep before freeing (see sweep()).
77 const size_t SPARE_EMPTY_BLOCKS = 2;
// MIN_ARRAY_SIZE: minimum capacity of the heap's block-pointer array (see heapAllocate()/sweep()).
78 const size_t MIN_ARRAY_SIZE = 14;
// GROWTH_FACTOR: multiplier when growing (and divisor when shrinking) the block array.
79 const size_t GROWTH_FACTOR = 2;
// LOW_WATER_FACTOR: shrink the block array when usedBlocks < numBlocks / LOW_WATER_FACTOR.
80 const size_t LOW_WATER_FACTOR = 4;
// ALLOCATIONS_PER_COLLECTION: new-object/extra-cost threshold that triggers a collect().
81 const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// Tracks whether the heap is currently inside an allocation or a collection,
// used to assert against reentrant heap operations.
83 enum OperationInProgress { NoOperation, Allocation, Collection };
// Bookkeeping for one heap (there are two: primary objects and numbers).
// NOTE(review): several member declarations are elided in this extraction
// (numbering gaps); the aggregate initializers below supply 8 values.
85 struct CollectorHeap {
86     CollectorBlock** blocks;
// Index below which all blocks are known to be full — an allocation scan hint.
89     size_t firstBlockWithPossibleSpace;
91     size_t numLiveObjects;
92     size_t numLiveObjectsAtLastCollect;
95     OperationInProgress operationInProgress;
// The two heaps, zero-initialized. numberHeap holds immediate-sized number
// cells; primaryHeap holds everything else (see HeapConstants below).
98 static CollectorHeap primaryHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };
99 static CollectorHeap numberHeap = { 0, 0, 0, 0, 0, 0, 0, NoOperation };
101 // FIXME: I don't think this needs to be a static data member of the Collector class.
102 // Just a private global like "heap" above would be fine.
// Count of cells flagged "collect on main thread only" (see collectOnMainThreadOnly()).
103 size_t Collector::mainThreadOnlyObjectCount = 0;
// Set by collect() when live objects reach KJS_MEM_LIMIT; read by callers to report OOM.
105 bool Collector::memoryFull = false;
// Allocates one BLOCK_SIZE-aligned, zeroed collector block using the best
// platform facility available (Mach vm_map / VirtualAlloc / posix_memalign /
// plain mmap with manual alignment trimming). NOTE(review): the #if/#endif
// lines selecting the branches are elided in this extraction.
107 static CollectorBlock* allocateBlock()
110     vm_address_t address = 0;
// BLOCK_OFFSET_MASK as the mask argument makes vm_map return BLOCK_SIZE-aligned memory.
111     vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
112 #elif PLATFORM(WIN_OS)
113     // windows virtual address granularity is naturally 64k
114     LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
115 #elif HAVE(POSIX_MEMALIGN)
117     posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
118     memset(address, 0, BLOCK_SIZE);
// mmap fallback: over-allocate by "extra" then trim both ends to reach alignment.
120     static size_t pagesize = getpagesize();
123     if (BLOCK_SIZE > pagesize)
124         extra = BLOCK_SIZE - pagesize;
126     void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
127     uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
// If the mapping isn't aligned, compute how far to advance to the next boundary.
130     if ((address & BLOCK_OFFSET_MASK) != 0)
131         adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
// Release the unused head and tail of the over-sized mapping.
134         munmap(reinterpret_cast<void*>(address), adjust);
137         munmap(reinterpret_cast<void*>(address + adjust + BLOCK_SIZE), extra - adjust);
140     memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
143     return reinterpret_cast<CollectorBlock*>(address);
// Releases a block with the deallocator matching allocateBlock()'s branch for
// this platform (the selecting #if lines are elided in this extraction).
146 static void freeBlock(CollectorBlock* block)
149     vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
150 #elif PLATFORM(WIN_OS)
151     VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
152 #elif HAVE(POSIX_MEMALIGN)
// mmap fallback pairs with munmap.
155     munmap(block, BLOCK_SIZE);
// Accrues non-cell memory cost against the primary heap so that heapAllocate()
// can trigger a GC sooner; the counter is consumed at the next collection.
159 void Collector::recordExtraCost(size_t cost)
161     // Our frequency of garbage collection tries to balance memory use against speed
162     // by collecting based on the number of newly created values. However, for values
163     // that hold on to a great deal of memory that's not in the form of other JS values,
164     // that is not good enough - in some cases a lot of those objects can pile up and
165     // use crazy amounts of memory without a GC happening. So we track these extra
166     // memory costs. Only unusually large objects are noted, and we only keep track
167     // of this extra cost until the next GC. In garbage collected languages, most values
168     // are either very short lived temporaries, or have extremely long lifetimes. So
169     // if a large value survives one garbage collection, there is not much point to
170     // collecting more frequently as long as it stays alive.
171     // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost
173     primaryHeap.extraCost += cost;
// Per-heap compile-time traits: cell size/count and the bitmap shift used to
// map a cell index onto the mark bitmap (number cells are half-size, so their
// indices are shifted by 1 before bitmap lookup — see sweep()).
176 template <Collector::HeapType heapType> struct HeapConstants;
178 template <> struct HeapConstants<Collector::PrimaryHeap> {
179     static const size_t cellSize = CELL_SIZE;
180     static const size_t cellsPerBlock = CELLS_PER_BLOCK;
181     static const size_t bitmapShift = 0;
182     typedef CollectorCell Cell;
183     typedef CollectorBlock Block;
186 template <> struct HeapConstants<Collector::NumberHeap> {
187     static const size_t cellSize = SMALL_CELL_SIZE;
188     static const size_t cellsPerBlock = SMALL_CELLS_PER_BLOCK;
189     static const size_t bitmapShift = 1;
190     typedef SmallCollectorCell Cell;
191     typedef SmallCellCollectorBlock Block;
// Core allocation path for both heaps: finds a block with free cells (starting
// at firstBlockWithPossibleSpace), optionally triggers a collection when enough
// new objects or extra cost has accumulated, grows the block array when all
// blocks are full, then pops one cell off the target block's free list.
// NOTE(review): several lines (braces, gotos, #if directives) are elided in
// this extraction — the visible statements are not contiguous.
194 template <Collector::HeapType heapType> void* Collector::heapAllocate(size_t s)
196     typedef typename HeapConstants<heapType>::Block Block;
197     typedef typename HeapConstants<heapType>::Cell Cell;
199     CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
200     ASSERT(JSLock::lockCount() > 0);
201     ASSERT(JSLock::currentThreadIsHoldingLock());
202     ASSERT(s <= HeapConstants<heapType>::cellSize);
203     UNUSED_PARAM(s); // s is now only used for the above assert
205     ASSERT(heap.operationInProgress == NoOperation);
206     ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
207     // FIXME: If another global variable access here doesn't hurt performance
208     // too much, we could abort() in NDEBUG builds, which could help ensure we
209     // don't spend any time debugging cases where we allocate inside an object's
210     // deallocation code.
// Cache heap fields in locals; they are written back before returning.
212     size_t numLiveObjects = heap.numLiveObjects;
213     size_t usedBlocks = heap.usedBlocks;
214     size_t i = heap.firstBlockWithPossibleSpace;
216     // if we have a huge amount of extra cost, we'll try to collect even if we still have
218     if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
219         size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
220         size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
221         const size_t newCost = numNewObjects + heap.extraCost;
222         if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
226     ASSERT(heap.operationInProgress == NoOperation);
228     // FIXME: Consider doing this in NDEBUG builds too (see comment above).
229     heap.operationInProgress = Allocation;
// Scan forward from the hint for the first block with a free cell.
234     size_t targetBlockUsedCells;
235     if (i != usedBlocks) {
236         targetBlock = (Block*)heap.blocks[i];
237         targetBlockUsedCells = targetBlock->usedCells;
238         ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
239         while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
240             if (++i == usedBlocks)
242             targetBlock = (Block*)heap.blocks[i];
243             targetBlockUsedCells = targetBlock->usedCells;
244             ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
246         heap.firstBlockWithPossibleSpace = i;
// All blocks full: decide whether a collection is worthwhile before growing.
250         size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
251         size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
252         const size_t newCost = numNewObjects + heap.extraCost;
254         if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
// collect() asserts no operation is in progress, so clear and restore the flag around it.
256             heap.operationInProgress = NoOperation;
258             bool collected = collect();
260             heap.operationInProgress = Allocation;
// Collection may have changed all three cached values; reload them.
263             numLiveObjects = heap.numLiveObjects;
264             usedBlocks = heap.usedBlocks;
265             i = heap.firstBlockWithPossibleSpace;
270     // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
271     size_t numBlocks = heap.numBlocks;
272     if (usedBlocks == numBlocks) {
273         numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
274         heap.numBlocks = numBlocks;
275         heap.blocks = static_cast<CollectorBlock **>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock *)));
278     targetBlock = (Block*)allocateBlock();
279     targetBlock->freeList = targetBlock->cells;
280     targetBlockUsedCells = 0;
281     heap.blocks[usedBlocks] = (CollectorBlock*)targetBlock;
282     heap.usedBlocks = usedBlocks + 1;
283     heap.firstBlockWithPossibleSpace = usedBlocks;
286     // find a free spot in the block and detach it from the free list
287     Cell *newCell = targetBlock->freeList;
289     // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
290     // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
291     targetBlock->freeList = reinterpret_cast<Cell*>(reinterpret_cast<char*>(newCell + 1) + newCell->u.freeCell.next);
293     targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
294     heap.numLiveObjects = numLiveObjects + 1;
297     // FIXME: Consider doing this in NDEBUG builds too (see comment above).
298     heap.operationInProgress = NoOperation;
// Public entry point: allocate a primary-heap cell of size s (s <= CELL_SIZE).
304 void* Collector::allocate(size_t s)
306     return heapAllocate<PrimaryHeap>(s);
// Public entry point: allocate a small number-heap cell of size s.
309 void* Collector::allocateNumber(size_t s)
311     return heapAllocate<NumberHeap>(s);
// Returns the highest address of the current thread's stack, used as the upper
// bound for conservative stack scanning. Platform-specific; the selecting
// #if/#elif lines are partially elided in this extraction.
314 static inline void* currentThreadStackBase()
317     pthread_t thread = pthread_self();
318     return pthread_get_stackaddr_np(thread);
319 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
320     // offset 0x18 from the FS segment register gives a pointer to
321     // the thread information block for the current thread
327     return (void*)pTib->StackBase;
328 #elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
329     // offset 0x18 from the FS segment register gives a pointer to
330     // the thread information block for the current thread
332     asm ( "movl %%fs:0x18, %0\n"
335     return (void*)pTib->StackBase;
// Generic pthread fallback: cache the base/size per thread since querying the
// attributes is comparatively expensive.
337     static void *stackBase = 0;
338     static size_t stackSize = 0;
339     static pthread_t stackThread;
340     pthread_t thread = pthread_self();
341     if (stackBase == 0 || thread != stackThread) {
342         pthread_attr_t sattr;
343         pthread_attr_init(&sattr);
344 #if HAVE(PTHREAD_NP_H)
345         // e.g. on FreeBSD 5.4, neundorf@kde.org
346         pthread_attr_get_np(thread, &sattr);
348         // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
349         pthread_getattr_np(thread, &sattr);
351     int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
352     (void)rc; // FIXME: deal with error code somehow? seems fatal...
354     pthread_attr_destroy(&sattr);
355     stackThread = thread;
// pthread_attr_getstack reports the LOW address; the base is low + size.
357     return (void*)(size_t(stackBase) + stackSize);
359 #error Need a way to get the stack base on this platform
363 #if USE(MULTIPLE_THREADS)
// Recorded by registerAsMainThread(); compared against in onMainThread().
364 static pthread_t mainThread;
// Records the calling thread as the "main" thread for main-thread-only
// collection decisions (no-op when MULTIPLE_THREADS is off).
367 void Collector::registerAsMainThread()
369 #if USE(MULTIPLE_THREADS)
370     mainThread = pthread_self();
// True when the calling thread is the main thread. Uses the native Darwin
// check where available, otherwise compares against the registered mainThread.
374 static inline bool onMainThread()
376 #if USE(MULTIPLE_THREADS)
378     return pthread_main_np();
380     return !!pthread_equal(pthread_self(), mainThread);
387 #if USE(MULTIPLE_THREADS)
// Native thread handle used for suspend/resume and register inspection:
// a Mach port on Darwin, an id+HANDLE pair on Windows.
390 typedef mach_port_t PlatformThread;
391 #elif PLATFORM(WIN_OS)
392 struct PlatformThread {
393     PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
// Returns the platform handle for the calling thread.
399 static inline PlatformThread getCurrentPlatformThread()
402     return pthread_mach_thread_np(pthread_self());
403 #elif PLATFORM(WIN_OS)
404     HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
405     return PlatformThread(GetCurrentThreadId(), threadHandle);
// Linked-list node describing one registered thread: its pthread identity plus
// the platform handle needed to suspend it and read its registers during GC.
409 class Collector::Thread {
411     Thread(pthread_t pthread, const PlatformThread& platThread) : posixThread(pthread), platformThread(platThread) {}
413     pthread_t posixThread;
414     PlatformThread platformThread;
// TLS key whose destructor unregisters an exiting thread, the once-guard that
// creates it, and the head of the registered-thread linked list.
417 pthread_key_t registeredThreadKey;
418 pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;
419 Collector::Thread* registeredThreads;
// TLS destructor: unlinks the exiting thread's node from registeredThreads.
// NOTE(review): deletion of the node and some braces are elided in this extraction.
421 static void destroyRegisteredThread(void* data)
423     Collector::Thread* thread = (Collector::Thread*)data;
425     // Can't use JSLock convenience object here because we don't want to re-register
426     // an exiting thread.
429     if (registeredThreads == thread) {
430         registeredThreads = registeredThreads->next;
// Not the head: walk the list tracking the previous node so we can splice.
432         Collector::Thread *last = registeredThreads;
433         Collector::Thread *t;
434         for (t = registeredThreads->next; t != NULL; t = t->next) {
436                 last->next = t->next;
441         ASSERT(t); // If t is NULL, we never found ourselves in the list.
// pthread_once initializer: creates the TLS key whose destructor
// (destroyRegisteredThread) unregisters a thread when it exits.
// Fix: the extraction had mis-decoded the HTML entity "&reg" into the
// character '®', corrupting "&registeredThreadKey".
449 static void initializeRegisteredThreadKey()
451     pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
// Registers the calling thread with the collector so its stack and registers
// are scanned during GC. Idempotent per thread via the TLS key; on first ever
// call (Darwin) also installs the heap introspector.
// Fix: the extraction had mis-decoded the HTML entity "&reg" into the
// character '®', corrupting "&registeredThreadKeyOnce".
454 void Collector::registerThread()
456     ASSERT(JSLock::lockCount() > 0);
457     ASSERT(JSLock::currentThreadIsHoldingLock());
459     pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);
461     if (!pthread_getspecific(registeredThreadKey)) {
464         CollectorHeapIntrospector::init(&primaryHeap, &numberHeap);
467         Collector::Thread *thread = new Collector::Thread(pthread_self(), getCurrentPlatformThread());
// Push onto the registered-thread list and mark this thread as registered.
469         thread->next = registeredThreads;
470         registeredThreads = thread;
471         pthread_setspecific(registeredThreadKey, thread);
// Fast alignment predicates used by the conservative stack scanner: a value
// can only be a cell pointer if it is at least half-cell aligned.
477 #define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)
479 // cell size needs to be a power of two for this to be valid
480 #define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
// Conservative scan of [start, end): treats every pointer-aligned word that
// could be a cell address in a known heap block as a live reference and marks
// it. NOTE(review): the scanning loop's braces and the mark call for primary
// cells are partially elided in this extraction.
482 void Collector::markStackObjectsConservatively(void *start, void *end)
490     ASSERT(((char*)end - (char*)start) < 0x1000000);
491     ASSERT(IS_POINTER_ALIGNED(start));
492     ASSERT(IS_POINTER_ALIGNED(end));
494     char** p = (char**)start;
495     char** e = (char**)end;
// Snapshot both heaps' block lists; they do not change during the mark phase.
497     size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
498     size_t usedNumberBlocks = numberHeap.usedBlocks;
499     CollectorBlock **primaryBlocks = primaryHeap.blocks;
500     CollectorBlock **numberBlocks = numberHeap.blocks;
502     const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
506         if (IS_HALF_CELL_ALIGNED(x) && x) {
507             uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
508             xAsBits &= CELL_ALIGN_MASK;
// Split the candidate into (block base, offset within block).
509             uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
510             CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
511             // Mark the number heap, we can mark these Cells directly to avoid the virtual call cost
512             for (size_t block = 0; block < usedNumberBlocks; block++) {
// Bitwise & instead of && — presumably intentional branch avoidance; verify.
513                 if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
514                     Collector::markCell(reinterpret_cast<JSCell*>(xAsBits));
519             // Mark the primary heap
520             for (size_t block = 0; block < usedPrimaryBlocks; block++) {
521                 if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
// zeroIfFree == 0 means the cell is on the free list; only mark live cells.
522                     if (((CollectorCell*)xAsBits)->u.freeCell.zeroIfFree != 0) {
523                         JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
// Conservatively scans the calling thread's stack, from the address of a local
// up to the stack base. NOTE(review): the setjmp call and the local "dummy"
// declaration are elided in this extraction.
536 void Collector::markCurrentThreadConservatively()
538     // setjmp forces volatile registers onto the stack
// MSVC warns (C4611) about setjmp + C++ object destruction; suppressed here.
541 #pragma warning(push)
542 #pragma warning(disable: 4611)
550     void* stackPointer = &dummy;
551     void* stackBase = currentThreadStackBase();
553     markStackObjectsConservatively(stackPointer, stackBase);
556 #if USE(MULTIPLE_THREADS)
// Suspends another thread so its stack/registers can be scanned safely.
558 static inline void suspendThread(const PlatformThread& platformThread)
561     thread_suspend(platformThread);
562 #elif PLATFORM(WIN_OS)
563     SuspendThread(platformThread.handle);
565 #error Need a way to suspend threads on this platform
// Resumes a thread previously stopped by suspendThread().
569 static inline void resumeThread(const PlatformThread& platformThread)
572     thread_resume(platformThread);
573 #elif PLATFORM(WIN_OS)
574     ResumeThread(platformThread.handle);
576 #error Need a way to resume threads on this platform
580 typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
// Architecture-specific register-state struct used when scanning a suspended
// thread's registers (Mach thread states on Darwin, CONTEXT on Win32/x86).
585 typedef i386_thread_state_t PlatformThreadRegisters;
586 #elif PLATFORM(X86_64)
587 typedef x86_thread_state64_t PlatformThreadRegisters;
589 typedef ppc_thread_state_t PlatformThreadRegisters;
590 #elif PLATFORM(PPC64)
591 typedef ppc_thread_state64_t PlatformThreadRegisters;
593 #error Unknown Architecture
596 #elif PLATFORM(WIN_OS)&& PLATFORM(X86)
597 typedef CONTEXT PlatformThreadRegisters;
599 #error Need a thread register struct for this platform
// Copies a suspended thread's register state into regs and returns the number
// of bytes filled in, so the registers themselves can be scanned as a
// conservative root range.
// Fix: the extraction had mis-decoded the HTML entity "&reg" into the
// character '®', corrupting two occurrences of "&regs".
602 size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
// Pick the Mach thread-state flavor and word count for this architecture.
607     unsigned user_count = sizeof(regs)/sizeof(int);
608     thread_state_flavor_t flavor = i386_THREAD_STATE;
609 #elif PLATFORM(X86_64)
610     unsigned user_count = x86_THREAD_STATE64_COUNT;
611     thread_state_flavor_t flavor = x86_THREAD_STATE64;
613     unsigned user_count = PPC_THREAD_STATE_COUNT;
614     thread_state_flavor_t flavor = PPC_THREAD_STATE;
615 #elif PLATFORM(PPC64)
616     unsigned user_count = PPC_THREAD_STATE64_COUNT;
617     thread_state_flavor_t flavor = PPC_THREAD_STATE64;
619 #error Unknown Architecture
622     kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
623     if (result != KERN_SUCCESS) {
624         WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
625                             "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
628     return user_count * sizeof(usword_t);
629 // end PLATFORM(DARWIN)
631 #elif PLATFORM(WIN_OS) && PLATFORM(X86)
632     regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
633     GetThreadContext(platformThread.handle, &regs);
634     return sizeof(CONTEXT);
636 #error Need a way to get thread registers on this platform
// Extracts the stack pointer from a captured register set. Darwin field names
// differ with __DARWIN_UNIX03 (double-underscore prefix vs. plain).
640 static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
647     return (void*)regs.__esp;
648 #elif PLATFORM(X86_64)
649     return (void*)regs.__rsp;
650 #elif PLATFORM(PPC) || PLATFORM(PPC64)
// On PPC, r1 is the stack pointer by ABI convention.
651     return (void*)regs.__r1;
653 #error Unknown Architecture
656 #else // !__DARWIN_UNIX03
659     return (void*)regs.esp;
660 #elif PLATFORM(X86_64)
661     return (void*)regs.rsp;
662 #elif (PLATFORM(PPC) || PLATFORM(PPC64))
663     return (void*)regs.r1;
665 #error Unknown Architecture
668 #endif // __DARWIN_UNIX03
670 // end PLATFORM(DARWIN)
671 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
672     return (void*)(uintptr_t)regs.Esp;
674 #error Need a way to get the stack pointer for another thread on this platform
// Returns another thread's stack base. On Darwin pthreads can report it
// directly; on Win32/x86 the TIB is located via the thread's FS selector.
678 static inline void* otherThreadStackBase(const PlatformThreadRegisters& regs, Collector::Thread* thread)
682     return pthread_get_stackaddr_np(thread->posixThread);
683 // end PLATFORM(DARWIN);
684 #elif PLATFORM(X86) && PLATFORM(WIN_OS)
687     GetThreadSelectorEntry(thread->platformThread.handle, regs.SegFs, &desc);
// Reassemble the TIB base address from the selector's split base fields.
688     tib = (NT_TIB*)(uintptr_t)(desc.BaseLow | desc.HighWord.Bytes.BaseMid << 16 | desc.HighWord.Bytes.BaseHi << 24);
689     ASSERT(tib == tib->Self);
690     return tib->StackBase;
692 #error Need a way to get the stack pointer for another thread on this platform
// Suspends another registered thread, conservatively scans both its saved
// register state and its stack, then resumes it. The thread stays suspended
// for the whole scan so its stack cannot change underneath us.
// Fix: the extraction had mis-decoded the HTML entity "&reg" into the
// character '®', corrupting two occurrences of "&regs".
696 void Collector::markOtherThreadConservatively(Thread* thread)
698     suspendThread(thread->platformThread);
700     PlatformThreadRegisters regs;
701     size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
703     // mark the thread's registers
704     markStackObjectsConservatively((void*)&regs, (void*)((char*)&regs + regSize));
706     void* stackPointer = otherThreadStackPointer(regs);
707     void* stackBase = otherThreadStackBase(regs, thread);
708     markStackObjectsConservatively(stackPointer, stackBase);
710     resumeThread(thread->platformThread);
// Scans the current thread's stack, then (in multithreaded builds) every other
// registered thread's stack and registers.
715 void Collector::markStackObjectsConservatively()
717     markCurrentThreadConservatively();
719 #if USE(MULTIPLE_THREADS)
720     for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
// Skip the calling thread; it was already scanned above.
721         if (!pthread_equal(thread->posixThread, pthread_self())) {
722             markOtherThreadConservatively(thread);
// Reference-counted set of explicitly protected GC roots; the count allows
// nested protect()/unprotect() pairs on the same cell.
728 typedef HashCountedSet<JSCell*> ProtectCountSet;
730 static ProtectCountSet& protectedValues()
// Function-local static gives lazy, single construction of the global set.
732     static ProtectCountSet staticProtectCountSet;
733     return staticProtectCountSet;
// Pins a value as a GC root. Immediate values are not heap cells and need no
// protection, so they are skipped (the early return is elided in this extraction).
736 void Collector::protect(JSValue *k)
739     ASSERT(JSLock::lockCount() > 0);
740     ASSERT(JSLock::currentThreadIsHoldingLock());
742     if (JSImmediate::isImmediate(k))
745     protectedValues().add(k->asCell());
// Reverses one protect() on a value; immediates are skipped as in protect().
748 void Collector::unprotect(JSValue *k)
751     ASSERT(JSLock::lockCount() > 0);
752     ASSERT(JSLock::currentThreadIsHoldingLock());
754     if (JSImmediate::isImmediate(k))
757     protectedValues().remove(k->asCell());
// Flags a cell so it may only be swept by the main thread: sets the per-block
// bit for the cell and bumps the global count used as a fast-path check.
760 void Collector::collectOnMainThreadOnly(JSValue* value)
763     ASSERT(JSLock::lockCount() > 0);
764     ASSERT(JSLock::currentThreadIsHoldingLock());
766     if (JSImmediate::isImmediate(value))
769     JSCell* cell = value->asCell();
770     cellBlock(cell)->collectOnMainThreadOnly.set(cellOffset(cell));
771     ++mainThreadOnlyObjectCount;
// Marks every explicitly protected root (the mark call on each value is
// elided in this extraction).
774 void Collector::markProtectedObjects()
776     ProtectCountSet& protectedValues = KJS::protectedValues();
777     ProtectCountSet::iterator end = protectedValues.end();
778     for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it) {
779         JSCell *val = it->first;
// When a background thread collects, marks every "main thread only" object so
// it survives; only the main thread is allowed to actually destroy them.
// Scans primary-heap blocks and stops early once all flagged objects are found.
785 void Collector::markMainThreadOnlyObjects()
787 #if USE(MULTIPLE_THREADS)
788     ASSERT(!onMainThread());
791     // Optimization for clients that never register "main thread only" objects.
792     if (!mainThreadOnlyObjectCount)
795     // FIXME: We can optimize this marking algorithm by keeping an exact set of
796     // "main thread only" objects when the "main thread only" object count is
797     // small. We don't want to keep an exact set all the time, because WebCore
798     // tends to create lots of "main thread only" objects, and all that set
799     // thrashing can be expensive.
803     // We don't look at the numberHeap as primitive values can never be marked as main thread only
804     for (size_t block = 0; block < primaryHeap.usedBlocks; block++) {
805         ASSERT(count < mainThreadOnlyObjectCount);
807         CollectorBlock* curBlock = primaryHeap.blocks[block];
// minimumCellsToProcess grows past each free cell seen, so the loop visits
// at least usedCells live cells without knowing where they sit in the block.
808         size_t minimumCellsToProcess = curBlock->usedCells;
809         for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
810             CollectorCell* cell = curBlock->cells + i;
811             if (cell->u.freeCell.zeroIfFree == 0)
812                 ++minimumCellsToProcess;
814                 if (curBlock->collectOnMainThreadOnly.get(i)) {
815                     if (!curBlock->marked.get(i)) {
816                         JSCell* imp = reinterpret_cast<JSCell*>(cell);
// Early exit once every flagged object has been handled.
819                     if (++count == mainThreadOnlyObjectCount)
// Sweep phase: frees every unmarked cell, rebuilds each block's free list,
// clears mark bits, releases surplus empty blocks (keeping SPARE_EMPTY_BLOCKS),
// shrinks the block array when mostly empty, and returns the live-object count.
// NOTE(review): destructor calls, counters, and several braces are elided in
// this extraction — statements are not contiguous.
827 template <Collector::HeapType heapType> size_t Collector::sweep(bool currentThreadIsMainThread)
829     typedef typename HeapConstants<heapType>::Block Block;
830     typedef typename HeapConstants<heapType>::Cell Cell;
832     UNUSED_PARAM(currentThreadIsMainThread); // currentThreadIsMainThread is only used in ASSERTs
833     // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
834     CollectorHeap& heap = heapType == Collector::PrimaryHeap ? primaryHeap : numberHeap;
836     size_t emptyBlocks = 0;
837     size_t numLiveObjects = heap.numLiveObjects;
839     for (size_t block = 0; block < heap.usedBlocks; block++) {
840         Block* curBlock = (Block*)heap.blocks[block];
842         size_t usedCells = curBlock->usedCells;
843         Cell* freeList = curBlock->freeList;
845         if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
846             // special case with a block where all cells are used -- testing indicates this happens often
847             for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
// bitmapShift maps half-size number cells onto the shared mark bitmap.
848                 if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
849                     Cell* cell = curBlock->cells + i;
851                     if (heapType != Collector::NumberHeap) {
852                         JSCell* imp = reinterpret_cast<JSCell*>(cell);
853                         // special case for allocated but uninitialized object
854                         // (We don't need this check earlier because nothing prior this point
855                         // assumes the object has a valid vptr.)
856                         if (cell->u.freeCell.zeroIfFree == 0)
859                         ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
860                         if (curBlock->collectOnMainThreadOnly.get(i)) {
861                             curBlock->collectOnMainThreadOnly.clear(i);
862                             --Collector::mainThreadOnlyObjectCount;
870                     // put cell on the free list
// next is stored as a byte offset from one-past-this-cell (see heapAllocate()).
871                     cell->u.freeCell.zeroIfFree = 0;
872                     cell->u.freeCell.next = reinterpret_cast<char*>(freeList) - reinterpret_cast<char*>(cell + 1);
// General case: block has free cells interleaved with live ones.
877             size_t minimumCellsToProcess = usedCells;
878             for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
879                 Cell *cell = curBlock->cells + i;
// Already-free cells extend the scan bound instead of being processed.
880                 if (cell->u.freeCell.zeroIfFree == 0) {
881                     ++minimumCellsToProcess;
883                     if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
884                         if (heapType != Collector::NumberHeap) {
885                             JSCell *imp = reinterpret_cast<JSCell*>(cell);
886                             ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
887                             if (curBlock->collectOnMainThreadOnly.get(i)) {
888                                 curBlock->collectOnMainThreadOnly.clear(i);
889                                 --Collector::mainThreadOnlyObjectCount;
896                         // put cell on the free list
897                         cell->u.freeCell.zeroIfFree = 0;
898                         cell->u.freeCell.next = reinterpret_cast<char*>(freeList) - reinterpret_cast<char*>(cell + 1);
// Write back per-block state and reset marks for the next collection.
905         curBlock->usedCells = static_cast<uint32_t>(usedCells);
906         curBlock->freeList = freeList;
907         curBlock->marked.clearAll();
909         if (usedCells == 0) {
911             if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
913                 freeBlock((CollectorBlock*)curBlock);
915                 // swap with the last block so we compact as we go
916                 heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
918                 block--; // Don't move forward a step in this case
920                 if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
921                     heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
922                     heap.blocks = (CollectorBlock**)fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock *));
// Anything freed invalidates the allocation hint; restart the scan from 0.
928     if (heap.numLiveObjects != numLiveObjects)
929         heap.firstBlockWithPossibleSpace = 0;
931     heap.numLiveObjects = numLiveObjects;
932     heap.numLiveObjectsAtLastCollect = numLiveObjects;
934     return numLiveObjects;
// Runs a full mark-sweep collection over both heaps. Returns true when the
// sweep actually reclaimed objects. Bails out if either heap is already mid-
// operation (reentrancy guard). NOTE(review): the interpreter-marking loop
// body and the malloc-forbid calls are elided in this extraction.
937 bool Collector::collect()
939     ASSERT(JSLock::lockCount() > 0);
940     ASSERT(JSLock::currentThreadIsHoldingLock());
942     ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
943     if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
946     primaryHeap.operationInProgress = Collection;
947     numberHeap.operationInProgress = Collection;
949     bool currentThreadIsMainThread = onMainThread();
951     // MARK: first mark all referenced objects recursively starting out from the set of root objects
954     // Forbid malloc during the mark phase. Marking a thread suspends it, so
955     // a malloc inside mark() would risk a deadlock with a thread that had been
956     // suspended while holding the malloc lock.
// Walk the circular list of live interpreters and mark their roots.
960     if (Interpreter::s_hook) {
961         Interpreter* scr = Interpreter::s_hook;
965         } while (scr != Interpreter::s_hook);
968     markStackObjectsConservatively();
969     markProtectedObjects();
970     List::markProtectedLists();
971 #if USE(MULTIPLE_THREADS)
// A background collector must keep main-thread-only objects alive.
972     if (!currentThreadIsMainThread)
973         markMainThreadOnlyObjects();
980     size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
981     size_t numLiveObjects = sweep<PrimaryHeap>(currentThreadIsMainThread);
982     numLiveObjects += sweep<NumberHeap>(currentThreadIsMainThread);
984     primaryHeap.operationInProgress = NoOperation;
985     numberHeap.operationInProgress = NoOperation;
// Report out-of-memory only on the transition into the full state.
987     bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
988     if (newMemoryFull && newMemoryFull != memoryFull)
989         reportOutOfMemoryToAllInterpreters();
990     memoryFull = newMemoryFull;
992     return numLiveObjects < originalLiveObjects;
// Total live objects across both heaps.
995 size_t Collector::size()
997     return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
// Counts live interpreters by walking the circular s_hook list (the counter
// increment and return are elided in this extraction).
1000 size_t Collector::numInterpreters()
1003     if (Interpreter::s_hook) {
1004         Interpreter* scr = Interpreter::s_hook;
1008         } while (scr != Interpreter::s_hook);
// Number of distinct explicitly protected root cells.
1013 size_t Collector::numProtectedObjects()
1015     return protectedValues().size();
// Debug helper: human-readable type name for a cell, used by
// rootObjectTypeCounts(). NOTE(review): most switch cases are elided in this
// extraction; "???" is the fallback for unhandled types.
1018 static const char *typeName(JSCell *val)
1020     const char *name = "???";
1021     switch (val->type()) {
1022     case UnspecifiedType:
// Objects report their ClassInfo className when available.
1040         const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
1041         name = info ? info->className : "Object";
1044     case GetterSetterType:
1045         name = "gettersetter";
// Debug helper: histogram of protected-root type names. Caller owns the
// returned set (raw new; the return statement is elided in this extraction).
1051 HashCountedSet<const char*>* Collector::rootObjectTypeCounts()
1053     HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
1055     ProtectCountSet& protectedValues = KJS::protectedValues();
1056     ProtectCountSet::iterator end = protectedValues.end();
1057     for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it)
1058         counts->add(typeName(it->first));
// True while either heap is inside an allocation or collection.
1063 bool Collector::isBusy()
1065     return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
// Sets an "Out of memory" exception on every live interpreter's current (or
// global) execution state; called by collect() when the heap crosses
// KJS_MEM_LIMIT.
1068 void Collector::reportOutOfMemoryToAllInterpreters()
1070     if (!Interpreter::s_hook)
1073     Interpreter* interpreter = Interpreter::s_hook;
1075         ExecState* exec = interpreter->currentExec() ? interpreter->currentExec() : interpreter->globalExec();
1077         exec->setException(Error::create(exec, GeneralError, "Out of memory"));
1079         interpreter = interpreter->next;
1080     } while(interpreter != Interpreter::s_hook);