 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "collector.h"

#include "CollectorHeapIterator.h"
#include "ExecState.h"
#include "JSGlobalObject.h"

#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS)

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#define DEBUG_COLLECTOR 0
#define COLLECT_ON_EVERY_ALLOCATION 0

const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
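// How these constants tune the collector: a collection is considered once roughly
// ALLOCATIONS_PER_COLLECTION new cells (plus any reported extra cost) have been allocated
// since the last collect; the block array grows by GROWTH_FACTOR when it fills up and is
// shrunk once fewer than 1/LOW_WATER_FACTOR of its slots are in use; sweep() keeps at most
// SPARE_EMPTY_BLOCKS empty blocks around for reuse.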
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
static void freeHeap(CollectorHeap*);

#if ENABLE(JSC_MULTIPLE_THREADS)

typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}

    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)

    pthread_t posixThread;
    PlatformThread platformThread;
Heap::Heap(JSGlobalData* globalData)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_globalData(globalData)

#if ENABLE(JSC_MULTIPLE_THREADS)
    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);

    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));

    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeHeap(&primaryHeap);
    freeHeap(&numberHeap);

#if ENABLE(JSC_MULTIPLE_THREADS)

    pthread_key_delete(m_currentThreadRegistrar);

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
template <HeapType heapType>
static NEVER_INLINE CollectorBlock* allocateBlock()
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE(POSIX_MEMALIGN)
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
    static size_t pagesize = getpagesize();

    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

        munmap(reinterpret_cast<char*>(address), adjust);

        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
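    // Alignment trick: mmap() only guarantees page alignment, so the call above over-allocates
    // by "extra" bytes and the two munmap() calls trim the unaligned head ("adjust" bytes) and
    // the unused tail ("extra - adjust" bytes), leaving a BLOCK_SIZE region aligned to BLOCK_SIZE.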
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);

    reinterpret_cast<CollectorBlock*>(address)->type = heapType;
    return reinterpret_cast<CollectorBlock*>(address);
static void freeBlock(CollectorBlock* block)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS)
    VirtualFree(block, 0, MEM_RELEASE);
#elif HAVE(POSIX_MEMALIGN)
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);

static void freeHeap(CollectorHeap* heap)
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
void Heap::recordExtraCost(size_t cost)
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost
    primaryHeap.extraCost += cost;
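    // Illustrative (hypothetical) caller: an object that retains a large out-of-heap buffer
    // might report that memory when it allocates it, e.g. heap->recordExtraCost(bufferSize),
    // so the buffer's size counts toward the threshold that triggers the next collection.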
template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(s <= HeapConstants<heapType>::cellSize);
    UNUSED_PARAM(s); // s is now only used for the above assert

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could abort() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;
#if COLLECT_ON_EVERY_ALLOCATION

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)

    ASSERT(heap.operationInProgress == NoOperation);

    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);

        heap.firstBlockWithPossibleSpace = i;
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
            heap.operationInProgress = NoOperation;
            bool collected = collect();
            heap.operationInProgress = Allocation;
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));

        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        targetBlock->freeList = targetBlock->cells;
        targetBlock->heap = this;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = reinterpret_cast<CollectorBlock*>(targetBlock);
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;
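    // Worked example: in a freshly zeroed block every cell's "next" offset is 0, so the line
    // above simply advances freeList to the adjacent cell; once sweep() has freed cells, the
    // stored offset links freeList to whatever cell headed the free list when this one was freed.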
    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
void* Heap::allocate(size_t s)
    return heapAllocate<PrimaryHeap>(s);

void* Heap::allocateNumber(size_t s)
    return heapAllocate<NumberHeap>(s);

static inline void* currentThreadStackBase()
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    asm ( "movl %%fs:0x18, %0\n"
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(SOLARIS)

#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    pthread_stackseg_np(thread, &stack);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        pthread_attr_destroy(&sattr);
        stackThread = thread;
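    // pthread_attr_getstack() reports the lowest addressable byte of the stack plus its size;
    // the stack grows downward on these platforms, so the base we want is stackBase + stackSize.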
    return static_cast<char*>(stackBase) + stackSize;

#error Need a way to get the stack base on this platform
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
void Heap::registerThread()
    if (pthread_getspecific(m_currentThreadRegistrar))

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
void Heap::unregisterThread(void* p)
    static_cast<Heap*>(p)->unregisterThread();

void Heap::unregisterThread()
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;

        Heap::Thread* last = m_registeredThreads;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;

        ASSERT(t); // If t is NULL, we never found ourselves in the list.
#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)
void Heap::markConservatively(void* start, void* end)

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
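            // At this point x is only a candidate pointer: xAsBits has been rounded down to a
            // cell boundary, offset is its position within a BLOCK_SIZE-aligned region, and
            // blockAddr is the start of that region. It is treated as a live cell only if
            // blockAddr matches a block we actually allocated and offset lies within the cell area.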
            // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal()
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(stackPointer, stackBase);

void Heap::markCurrentThreadConservatively()
    // setjmp forces volatile registers onto the stack
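    // (setjmp() writes the callee-saved registers into a jmp_buf on this frame, so values that
    // live only in registers become visible to the conservative stack scan that follows.)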
#pragma warning(push)
#pragma warning(disable: 4611)

    markCurrentThreadConservativelyInternal();
#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#error Need a way to suspend threads on this platform

static inline void resumeThread(const PlatformThread& platformThread)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#error Need a way to resume threads on this platform
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
typedef arm_thread_state_t PlatformThreadRegisters;
#error Unknown Architecture

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#error Need a thread register struct for this platform
size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#error Unknown Architecture

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);

    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)
#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#error Need a way to get thread registers on this platform
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)

    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
    return reinterpret_cast<void*>(regs.__sp);
#error Unknown Architecture

#else // !__DARWIN_UNIX03

    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return reinterpret_cast<void*>(regs.r1);
#error Unknown Architecture

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#error Need a way to get the stack pointer for another thread on this platform
void Heap::markOtherThreadConservatively(Thread* thread)
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
void Heap::markStackObjectsConservatively()
    markCurrentThreadConservatively();

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside mark() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.

        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(thread);
void Heap::setGCProtectNeedsLocking()
    // Most clients do not need to call this, with the notable exception of WebCore.
    // Clients that use a shared heap have JSLock protection, while others are supposed
    // to do explicit locking. WebCore violates this contract in Database code,
    // which calls gcUnprotect from a secondary thread.
    if (!m_protectedValuesMutex)
        m_protectedValuesMutex.set(new Mutex);
void Heap::protect(JSValue* k)
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (JSImmediate::isImmediate(k))

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.add(k->asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();

void Heap::unprotect(JSValue* k)
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (JSImmediate::isImmediate(k))

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    m_protectedValues.remove(k->asCell());

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
Heap* Heap::heap(const JSValue* v)
    if (JSImmediate::isImmediate(v))
    return Heap::cellBlock(v->asCell())->heap;

void Heap::markProtectedObjects()
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
template <HeapType heapType> size_t Heap::sweep()
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;
                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
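                    // This stores the inverse of the decoding done in heapAllocate(): reading
                    // (cell + 1) + next later yields the previous head of the free list, so freed
                    // cells are chained using small offsets rather than full pointers.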
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();
        if (usedCells == 0) {
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                freeBlock(reinterpret_cast<CollectorBlock*>(curBlock));
                // swap with the last block so we compact as we go
                heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
                block--; // Don't move forward a step in this case

                if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
                    heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
                    heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;

    return numLiveObjects;
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects
    markStackObjectsConservatively();
    markProtectedObjects();
    if (m_markListSet && m_markListSet->size())
        ArgList::markLists(*m_markListSet);
    if (m_globalData->exception && !m_globalData->exception->marked())
        m_globalData->exception->mark();
    m_globalData->machine->registerFile().markCallFrames(this);
    m_globalData->smallStrings.mark();

    JSGlobalObject* globalObject = m_globalData->head;
        globalObject->markCrossHeapDependentObjects();
        globalObject = globalObject->next();
    } while (globalObject != m_globalData->head);

    JAVASCRIPTCORE_GC_MARKED();

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;

size_t Heap::globalObjectCount()
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        } while (o != head);

size_t Heap::protectedGlobalObjectCount()
    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
            if (m_protectedValues.contains(o))
        } while (o != head);

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
1030 size_t Heap::protectedObjectCount()
1032 if (m_protectedValuesMutex)
1033 m_protectedValuesMutex->lock();
1035 size_t result = m_protectedValues.size();
1037 if (m_protectedValuesMutex)
1038 m_protectedValuesMutex->unlock();
static const char* typeName(JSCell* val)
    if (val->isString())
    if (val->isNumber())
    if (val->isGetterSetter())
        return "gettersetter";
    ASSERT(val->isObject());
    const ClassInfo* info = static_cast<JSObject*>(val)->classInfo();
    return info ? info->className : "Object";
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->lock();

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    if (m_protectedValuesMutex)
        m_protectedValuesMutex->unlock();
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);

Heap::iterator Heap::primaryHeapBegin()
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);

Heap::iterator Heap::primaryHeapEnd()
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);