 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "Collector.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "GCActivityCallback.h"
#include "Interpreter.h"
#include "JSGlobalObject.h"
#include "JSONObject.h"
#include "MarkStack.h"
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/WTFThreadData.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#include <sys/procfs.h>
#define COLLECT_ON_EVERY_ALLOCATION 0
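// COLLECT_ON_EVERY_ALLOCATION is a debugging knob: when set to 1, allocate() performs a
// full collection before every allocation, which is far too slow for normal use but helps
// surface GC lifetime bugs early.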
// tunable parameters
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))
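// Illustrative numbers, not used by the code: with GROWTH_FACTOR of 2 and MIN_ARRAY_SIZE of
// 14, the blocks vector capacity grows 14 -> 28 -> 56 -> ... as blocks are appended, and
// freeBlock() halves it again once fewer than numBlocks / LOW_WATER_FACTOR (a quarter) of the
// slots are in use, keeping the cost of growing and shrinking amortized constant per block.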
#if ENABLE(JSC_MULTIPLE_THREADS)
typedef mach_port_t PlatformThread;
typedef HANDLE PlatformThread;
Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
: posixThread(pthread)
, platformThread(platThread)
pthread_t posixThread;
PlatformThread platformThread;
Heap::Heap(JSGlobalData* globalData)
#if ENABLE(JSC_MULTIPLE_THREADS)
, m_registeredThreads(0)
, m_currentThreadRegistrar(0)
, m_globalData(globalData)
memset(&m_heap, 0, sizeof(CollectorHeap));
m_activityCallback = DefaultGCActivityCallback::create(this);
(*m_activityCallback)();
// The destroy function must already have been called, so assert this.
ASSERT(!m_globalData);
JSLock lock(SilenceAssertionsOnly);
ASSERT(!m_globalData->dynamicGlobalObject);
// The global object is not GC protected at this point, so sweeping may delete it
// (and thus the global data) before other objects that may use the global data.
RefPtr<JSGlobalData> protect(m_globalData);
delete m_markListSet;
for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
m_weakGCHandlePools[i].deallocate();
#if ENABLE(JSC_MULTIPLE_THREADS)
if (m_currentThreadRegistrar) {
int error = pthread_key_delete(m_currentThreadRegistrar);
ASSERT_UNUSED(error, !error);
MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
for (Heap::Thread* t = m_registeredThreads; t;) {
Heap::Thread* next = t->next;
m_blockallocator.destroy();
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
AlignedCollectorBlock allocation = m_blockallocator.allocate();
CollectorBlock* block = static_cast<CollectorBlock*>(allocation.base());
clearMarkBits(block);
Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
new (&block->cells[i]) JSCell(dummyMarkableCellStructure);
// Add block to blocks vector.
size_t numBlocks = m_heap.numBlocks;
if (m_heap.usedBlocks == numBlocks) {
static const size_t maxNumBlocks = ULONG_MAX / sizeof(AlignedCollectorBlock) / GROWTH_FACTOR;
if (numBlocks > maxNumBlocks)
numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
m_heap.numBlocks = numBlocks;
m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(AlignedCollectorBlock)));
m_heap.blocks[m_heap.usedBlocks++] = allocation;
NEVER_INLINE void Heap::freeBlock(size_t block)
m_heap.didShrink = true;
ObjectIterator it(m_heap, block);
ObjectIterator end(m_heap, block + 1);
for ( ; it != end; ++it)
m_heap.blocks[block].deallocate();
// swap with the last block so we compact as we go
m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(AlignedCollectorBlock)));
void Heap::freeBlocks()
ProtectCountSet protectedValuesCopy = m_protectedValues;
ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
m_heap.nextBlock = 0;
DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
DeadObjectIterator end(m_heap, m_heap.usedBlocks);
for ( ; it != end; ++it)
ASSERT(!protectedObjectCount());
protectedValuesEnd = protectedValuesCopy.end();
for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
it->first->~JSCell();
for (size_t block = 0; block < m_heap.usedBlocks; ++block)
m_heap.blocks[block].deallocate();
fastFree(m_heap.blocks);
memset(&m_heap, 0, sizeof(CollectorHeap));
void Heap::recordExtraCost(size_t cost)
// Our frequency of garbage collection tries to balance memory use against speed
// by collecting based on the number of newly created values. However, for values
// that hold on to a great deal of memory that's not in the form of other JS values,
// that is not good enough - in some cases a lot of those objects can pile up and
// use crazy amounts of memory without a GC happening. So we track these extra
// memory costs. Only unusually large objects are noted, and we only keep track
// of this extra cost until the next GC. In garbage collected languages, most values
// are either very short lived temporaries, or have extremely long lifetimes. So
// if a large value survives one garbage collection, there is not much point to
// collecting more frequently as long as it stays alive.
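// Illustrative usage (hedged: the thin inline wrapper that callers actually go through,
// assumed here to be reportExtraMemoryCost in this version's Collector.h, is not shown in
// this file):
//
//     // A cell backed by a large external buffer reports those bytes so the collector
//     // runs sooner than the live-cell count alone would suggest:
//     //     globalData->heap.reportExtraMemoryCost(externalBufferLengthInBytes);
//
// The accumulated extra cost is reset to zero at the end of each collection cycle.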
if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
// If the last iteration through the heap deallocated blocks, we need
// to clean up remaining garbage before marking. Otherwise, the conservative
// marking mechanism might follow a pointer to unmapped memory.
if (m_heap.didShrink)
m_heap.extraCost += cost;
void* Heap::allocate(size_t s)
ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
typedef HeapConstants::Block Block;
typedef HeapConstants::Cell Cell;
ASSERT(JSLock::lockCount() > 0);
ASSERT(JSLock::currentThreadIsHoldingLock());
ASSERT_UNUSED(s, s <= HeapConstants::cellSize);
ASSERT(m_heap.operationInProgress == NoOperation);
#if COLLECT_ON_EVERY_ALLOCATION
ASSERT(m_heap.operationInProgress == NoOperation);
// Fast case: find the next garbage cell and recycle it.
ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
Block* block = m_heap.collectorBlock(m_heap.nextBlock);
ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
Cell* cell = &block->cells[m_heap.nextCell];
m_heap.operationInProgress = Allocation;
JSCell* imp = reinterpret_cast<JSCell*>(cell);
m_heap.operationInProgress = NoOperation;
block->marked.advanceToNextPossibleFreeCell(m_heap.nextCell);
} while (m_heap.nextCell != HeapConstants::cellsPerBlock);
} while (++m_heap.nextBlock != m_heap.usedBlocks);
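// The fast path above walks the mark bitmap like a free list: any unmarked cell between
// (nextBlock, nextCell) and the end of the heap is garbage left over from the previous
// collection and can be recycled immediately. The permanently marked last cell of each block
// acts as a sentinel, so the inner loop terminates without an explicit end-of-block check.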
// Slow case: reached the end of the heap. Mark live objects and start over.
void Heap::resizeBlocks()
m_heap.didShrink = false;
size_t usedCellCount = markedCells();
size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;
size_t maxCellCount = 1.25f * minCellCount;
size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;
if (m_heap.usedBlocks < minBlockCount)
growBlocks(minBlockCount);
else if (m_heap.usedBlocks > maxBlockCount)
shrinkBlocks(maxBlockCount);
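// Worked example (illustrative numbers): if a collection leaves 10000 cells marked, the heap
// is sized for at least 10000 + max(3600, 10000) = 20000 cells and at most 1.25x that, i.e.
// 25000 cells. Keeping capacity inside this band gives newly allocated objects room to grow
// before the next collection, while the 25% slack keeps the heap from thrashing between grow
// and shrink on successive collections.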
void Heap::growBlocks(size_t neededBlocks)
ASSERT(m_heap.usedBlocks < neededBlocks);
while (m_heap.usedBlocks < neededBlocks)
void Heap::shrinkBlocks(size_t neededBlocks)
ASSERT(m_heap.usedBlocks > neededBlocks);
// Clear the always-on last bit, so isEmpty() isn't fooled by it.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
m_heap.collectorBlock(i)->marked.clear(HeapConstants::cellsPerBlock - 1);
for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
if (m_heap.collectorBlock(i)->marked.isEmpty()) {
// Reset the always-on last bit.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
m_heap.collectorBlock(i)->marked.set(HeapConstants::cellsPerBlock - 1);
JS_EXPORTDATA void* g_stackBase = 0;
inline bool isPageWritable(void* page)
MEMORY_BASIC_INFORMATION memoryInformation;
DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));
// return false on error, including ptr outside memory
if (result != sizeof(memoryInformation))
DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
return protect == PAGE_READWRITE
|| protect == PAGE_WRITECOPY
|| protect == PAGE_EXECUTE_READWRITE
|| protect == PAGE_EXECUTE_WRITECOPY;
static void* getStackBase(void* previousFrame)
// find the address of this stack frame by taking the address of a local variable
bool isGrowingDownward;
void* thisFrame = (void*)(&isGrowingDownward);
isGrowingDownward = previousFrame < &thisFrame;
static DWORD pageSize = 0;
SYSTEM_INFO systemInfo;
GetSystemInfo(&systemInfo);
pageSize = systemInfo.dwPageSize;
// scan all of memory starting from this frame, and return the last writable page found
register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
if (isGrowingDownward) {
while (currentPage > 0) {
// check for underflow
if (currentPage >= (char*)pageSize)
currentPage -= pageSize;
if (!isPageWritable(currentPage))
return currentPage + pageSize;
// guaranteed to complete because isPageWritable returns false at end of memory
currentPage += pageSize;
if (!isPageWritable(currentPage))
static inline void *currentThreadStackBaseQNX()
static void* stackBase = 0;
static size_t stackSize = 0;
static pthread_t stackThread;
pthread_t thread = pthread_self();
if (stackBase == 0 || thread != stackThread) {
struct _debug_thread_info threadInfo;
memset(&threadInfo, 0, sizeof(threadInfo));
threadInfo.tid = pthread_self();
int fd = open("/proc/self", O_RDONLY);
LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
stackSize = threadInfo.stksize;
stackThread = thread;
return static_cast<char*>(stackBase) + stackSize;
static inline void* currentThreadStackBase()
pthread_t thread = pthread_self();
return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
// offset 0x18 from the FS segment register gives a pointer to
// the thread information block for the current thread
return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
// offset 0x18 from the FS segment register gives a pointer to
// the thread information block for the current thread
asm ( "movl %%fs:0x18, %0\n"
return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64)
PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
return reinterpret_cast<void*>(pTib->StackBase);
AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
MutexLocker locker(mutex);
return currentThreadStackBaseQNX();
pthread_t thread = pthread_self();
pthread_stackseg_np(thread, &stack);
TThreadStackInfo info;
thread.StackInfo(info);
return (void*)info.iBase;
thread_info threadInfo;
get_thread_info(find_thread(NULL), &threadInfo);
return threadInfo.stack_end;
AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
MutexLocker locker(mutex);
static void* stackBase = 0;
static size_t stackSize = 0;
static pthread_t stackThread;
pthread_t thread = pthread_self();
if (stackBase == 0 || thread != stackThread) {
pthread_attr_t sattr;
pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
// e.g. on FreeBSD 5.4, neundorf@kde.org
pthread_attr_get_np(thread, &sattr);
// FIXME: this function is non-portable; other POSIX systems may have different np alternatives
pthread_getattr_np(thread, &sattr);
int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
(void)rc; // FIXME: Deal with error code somehow? Seems fatal.
pthread_attr_destroy(&sattr);
stackThread = thread;
return static_cast<char*>(stackBase) + stackSize;
AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
MutexLocker locker(mutex);
return getStackBase(&dummy);
#error Need a way to get the stack base on this platform
#if ENABLE(JSC_MULTIPLE_THREADS)
static inline PlatformThread getCurrentPlatformThread()
return pthread_mach_thread_np(pthread_self());
return pthread_getw32threadhandle_np(pthread_self());
void Heap::makeUsableFromMultipleThreads()
if (m_currentThreadRegistrar)
int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
void Heap::registerThread()
ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());
if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
pthread_setspecific(m_currentThreadRegistrar, this);
Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());
MutexLocker lock(m_registeredThreadsMutex);
thread->next = m_registeredThreads;
m_registeredThreads = thread;
void Heap::unregisterThread(void* p)
static_cast<Heap*>(p)->unregisterThread();
void Heap::unregisterThread()
pthread_t currentPosixThread = pthread_self();
MutexLocker lock(m_registeredThreadsMutex);
if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
Thread* t = m_registeredThreads;
m_registeredThreads = m_registeredThreads->next;
Heap::Thread* last = m_registeredThreads;
for (t = m_registeredThreads->next; t; t = t->next) {
if (pthread_equal(t->posixThread, currentPosixThread)) {
last->next = t->next;
ASSERT(t); // If t is NULL, we never found ourselves in the list.
#else // ENABLE(JSC_MULTIPLE_THREADS)
void Heap::registerThread()
inline bool isPointerAligned(void* p)
return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);
static inline bool isCellAligned(void *p)
return (((intptr_t)(p) & CELL_MASK) == 0);
static inline bool isPossibleCell(void* p)
return isCellAligned(p) && p;
void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
ASSERT(isPointerAligned(start));
ASSERT(isPointerAligned(end));
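// Scan every pointer-sized word in [start, end). A word is a candidate only if it is
// cell-aligned and non-null; it is then rounded down to its block boundary, and that block
// address is compared against every block the heap actually owns. Only on a match is the
// word pushed onto the mark stack, so stray integers that merely look like pointers cannot
// cause anything outside the collector heap to be marked.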
char** p = static_cast<char**>(start);
char** e = static_cast<char**>(end);
if (isPossibleCell(x)) {
uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
xAsBits &= CELL_ALIGN_MASK;
uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
if (offset > lastCellOffset)
CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
usedBlocks = m_heap.usedBlocks;
for (size_t block = 0; block < usedBlocks; block++) {
if (m_heap.collectorBlock(block) != blockAddr)
markStack.append(reinterpret_cast<JSCell*>(xAsBits));
void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
void* stackPointer = &dummy;
void* stackBase = currentThreadStackBase();
markConservatively(markStack, stackPointer, stackBase);
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#define REGISTER_BUFFER_ALIGNMENT
void Heap::markCurrentThreadConservatively(MarkStack& markStack)
// setjmp forces volatile registers onto the stack
jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#pragma warning(push)
#pragma warning(disable: 4611)
markCurrentThreadConservativelyInternal(markStack);
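// The jmp_buf above lives in this function's frame, and markCurrentThreadConservativelyInternal
// is NEVER_INLINE, so when it scans from one of its own locals up to the thread's stack base
// the saved register values are covered along with the rest of the live stack.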
#if ENABLE(JSC_MULTIPLE_THREADS)
static inline void suspendThread(const PlatformThread& platformThread)
thread_suspend(platformThread);
SuspendThread(platformThread);
#error Need a way to suspend threads on this platform
static inline void resumeThread(const PlatformThread& platformThread)
thread_resume(platformThread);
ResumeThread(platformThread);
#error Need a way to resume threads on this platform
typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit
typedef i386_thread_state_t PlatformThreadRegisters;
typedef x86_thread_state64_t PlatformThreadRegisters;
typedef ppc_thread_state_t PlatformThreadRegisters;
typedef ppc_thread_state64_t PlatformThreadRegisters;
typedef arm_thread_state_t PlatformThreadRegisters;
#error Unknown Architecture
#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#error Need a thread register struct for this platform
static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
unsigned user_count = sizeof(regs)/sizeof(int);
thread_state_flavor_t flavor = i386_THREAD_STATE;
unsigned user_count = x86_THREAD_STATE64_COUNT;
thread_state_flavor_t flavor = x86_THREAD_STATE64;
unsigned user_count = PPC_THREAD_STATE_COUNT;
thread_state_flavor_t flavor = PPC_THREAD_STATE;
unsigned user_count = PPC_THREAD_STATE64_COUNT;
thread_state_flavor_t flavor = PPC_THREAD_STATE64;
unsigned user_count = ARM_THREAD_STATE_COUNT;
thread_state_flavor_t flavor = ARM_THREAD_STATE;
#error Unknown Architecture
kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
if (result != KERN_SUCCESS) {
WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
"JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
return user_count * sizeof(usword_t);
#elif OS(WINDOWS) && CPU(X86)
regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
GetThreadContext(platformThread, &regs);
return sizeof(CONTEXT);
#error Need a way to get thread registers on this platform
static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
return reinterpret_cast<void*>(regs.__esp);
return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
return reinterpret_cast<void*>(regs.__r1);
return reinterpret_cast<void*>(regs.__sp);
#error Unknown Architecture
#else // !__DARWIN_UNIX03
return reinterpret_cast<void*>(regs.esp);
return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
return reinterpret_cast<void*>(regs.r1);
#error Unknown Architecture
#endif // __DARWIN_UNIX03
#elif CPU(X86) && OS(WINDOWS)
return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#error Need a way to get the stack pointer for another thread on this platform
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
suspendThread(thread->platformThread);
PlatformThreadRegisters regs;
size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);
// mark the thread's registers
markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));
void* stackPointer = otherThreadStackPointer(regs);
markConservatively(markStack, stackPointer, thread->stackBase);
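// This scan assumes a downward-growing stack on the supported targets: everything between
// the suspended thread's current stack pointer and the stack base it reported when it
// registered is treated as potentially live and scanned for possible cell pointers before
// the thread is resumed.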
resumeThread(thread->platformThread);
void Heap::markStackObjectsConservatively(MarkStack& markStack)
markCurrentThreadConservatively(markStack);
#if ENABLE(JSC_MULTIPLE_THREADS)
if (m_currentThreadRegistrar) {
MutexLocker lock(m_registeredThreadsMutex);
// Forbid malloc during the mark phase. Marking a thread suspends it, so
// a malloc inside markChildren() would risk a deadlock with a thread that had been
// suspended while holding the malloc lock.
// It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
// and since this is a shared heap, they are real locks.
for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
if (!pthread_equal(thread->posixThread, pthread_self()))
markOtherThreadConservatively(markStack, thread);
void Heap::updateWeakGCHandles()
for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
weakGCHandlePool(i)->update();
void WeakGCHandlePool::update()
for (unsigned i = 1; i < WeakGCHandlePool::numPoolEntries; ++i) {
if (m_entries[i].isValidPtr()) {
JSCell* cell = m_entries[i].get();
if (!cell || !Heap::isCellMarked(cell))
m_entries[i].invalidate();
WeakGCHandle* Heap::addWeakGCHandle(JSCell* ptr)
for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
if (!weakGCHandlePool(i)->isFull())
return weakGCHandlePool(i)->allocate(ptr);
AlignedMemory<WeakGCHandlePool::poolSize> allocation = m_weakGCHandlePoolAllocator.allocate();
m_weakGCHandlePools.append(allocation);
WeakGCHandlePool* pool = new (allocation) WeakGCHandlePool();
return pool->allocate(ptr);
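// Weak handles live in fixed-size, aligned pools. addWeakGCHandle() reuses a free slot in an
// existing pool when it can and only maps a fresh pool once every current pool is full; after
// each mark phase, updateWeakGCHandles() invalidates any entry whose cell was not marked, so
// a stale handle is flagged invalid instead of pointing at a swept cell.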
void Heap::protect(JSValue k)
ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
m_protectedValues.add(k.asCell());
bool Heap::unprotect(JSValue k)
ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
return m_protectedValues.remove(k.asCell());
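// m_protectedValues is a counted set, so protect()/unprotect() calls nest: a cell remains a
// GC root until every protect() has been balanced by an unprotect(). Client code typically
// goes through the gcProtect()/gcUnprotect() helpers or the ProtectedPtr<T> RAII wrapper
// (declared elsewhere in JavaScriptCore; named here for orientation, not defined in this
// file), e.g.:
//
//     gcProtect(cell);      // pins the cell across collections
//     ...
//     gcUnprotect(cell);    // balanced release; the cell becomes collectable again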
void Heap::markProtectedObjects(MarkStack& markStack)
ProtectCountSet::iterator end = m_protectedValues.end();
for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
markStack.append(it->first);
void Heap::clearMarkBits()
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
clearMarkBits(m_heap.collectorBlock(i));
void Heap::clearMarkBits(CollectorBlock* block)
// allocate assumes that the last cell in every block is marked.
block->marked.clearAll();
block->marked.set(HeapConstants::cellsPerBlock - 1);
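// Keeping the final cell of every block permanently marked gives allocate() a sentinel: the
// allocation scan can stop at a marked cell without an explicit end-of-block bounds check.
// shrinkBlocks() temporarily clears this bit so isEmpty() can recognize blocks that contain
// nothing but the sentinel, then sets it again before returning.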
size_t Heap::markedCells(size_t startBlock, size_t startCell) const
ASSERT(startBlock <= m_heap.usedBlocks);
ASSERT(startCell < HeapConstants::cellsPerBlock);
if (startBlock >= m_heap.usedBlocks)
result += m_heap.collectorBlock(startBlock)->marked.count(startCell);
for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
result += m_heap.collectorBlock(i)->marked.count();
ASSERT(m_heap.operationInProgress == NoOperation);
if (m_heap.operationInProgress != NoOperation)
m_heap.operationInProgress = Collection;
#if !ENABLE(JSC_ZOMBIES)
Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
DeadObjectIterator end(m_heap, m_heap.usedBlocks);
for ( ; it != end; ++it) {
#if ENABLE(JSC_ZOMBIES)
if (!cell->isZombie()) {
const ClassInfo* info = cell->classInfo();
new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
Heap::markCell(cell);
// Callers of sweep assume it's safe to mark any cell in the heap.
new (cell) JSCell(dummyMarkableCellStructure);
m_heap.operationInProgress = NoOperation;
void Heap::markRoots()
if (m_globalData->isSharedInstance()) {
ASSERT(JSLock::lockCount() > 0);
ASSERT(JSLock::currentThreadIsHoldingLock());
ASSERT(m_heap.operationInProgress == NoOperation);
if (m_heap.operationInProgress != NoOperation)
m_heap.operationInProgress = Collection;
MarkStack& markStack = m_globalData->markStack;
// Mark stack roots.
markStackObjectsConservatively(markStack);
m_globalData->interpreter->registerFile().markCallFrames(markStack, this);
// Mark explicitly registered roots.
markProtectedObjects(markStack);
// Mark misc. other roots.
if (m_markListSet && m_markListSet->size())
MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
if (m_globalData->exception)
markStack.append(m_globalData->exception);
if (m_globalData->firstStringifierToMark)
JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);
// Mark the small strings cache last, since it will clear itself if nothing
// else has marked it.
m_globalData->smallStrings.markChildren(markStack);
markStack.compact();
updateWeakGCHandles();
m_heap.operationInProgress = NoOperation;
size_t Heap::objectCount() const
return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
+ m_heap.nextCell // allocated cells in current block
+ markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
- m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
void Heap::addToStatistics(Heap::Statistics& statistics) const
statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
Heap::Statistics Heap::statistics() const
Statistics statistics = { 0, 0 };
addToStatistics(statistics);
size_t Heap::size() const
return m_heap.usedBlocks * BLOCK_SIZE;
size_t Heap::globalObjectCount()
if (JSGlobalObject* head = m_globalData->head) {
JSGlobalObject* o = head;
} while (o != head);
size_t Heap::protectedGlobalObjectCount()
if (JSGlobalObject* head = m_globalData->head) {
JSGlobalObject* o = head;
if (m_protectedValues.contains(o))
} while (o != head);
size_t Heap::protectedObjectCount()
return m_protectedValues.size();
static const char* typeName(JSCell* cell)
if (cell->isString())
if (cell->isGetterSetter())
return "Getter-Setter";
if (cell->isAPIValueWrapper())
return "API wrapper";
if (cell->isPropertyNameIterator())
return "For-in iterator";
if (!cell->isObject())
return "[empty cell]";
const ClassInfo* info = cell->classInfo();
return info ? info->className : "Object";
HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
ProtectCountSet::iterator end = m_protectedValues.end();
for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
counts->add(typeName(it->first));
HashCountedSet<const char*>* Heap::objectTypeCounts()
HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;
LiveObjectIterator it = primaryHeapBegin();
LiveObjectIterator heapEnd = primaryHeapEnd();
for ( ; it != heapEnd; ++it)
counts->add(typeName(*it));
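// Both protectedObjectTypeCounts() and objectTypeCounts() return a freshly heap-allocated
// HashCountedSet keyed by class name; the caller owns the set and must delete it. They are
// diagnostic helpers rather than part of the collection algorithm itself.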
return m_heap.operationInProgress != NoOperation;
ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
JAVASCRIPTCORE_GC_BEGIN();
JAVASCRIPTCORE_GC_MARKED();
m_heap.nextCell = 0;
m_heap.nextBlock = 0;
m_heap.nextNumber = 0;
m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
JAVASCRIPTCORE_GC_END();
(*m_activityCallback)();
void Heap::collectAllGarbage()
ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
JAVASCRIPTCORE_GC_BEGIN();
// If the last iteration through the heap deallocated blocks, we need
// to clean up remaining garbage before marking. Otherwise, the conservative
// marking mechanism might follow a pointer to unmapped memory.
if (m_heap.didShrink)
JAVASCRIPTCORE_GC_MARKED();
m_heap.nextCell = 0;
m_heap.nextBlock = 0;
m_heap.nextNumber = 0;
m_heap.extraCost = 0;
JAVASCRIPTCORE_GC_END();
LiveObjectIterator Heap::primaryHeapBegin()
return LiveObjectIterator(m_heap, 0);
LiveObjectIterator Heap::primaryHeapEnd()
return LiveObjectIterator(m_heap, m_heap.usedBlocks);
void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
m_activityCallback = activityCallback;
GCActivityCallback* Heap::activityCallback()
return m_activityCallback.get();