[WTF] Use m_suspendCount instead of m_suspended flag in Thread
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 29dc173..871b301 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #include "config.h"
 #include "MarkStack.h"
 
-#include "CopiedSpace.h"
-#include "CopiedSpaceInlineMethods.h"
-#include "ConservativeRoots.h"
-#include "Heap.h"
-#include "Options.h"
-#include "JSArray.h"
-#include "JSCell.h"
-#include "JSObject.h"
-#include "ScopeChain.h"
-#include "Structure.h"
-#include "UString.h"
-#include "WriteBarrier.h"
-#include <wtf/MainThread.h>
+#include "GCSegmentedArrayInlines.h"
+#include "JSCInlines.h"
 
 namespace JSC {
 
-MarkStackSegmentAllocator::MarkStackSegmentAllocator()
-    : m_nextFreeSegment(0)
+MarkStackArray::MarkStackArray()
+    : GCSegmentedArray<const JSCell*>()
 {
 }
 
-MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
+void MarkStackArray::transferTo(MarkStackArray& other)
 {
-    shrinkReserve();
-}
-
-MarkStackSegment* MarkStackSegmentAllocator::allocate()
-{
-    {
-        MutexLocker locker(m_lock);
-        if (m_nextFreeSegment) {
-            MarkStackSegment* result = m_nextFreeSegment;
-            m_nextFreeSegment = result->m_previous;
-            return result;
-        }
-    }
-
-    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
-}
-
-void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
-{
-    MutexLocker locker(m_lock);
-    segment->m_previous = m_nextFreeSegment;
-    m_nextFreeSegment = segment;
-}
-
-void MarkStackSegmentAllocator::shrinkReserve()
-{
-    MarkStackSegment* segments;
-    {
-        MutexLocker locker(m_lock);
-        segments = m_nextFreeSegment;
-        m_nextFreeSegment = 0;
-    }
-    while (segments) {
-        MarkStackSegment* toFree = segments;
-        segments = segments->m_previous;
-        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
-    }
-}
-
-MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
-    : m_allocator(allocator)
-    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
-    , m_top(0)
-    , m_numberOfPreviousSegments(0)
-{
-    m_topSegment = m_allocator.allocate();
-#if !ASSERT_DISABLED
-    m_topSegment->m_top = 0;
-#endif
-    m_topSegment->m_previous = 0;
-}
-
-MarkStackArray::~MarkStackArray()
-{
-    ASSERT(!m_topSegment->m_previous);
-    m_allocator.release(m_topSegment);
-}
-
-void MarkStackArray::expand()
-{
-    ASSERT(m_topSegment->m_top == m_segmentCapacity);
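+    // Move every cell we hold onto the other stack, leaving this stack empty.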
+    RELEASE_ASSERT(this != &other);
     
-    m_numberOfPreviousSegments++;
+    // Remove our head and the head of the other list.
+    GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
+    GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
+    m_numberOfSegments--;
+    other.m_numberOfSegments--;
     
-    MarkStackSegment* nextSegment = m_allocator.allocate();
-#if !ASSERT_DISABLED
-    nextSegment->m_top = 0;
-#endif
-    nextSegment->m_previous = m_topSegment;
-    m_topSegment = nextSegment;
-    setTopForEmptySegment();
-    validatePrevious();
-}
-
-bool MarkStackArray::refill()
-{
-    validatePrevious();
-    if (top())
-        return true;
-    MarkStackSegment* toFree = m_topSegment;
-    MarkStackSegment* previous = m_topSegment->m_previous;
-    if (!previous)
-        return false;
-    ASSERT(m_numberOfPreviousSegments);
-    m_numberOfPreviousSegments--;
-    m_topSegment = previous;
-    m_allocator.release(toFree);
-    setTopForFullSegment();
-    validatePrevious();
-    return true;
-}
-
-bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
-{
-    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
-    validatePrevious();
-    other.validatePrevious();
-        
-    // Fast check: see if the other mark stack already has enough segments.
-    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
-        return false;
-        
-    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
-    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
-        
-    // Looks like we should donate! Give the other mark stack all of our
-    // previous segments, and then top it off.
-    MarkStackSegment* previous = m_topSegment->m_previous;
-    while (previous) {
-        ASSERT(m_numberOfPreviousSegments);
-
-        MarkStackSegment* current = previous;
-        previous = current->m_previous;
-            
-        current->m_previous = other.m_topSegment->m_previous;
-        other.m_topSegment->m_previous = current;
-            
-        m_numberOfPreviousSegments--;
-        other.m_numberOfPreviousSegments++;
-    }
-    ASSERT(!m_numberOfPreviousSegments);
-    m_topSegment->m_previous = 0;
-    validatePrevious();
-    other.validatePrevious();
-        
-    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
-    // we really have a lot of work, we give up half.
-    if (m_top > numberOfCellsToKeep * 2)
-        numberOfCellsToKeep = m_top / 2;
-    while (m_top > numberOfCellsToKeep)
-        other.append(removeLast());
-        
-    return true;
-}
-
-void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
-{
-    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
-    validatePrevious();
-    other.validatePrevious();
-        
-    // If other has an entire segment, steal it and return.
-    if (other.m_topSegment->m_previous) {
-        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);
-            
-        // First remove a segment from other.
-        MarkStackSegment* current = other.m_topSegment->m_previous;
-        other.m_topSegment->m_previous = current->m_previous;
-        other.m_numberOfPreviousSegments--;
-            
-        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);
-            
-        // Now add it to this.
-        current->m_previous = m_topSegment->m_previous;
-        m_topSegment->m_previous = current;
-        m_numberOfPreviousSegments++;
-            
-        validatePrevious();
-        other.validatePrevious();
-        return;
-    }
-        
-    // Otherwise drain 1/Nth of the shared array where N is the number of
-    // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
-    size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
-    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
-        append(other.removeLast());
-}
-
-#if ENABLE(PARALLEL_GC)
-void MarkStackThreadSharedData::resetChildren()
-{
-    for (unsigned i = 0; i < m_slaveMarkStacks.size(); ++i)
-       m_slaveMarkStacks[i]->reset();
-}
-
-void MarkStackThreadSharedData::markingThreadMain(SlotVisitor* slotVisitor)
-{
-    WTF::registerGCThread();
-    {
-        ParallelModeEnabler enabler(*slotVisitor);
-        slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
-    }
-    delete slotVisitor;
-}
-
-void MarkStackThreadSharedData::markingThreadStartFunc(void* myVisitor)
-{
-    SlotVisitor* slotVisitor = static_cast<SlotVisitor*>(myVisitor);
-    slotVisitor->sharedData().markingThreadMain(slotVisitor);
-}
-#endif
-
-MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
-    : m_globalData(globalData)
-    , m_copiedSpace(&globalData->heap.m_storageSpace)
-    , m_sharedMarkStack(m_segmentAllocator)
-    , m_numberOfActiveParallelMarkers(0)
-    , m_parallelMarkersShouldExit(false)
-{
-#if ENABLE(PARALLEL_GC)
-    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
-        SlotVisitor* slotVisitor = new SlotVisitor(*this);
-        m_slaveMarkStacks.append(slotVisitor);
-        m_markingThreads.append(createThread(markingThreadStartFunc, slotVisitor, "JavaScriptCore::Marking"));
-        ASSERT(m_markingThreads.last());
-    }
-#endif
-}
-
-MarkStackThreadSharedData::~MarkStackThreadSharedData()
-{
-#if ENABLE(PARALLEL_GC)    
-    // Destroy our marking threads.
-    {
-        MutexLocker locker(m_markingLock);
-        m_parallelMarkersShouldExit = true;
-        m_markingCondition.broadcast();
-    }
-    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
-        waitForThreadCompletion(m_markingThreads[i]);
-#endif
-}
+    other.m_segments.append(m_segments);
     
-void MarkStackThreadSharedData::reset()
-{
-    ASSERT(!m_numberOfActiveParallelMarkers);
-    ASSERT(!m_parallelMarkersShouldExit);
-    ASSERT(m_sharedMarkStack.isEmpty());
+    other.m_numberOfSegments += m_numberOfSegments;
+    m_numberOfSegments = 0;
     
-#if ENABLE(PARALLEL_GC)
-    m_segmentAllocator.shrinkReserve();
-    m_opaqueRoots.clear();
-#else
-    ASSERT(m_opaqueRoots.isEmpty());
-#endif
-    m_weakReferenceHarvesters.removeAll();
-}
-
-void MarkStack::reset()
-{
-    m_visitCount = 0;
-    ASSERT(m_stack.isEmpty());
-#if ENABLE(PARALLEL_GC)
-    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
-#else
-    m_opaqueRoots.clear();
-#endif
-    m_uniqueStrings.clear();
-}
-
-void MarkStack::append(ConservativeRoots& conservativeRoots)
-{
-    JSCell** roots = conservativeRoots.roots();
-    size_t size = conservativeRoots.size();
-    for (size_t i = 0; i < size; ++i)
-        internalAppend(roots[i]);
-}
-
-ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
-{
-#if ENABLE(SIMPLE_HEAP_PROFILING)
-    m_visitedTypeCounts.count(cell);
-#endif
-
-    ASSERT(Heap::isMarked(cell));
+    // Put the original heads back in their places.
+    m_segments.push(myHead);
+    other.m_segments.push(otherHead);
+    m_numberOfSegments++;
+    other.m_numberOfSegments++;
     
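+    // Only our original head segment can still hold cells now; drain any
+    // leftovers into the other stack one cell at a time.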
-    if (isJSString(cell)) {
-        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
-        return;
-    }
-
-    if (isJSFinalObject(cell)) {
-        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
-        return;
-    }
-
-    if (isJSArray(cell)) {
-        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
-        return;
-    }
-
-    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
-}
-
-void SlotVisitor::donateSlow()
-{
-    // Refuse to donate if shared has more entries than I do.
-    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
-        return;
-    MutexLocker locker(m_shared.m_markingLock);
-    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
-        // Only wake up threads if the shared stack is big enough; otherwise assume that
-        // it's more profitable for us to just scan this ourselves later.
-        if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
-            m_shared.m_markingCondition.broadcast();
+    while (!isEmpty()) {
+        refill();
+        while (canRemoveLast())
+            other.append(removeLast());
     }
 }
 
-void SlotVisitor::drain()
+size_t MarkStackArray::transferTo(MarkStackArray& other, size_t limit)
 {
-    ASSERT(m_isInParallelMode);
-   
-#if ENABLE(PARALLEL_GC)
-    if (Options::numberOfGCMarkers > 1) {
-        while (!m_stack.isEmpty()) {
-            m_stack.refill();
-            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
-                visitChildren(*this, m_stack.removeLast());
-            donateKnownParallel();
+    size_t count = 0;
+    while (count < limit && !isEmpty()) {
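+        // refill() is a no-op while the head segment still has cells; otherwise
+        // it retires the empty head and exposes the next full segment.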
+        refill();
+        while (count < limit && canRemoveLast()) {
+            other.append(removeLast());
+            count++;
         }
-        
-        mergeOpaqueRootsIfNecessary();
-        return;
-    }
-#endif
-    
-    while (!m_stack.isEmpty()) {
-        m_stack.refill();
-        while (m_stack.canRemoveLast())
-            visitChildren(*this, m_stack.removeLast());
     }
+    RELEASE_ASSERT(count <= limit);
+    return count;
 }
 
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
 {
-    ASSERT(m_isInParallelMode);
-    
-    ASSERT(Options::numberOfGCMarkers);
-    
-    bool shouldBeParallel;
+    // Try to donate about 1 / 2 of our cells. To reduce copying costs,
+    // we prefer donating whole segments over donating individual cells,
+    // even if this skews away from our 1 / 2 target.
 
-#if ENABLE(PARALLEL_GC)
-    shouldBeParallel = Options::numberOfGCMarkers > 1;
-#else
-    ASSERT(Options::numberOfGCMarkers == 1);
-    shouldBeParallel = false;
-#endif
-    
-    if (!shouldBeParallel) {
-        // This call should be a no-op.
-        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
-        ASSERT(m_stack.isEmpty());
-        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
-        return;
-    }
-    
-#if ENABLE(PARALLEL_GC)
-    {
-        MutexLocker locker(m_shared.m_markingLock);
-        m_shared.m_numberOfActiveParallelMarkers++;
-    }
-    while (true) {
-        {
-            MutexLocker locker(m_shared.m_markingLock);
-            m_shared.m_numberOfActiveParallelMarkers--;
+    size_t segmentsToDonate = m_numberOfSegments / 2; // If we only have one segment (our head) we don't donate any segments.
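+    // e.g. with 5 segments we donate 5 / 2 = 2 and keep 3, including our head.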
 
-            // How we wait differs depending on drain mode.
-            if (sharedDrainMode == MasterDrain) {
-                // Wait until either termination is reached, or until there is some work
-                // for us to do.
-                while (true) {
-                    // Did we reach termination?
-                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
-                        // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
-                        m_shared.m_markingCondition.broadcast();
-                        return;
-                    }
-                    
-                    // Is there work to be done?
-                    if (!m_shared.m_sharedMarkStack.isEmpty())
-                        break;
-                    
-                    // Otherwise wait.
-                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
-                }
-            } else {
-                ASSERT(sharedDrainMode == SlaveDrain);
-                
-                // Did we detect termination? If so, let the master know.
-                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                    m_shared.m_markingCondition.broadcast();
-                
-                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
-                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                        doneCopying();
-                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
-                }
-                
-                // Is the VM exiting? If so, exit this thread.
-                if (m_shared.m_parallelMarkersShouldExit) {
-                    doneCopying();
-                    return;
-                }
-            }
-           
-            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
-            m_shared.m_numberOfActiveParallelMarkers++;
+    if (!segmentsToDonate) {
+        size_t cellsToDonate = m_top / 2; // Rounds down, so with zero or one cell we donate nothing.
+        while (cellsToDonate--) {
+            ASSERT(m_top);
+            other.append(removeLast());
         }
-        
-        drain();
+        return;
     }
-#endif
-}
 
-void MarkStack::mergeOpaqueRoots()
-{
-    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
-    {
-        MutexLocker locker(m_shared.m_opaqueRootsLock);
-        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
-        HashSet<void*>::iterator end = m_opaqueRoots.end();
-        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
-            m_shared.m_opaqueRoots.add(*iter);
-    }
-    m_opaqueRoots.clear();
-}
+    validatePrevious();
+    other.validatePrevious();
 
-void SlotVisitor::startCopying()
-{
-    ASSERT(!m_copyBlock);
-    if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
-        CRASH();
-}    
+    // Remove our head and the head of the other list before we start moving segments around.
+    // We'll add them back on once we're done donating.
+    GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
+    GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
 
-void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
-{
-    if (CopiedSpace::isOversize(bytes)) {
-        m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
-        return 0;
+    while (segmentsToDonate--) {
+        GCArraySegment<const JSCell*>* current = m_segments.removeHead();
+        ASSERT(current);
+        ASSERT(m_numberOfSegments > 1);
+        other.m_segments.push(current);
+        m_numberOfSegments--;
+        other.m_numberOfSegments++;
     }
 
-    if (m_shared.m_copiedSpace->isPinned(ptr))
-        return 0;
-
-    // The only time it's possible to have a null copy block is if we have just started copying.
-    if (!m_copyBlock)
-        startCopying();
+    // Put the original heads back in their places.
+    m_segments.push(myHead);
+    other.m_segments.push(otherHead);
 
-    if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
-        // We don't need to lock across these two calls because the master thread won't 
-        // call doneCopying() because this thread is considered active.
-        m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-        if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
-            CRASH();
-    }
-    return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
+    validatePrevious();
+    other.validatePrevious();
 }
 
-inline void MarkStack::internalAppend(JSValue* slot)
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
 {
-    ASSERT(slot);
-    JSValue value = *slot;
-    ASSERT(value);
-    if (!value.isCell())
-        return;
-
-    if (value.isString()) {
-        JSString* string = jsCast<JSString*>(value.asCell());
-        if (!string->isHashConstSingleton() && string->length() > 1 && !string->isRope()) {
-            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
-            if (addResult.isNewEntry)
-                string->setHashConstSingleton();
-            else {
-                JSValue existingJSValue = addResult.iterator->second;
-                if (value != existingJSValue)
-                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton();
-                *slot = existingJSValue;
-                return;
-            }
-        }
-    }
-
-    internalAppend(value.asCell());
-}
+    // Try to steal 1 / Nth of the shared array, where N is the number of idle threads.
+    // To reduce copying costs, we prefer stealing a whole segment over stealing
+    // individual cells, even if this skews away from our 1 / N target.
 
+    validatePrevious();
+    other.validatePrevious();
+        
+    // If other has an entire segment, steal it and return.
+    if (other.m_numberOfSegments > 1) {
+        // Move the heads of the lists aside. We'll push them back on after.
+        GCArraySegment<const JSCell*>* otherHead = other.m_segments.removeHead();
+        GCArraySegment<const JSCell*>* myHead = m_segments.removeHead();
 
-void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
-{
-    void* oldPtr = *ptr;
-    void* newPtr = allocateNewSpace(oldPtr, bytes);
-    if (newPtr) {
-        size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));
+        ASSERT(other.m_segments.head()->m_top == s_segmentCapacity);
 
-        JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
-        for (unsigned i = 0; i < length; i++) {
-            JSValue& value = values[i];
-            newValues[i] = value;
-            if (!value)
-                continue;
-            internalAppend(&newValues[i]);
-        }
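+        // Every segment below a head is kept full, so the stolen segment
+        // carries a full segment's worth of cells.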
+        m_segments.push(other.m_segments.removeHead());
 
-        memcpy(newPtr, oldPtr, jsValuesOffset);
-        *ptr = newPtr;
-    } else
-        append(values, length);
-}
+        m_numberOfSegments++;
+        other.m_numberOfSegments--;
+        
+        m_segments.push(myHead);
+        other.m_segments.push(otherHead);
     
-void SlotVisitor::doneCopying()
-{
-    if (!m_copyBlock)
+        validatePrevious();
+        other.validatePrevious();
         return;
+    }
 
-    m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-
-    m_copyBlock = 0;
-}
-
-void SlotVisitor::harvestWeakReferences()
-{
-    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
-        current->visitWeakReferences(*this);
-}
-
-void SlotVisitor::finalizeUnconditionalFinalizers()
-{
-    while (m_shared.m_unconditionalFinalizers.hasNext())
-        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
-}
-
-#if ENABLE(GC_VALIDATION)
-void MarkStack::validate(JSCell* cell)
-{
-    if (!cell)
-        CRASH();
-
-    if (!cell->structure())
-        CRASH();
-
-    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
-    // I hate this sentence.
-    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
-        CRASH();
-}
-#else
-void MarkStack::validate(JSCell*)
-{
+    // Steal ceil(other.size() / idleThreadCount) things.
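+    // e.g. 10 cells and 4 idle threads: (10 + 4 - 1) / 4 = 3 cells stolen.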
+    size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount;
+    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
+        append(other.removeLast());
 }
-#endif
 
 } // namespace JSC
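
A minimal usage sketch, not part of the diff above: one way a marking thread
might use these primitives to balance work between its local and shared mark
stacks. The function name, its parameters, and the caller-held locking around
the shared stack are illustrative assumptions, not code from this file.

    #include "MarkStack.h"

    // Donate when we have spare work; steal when we run dry. The caller is
    // assumed to hold whatever lock guards |sharedStack|.
    static void balanceMarkingWork(JSC::MarkStackArray& localStack,
        JSC::MarkStackArray& sharedStack, size_t idleThreadCount)
    {
        if (localStack.isEmpty()) {
            // Take roughly a 1/Nth share of the shared work, where N is the
            // number of idle marking threads.
            localStack.stealSomeCellsFrom(sharedStack, idleThreadCount);
            return;
        }

        // Echo the old donateSlow() heuristic: donate only if the shared
        // stack has less work queued than we do.
        if (sharedStack.size() < localStack.size())
            localStack.donateSomeCellsTo(sharedStack);
    }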