Unreviewed, rolling out r209766.
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 3f8b233..2927949 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.cpp
+++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp
 
 #include "config.h"
 #include "SlotVisitor.h"
-#include "SlotVisitorInlines.h"
 
+#include "CPU.h"
 #include "ConservativeRoots.h"
+#include "GCSegmentedArrayInlines.h"
 #include "HeapCellInlines.h"
 #include "HeapProfiler.h"
 #include "HeapSnapshotBuilder.h"
@@ -36,6 +37,7 @@
 #include "JSObject.h"
 #include "JSString.h"
 #include "JSCInlines.h"
+#include "SlotVisitorInlines.h"
 #include "SuperSampler.h"
 #include "VM.h"
 #include <wtf/Lock.h>
@@ -73,12 +75,10 @@ static void validate(JSCell* cell)
 #endif
 
 SlotVisitor::SlotVisitor(Heap& heap)
-    : m_stack()
-    , m_bytesVisited(0)
-    , m_bytesCopied(0)
+    : m_bytesVisited(0)
     , m_visitCount(0)
     , m_isInParallelMode(false)
-    , m_version(MarkedSpace::initialVersion)
+    , m_markingVersion(MarkedSpace::initialVersion)
     , m_heap(heap)
 #if !ASSERT_DISABLED
     , m_isCheckingForDefaultMarkViolation(false)
@@ -89,12 +89,12 @@ SlotVisitor::SlotVisitor(Heap& heap)
 
 SlotVisitor::~SlotVisitor()
 {
-    clearMarkStack();
+    clearMarkStacks();
 }
 
 void SlotVisitor::didStartMarking()
 {
-    if (heap()->operationInProgress() == FullCollection)
+    if (heap()->collectionScope() == CollectionScope::Full)
         ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
     else
         reset();
@@ -102,21 +102,22 @@ void SlotVisitor::didStartMarking()
     if (HeapProfiler* heapProfiler = vm().heapProfiler())
         m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();
     
-    m_version = heap()->objectSpace().version();
+    m_markingVersion = heap()->objectSpace().markingVersion();
 }
 
 void SlotVisitor::reset()
 {
+    RELEASE_ASSERT(!m_opaqueRoots.size());
     m_bytesVisited = 0;
-    m_bytesCopied = 0;
     m_visitCount = 0;
     m_heapSnapshotBuilder = nullptr;
-    ASSERT(!m_currentCell);
+    RELEASE_ASSERT(!m_currentCell);
 }
 
-void SlotVisitor::clearMarkStack()
+void SlotVisitor::clearMarkStacks()
 {
-    m_stack.clear();
+    m_collectorStack.clear();
+    m_mutatorStack.clear();
 }
 
 void SlotVisitor::append(ConservativeRoots& conservativeRoots)
@@ -134,19 +135,76 @@ void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
     
     ASSERT(!m_isCheckingForDefaultMarkViolation);
     
-    if (Heap::testAndSetMarked(m_version, heapCell))
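+    // Sanity-check the cell's structure; on failure, dump as much state as we can recover and
+    // then crash.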
+    auto validateCell = [&] (JSCell* jsCell) {
+        StructureID structureID = jsCell->structureID();
+        
+        auto die = [&] (const char* text) {
+            WTF::dataFile().atomically(
+                [&] (PrintStream& out) {
+                    out.print(text);
+                    out.print("GC type: ", heap()->collectionScope(), "\n");
+                    out.print("Object at: ", RawPointer(jsCell), "\n");
+#if USE(JSVALUE64)
+                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
+                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
+#else
+                    out.print("Structure: ", RawPointer(structureID), "\n");
+#endif
+                    out.print("Object contents:");
+                    for (unsigned i = 0; i < 2; ++i)
+                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
+                    out.print("\n");
+                    CellContainer container = jsCell->cellContainer();
+                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
+                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
+                    if (container.isMarkedBlock()) {
+                        MarkedBlock& block = container.markedBlock();
+                        out.print("Block: ", RawPointer(&block), "\n");
+                        block.handle().dumpState(out);
+                        out.print("\n");
+                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
+                        out.print("Marking version: ", block.markingVersion(), "\n");
+                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
+                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
+                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
+                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
+                    }
+                    UNREACHABLE_FOR_PLATFORM();
+                });
+        };
+        
+        // It's not OK for the structure to be null at any GC scan point. We must not GC while
+        // an object is not fully initialized.
+        if (!structureID)
+            die("GC scan found corrupt object: structureID is zero!\n");
+        
+        // It's not OK for the structure to be nuked at any GC scan point.
+        if (isNuked(structureID))
+            die("GC scan found object in bad state: structureID is nuked!\n");
+        
+#if USE(JSVALUE64)
+        // This detects the worst of the badness.
+        if (structureID >= heap()->structureIDTable().size())
+            die("GC scan found corrupt object: structureID is out of bounds!\n");
+#endif
+    };
+    
+    // In debug builds, we also validate before marking, since that makes it clearer what the
+    // problem was. Validating every cell we see (not just the ones we mark) is too slow to do
+    // unconditionally.
+    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
+        validateCell(static_cast<JSCell*>(heapCell));
+    
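+    // Heap::testAndSetMarked() atomically sets the mark bit for the current marking version and
+    // returns whether it was already set, so only the first visitor to reach a cell scans it.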
+    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
         return;
     
     switch (heapCell->cellKind()) {
     case HeapCell::JSCell: {
+        // We only get here the first time a cell is marked in a cycle, so we have ample budget
+        // to perform validation even in release builds.
+    
         JSCell* jsCell = static_cast<JSCell*>(heapCell);
+        validateCell(jsCell);
         
-        if (!jsCell->structure()) {
-            ASSERT_NOT_REACHED();
-            return;
-        }
-        
-        jsCell->setCellState(CellState::NewGrey);
+        jsCell->setCellState(CellState::Grey);
 
         appendToMarkStack(jsCell);
         return;
@@ -189,8 +247,6 @@ void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
     validate(cell);
 #endif
     
-    //dataLog("    Marking ", RawPointer(cell), "\n");
-    
     if (cell->isLargeAllocation())
         setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
     else
@@ -200,7 +256,7 @@ void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
 template<typename ContainerType>
 ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell)
 {
-    container.aboutToMark(m_version);
+    container.aboutToMark(m_markingVersion);
     
     if (container.testAndSetMarked(cell))
         return;
@@ -210,7 +266,7 @@ ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& con
     // Indicate that the object is grey and that:
     // In case of concurrent GC: it's the first time it is grey in this GC cycle.
     // In case of eden collection: it's a new object that became grey rather than an old remembered object.
-    cell->setCellState(CellState::NewGrey);
+    cell->setCellState(CellState::Grey);
     
     appendToMarkStack(container, cell);
 }
@@ -228,18 +284,19 @@ ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCe
 {
     ASSERT(Heap::isMarkedConcurrently(cell));
     ASSERT(!cell->isZapped());
+    ASSERT(cell->cellState() == CellState::Grey);
     
     container.noteMarked();
     
-    // FIXME: These "just work" because the GC resets these fields before doing anything else. But
-    // that won't be the case when we do concurrent GC.
     m_visitCount++;
     m_bytesVisited += container.cellSize();
     
-    m_stack.append(cell);
+    m_collectorStack.append(cell);
+}
 
-    if (UNLIKELY(m_heapSnapshotBuilder))
-        m_heapSnapshotBuilder->appendNode(cell);
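+// Cells enqueued by the mutator's write barrier go on their own stack: the barrier may enqueue
+// a cell many times, and always after it was already marked, so these visits must be kept
+// separate from first-time marking (e.g. for heap snapshot building; see visitChildren()).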
+void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
+{
+    m_mutatorStack.append(cell);
 }
 
 void SlotVisitor::markAuxiliary(const void* base)
@@ -248,7 +305,7 @@ void SlotVisitor::markAuxiliary(const void* base)
     
     ASSERT(cell->heap() == heap());
     
-    if (Heap::testAndSetMarked(m_version, cell))
+    if (Heap::testAndSetMarked(m_markingVersion, cell))
         return;
     
     noteLiveAuxiliaryCell(cell);
@@ -264,6 +321,7 @@ void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
     
     CellContainer container = cell->cellContainer();
     
+    container.assertValidCell(vm(), cell);
     container.noteMarked();
     
     m_visitCount++;
@@ -296,39 +354,60 @@ ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
     
     SetCurrentCellScope currentCellScope(*this, cell);
     
-    m_currentObjectCellStateBeforeVisiting = cell->cellState();
-    cell->setCellState(CellState::OldBlack);
+    if (false) {
+        dataLog("Visiting ", RawPointer(cell));
+        if (m_isVisitingMutatorStack)
+            dataLog(" (mutator)");
+        dataLog("\n");
+    }
+    
+    // Funny story: it's possible for the object to be black already, if we barrier the object at
+    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
+    // not clear to me that it would be correct or profitable to bail here if the object is already
+    // black.
+    
+    cell->setCellState(CellState::AnthraciteOrBlack);
     
-    if (isJSString(cell)) {
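+    // Order the cell-state store above before the field loads in visitChildren() below. This
+    // presumably pairs with the mutator's write barrier (store the field, then test the cell
+    // state), so that at least one side observes the other's write.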
+    WTF::storeLoadFence();
+    
+    switch (cell->type()) {
+    case StringType:
         JSString::visitChildren(const_cast<JSCell*>(cell), *this);
-        return;
-    }
-
-    if (isJSFinalObject(cell)) {
+        break;
+        
+    case FinalObjectType:
         JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
-        return;
-    }
+        break;
 
-    if (isJSArray(cell)) {
+    case ArrayType:
         JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
-        return;
+        break;
+        
+    default:
+        // FIXME: This could be so much better.
+        // https://bugs.webkit.org/show_bug.cgi?id=162462
+        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
+        break;
+    }
+    
+    if (UNLIKELY(m_heapSnapshotBuilder)) {
+        if (!m_isVisitingMutatorStack)
+            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
     }
-
-    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), *this);
 }
 
-void SlotVisitor::donateKnownParallel()
+void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
 {
     // NOTE: Because we re-try often, we can afford to be conservative, and
     // assume that donating is not profitable.
 
     // Avoid locking when a thread reaches a dead end in the object graph.
-    if (m_stack.size() < 2)
+    if (from.size() < 2)
         return;
 
     // If there's already some shared work queued up, be conservative and assume
     // that donating more is not profitable.
-    if (m_heap.m_sharedMarkStack.size())
+    if (to.size())
         return;
 
     // If we're contending on the lock, be conservative and assume that another
@@ -338,93 +417,186 @@ void SlotVisitor::donateKnownParallel()
         return;
 
     // Otherwise, assume that a thread will go idle soon, and donate.
-    m_stack.donateSomeCellsTo(m_heap.m_sharedMarkStack);
+    from.donateSomeCellsTo(to);
 
     m_heap.m_markingConditionVariable.notifyAll();
 }
 
-void SlotVisitor::drain()
+void SlotVisitor::donateKnownParallel()
+{
+    donateKnownParallel(m_collectorStack, *m_heap.m_sharedCollectorMarkStack);
+    donateKnownParallel(m_mutatorStack, *m_heap.m_sharedMutatorMarkStack);
+}
+
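+// m_mutatorIsStopped caches whether the collector currently believes the world is stopped;
+// while it is true, visiting can presumably skip synchronization that is only needed when the
+// mutator runs concurrently. m_canOptimizeForStoppedMutator gates whether we exploit this.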
+void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
+{
+    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::updateMutatorIsStopped()
+{
+    if (mutatorIsStoppedIsUpToDate())
+        return;
+    updateMutatorIsStopped(holdLock(m_rightToRun));
+}
+
+bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
+{
+    return !m_mutatorIsStopped;
+}
+
+bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
+{
+    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
+}
+
+void SlotVisitor::optimizeForStoppedMutator()
+{
+    m_canOptimizeForStoppedMutator = true;
+}
+
+void SlotVisitor::drain(MonotonicTime timeout)
 {
     ASSERT(m_isInParallelMode);
-   
-    while (!m_stack.isEmpty()) {
-        m_stack.refill();
-        for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
-            visitChildren(m_stack.removeLast());
+    
+    auto locker = holdLock(m_rightToRun);
+    
+    while ((!m_collectorStack.isEmpty() || !m_mutatorStack.isEmpty()) && !hasElapsed(timeout)) {
+        updateMutatorIsStopped(locker);
+        if (!m_collectorStack.isEmpty()) {
+            m_collectorStack.refill();
+            m_isVisitingMutatorStack = false;
+            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_collectorStack.canRemoveLast() && countdown--;)
+                visitChildren(m_collectorStack.removeLast());
+        } else if (!m_mutatorStack.isEmpty()) {
+            m_mutatorStack.refill();
+            // We know for sure that we are visiting objects because of the barrier, not because of
+            // marking. Marking will visit an object exactly once. The barrier will visit it
+            // possibly many times, and always after it was already marked.
+            m_isVisitingMutatorStack = true;
+            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_mutatorStack.canRemoveLast() && countdown--;)
+                visitChildren(m_mutatorStack.removeLast());
+        }
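+        // Between scan batches, give any thread waiting to grab m_rightToRun a chance to take it.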
+        m_rightToRun.safepoint();
         donateKnownParallel();
     }
     
     mergeOpaqueRootsIfNecessary();
 }
 
-void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
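+// Termination is reached when no marker is actively scanning and both shared stacks are empty:
+// at that point, no thread can generate any more marking work.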
+bool SlotVisitor::didReachTermination()
+{
+    return !m_heap.m_numberOfActiveParallelMarkers
+        && m_heap.m_sharedCollectorMarkStack->isEmpty()
+        && m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+bool SlotVisitor::hasWork()
+{
+    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
+        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
+}
+
+SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
 {
     ASSERT(m_isInParallelMode);
     
     ASSERT(Options::numberOfGCMarkers());
     
     {
-        std::lock_guard<Lock> lock(m_heap.m_markingMutex);
+        LockHolder locker(m_heap.m_markingMutex);
         m_heap.m_numberOfActiveParallelMarkers++;
     }
     while (true) {
         {
-            std::unique_lock<Lock> lock(m_heap.m_markingMutex);
+            LockHolder locker(m_heap.m_markingMutex);
             m_heap.m_numberOfActiveParallelMarkers--;
             m_heap.m_numberOfWaitingParallelMarkers++;
 
-            // How we wait differs depending on drain mode.
             if (sharedDrainMode == MasterDrain) {
-                // Wait until either termination is reached, or until there is some work
-                // for us to do.
                 while (true) {
-                    // Did we reach termination?
-                    if (!m_heap.m_numberOfActiveParallelMarkers
-                        && m_heap.m_sharedMarkStack.isEmpty()) {
-                        // Let any sleeping slaves know it's time for them to return;
+                    if (hasElapsed(timeout))
+                        return SharedDrainResult::TimedOut;
+                    
+                    if (didReachTermination()) {
                         m_heap.m_markingConditionVariable.notifyAll();
-                        return;
+                        return SharedDrainResult::Done;
                     }
                     
-                    // Is there work to be done?
-                    if (!m_heap.m_sharedMarkStack.isEmpty())
+                    if (hasWork())
                         break;
                     
-                    // Otherwise wait.
-                    m_heap.m_markingConditionVariable.wait(lock);
+                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                 }
             } else {
                 ASSERT(sharedDrainMode == SlaveDrain);
+
+                if (hasElapsed(timeout))
+                    return SharedDrainResult::TimedOut;
                 
-                // Did we detect termination? If so, let the master know.
-                if (!m_heap.m_numberOfActiveParallelMarkers
-                    && m_heap.m_sharedMarkStack.isEmpty())
+                if (didReachTermination())
                     m_heap.m_markingConditionVariable.notifyAll();
 
-                m_heap.m_markingConditionVariable.wait(
-                    lock,
+                m_heap.m_markingConditionVariable.waitUntil(
+                    m_heap.m_markingMutex, timeout,
                     [this] {
-                        return !m_heap.m_sharedMarkStack.isEmpty()
+                        return hasWork()
                             || m_heap.m_parallelMarkersShouldExit;
                     });
                 
-                // Is the current phase done? If so, return from this function.
                 if (m_heap.m_parallelMarkersShouldExit)
-                    return;
+                    return SharedDrainResult::Done;
             }
 
-            m_stack.stealSomeCellsFrom(
-                m_heap.m_sharedMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
+            m_collectorStack.stealSomeCellsFrom(
+                *m_heap.m_sharedCollectorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
+            m_mutatorStack.stealSomeCellsFrom(
+                *m_heap.m_sharedMutatorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
             m_heap.m_numberOfActiveParallelMarkers++;
             m_heap.m_numberOfWaitingParallelMarkers--;
         }
         
-        drain();
+        drain(timeout);
+    }
+}
+
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
+{
+    donateAndDrain(timeout);
+    return drainFromShared(MasterDrain, timeout);
+}
+
+SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
+{
+    ASSERT(m_isInParallelMode);
+    
+    ASSERT(Options::numberOfGCMarkers());
+    
+    if (!m_heap.hasHeapAccess() || m_heap.collectorBelievesThatTheWorldIsStopped()) {
+        // Draining passively is only an optimization over drainInParallel() when the mutator is
+        // running concurrently; if the mutator is stopped or has no heap access, just drain
+        // actively.
+        return drainInParallel(timeout);
+    }
+    
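+    // Drain passively: do no scanning on this thread; just wait until the parallel markers reach
+    // termination or the timeout passes.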
+    LockHolder locker(m_heap.m_markingMutex);
+    for (;;) {
+        if (hasElapsed(timeout))
+            return SharedDrainResult::TimedOut;
+        
+        if (didReachTermination()) {
+            m_heap.m_markingConditionVariable.notifyAll();
+            return SharedDrainResult::Done;
+        }
+        
+        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
     }
 }
 
 void SlotVisitor::addOpaqueRoot(void* root)
 {
+    if (!root)
+        return;
+    
     if (Options::numberOfGCMarkers() == 1) {
         // Put directly into the shared HashSet.
         m_heap.m_opaqueRoots.add(root);
@@ -438,13 +610,18 @@ void SlotVisitor::addOpaqueRoot(void* root)
 
 bool SlotVisitor::containsOpaqueRoot(void* root) const
 {
+    if (!root)
+        return false;
+    
     ASSERT(!m_isInParallelMode);
-    ASSERT(m_opaqueRoots.isEmpty());
     return m_heap.m_opaqueRoots.contains(root);
 }
 
 TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
 {
+    if (!root)
+        return FalseTriState;
+    
     if (m_opaqueRoots.contains(root))
         return TrueTriState;
     std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
@@ -453,13 +630,6 @@ TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
     return MixedTriState;
 }
 
-int SlotVisitor::opaqueRootCount()
-{
-    ASSERT(!m_isInParallelMode);
-    ASSERT(m_opaqueRoots.isEmpty());
-    return m_heap.m_opaqueRoots.size();
-}
-
 void SlotVisitor::mergeOpaqueRootsIfNecessary()
 {
     if (m_opaqueRoots.isEmpty())
@@ -483,15 +653,14 @@ void SlotVisitor::donate()
     donateKnownParallel();
 }
 
-void SlotVisitor::donateAndDrain()
+void SlotVisitor::donateAndDrain(MonotonicTime timeout)
 {
     donate();
-    drain();
+    drain(timeout);
 }
 
 void SlotVisitor::mergeOpaqueRoots()
 {
-    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
     {
         std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
         for (auto* root : m_opaqueRoots)
@@ -500,22 +669,30 @@ void SlotVisitor::mergeOpaqueRoots()
     m_opaqueRoots.clear();
 }
 
-void SlotVisitor::harvestWeakReferences()
+void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
 {
-    for (WeakReferenceHarvester* current = m_heap.m_weakReferenceHarvesters.head(); current; current = current->next())
-        current->visitWeakReferences(*this);
+    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
 }
 
-void SlotVisitor::finalizeUnconditionalFinalizers()
+void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
 {
-    while (m_heap.m_unconditionalFinalizers.hasNext())
-        m_heap.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
+    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
+}
+
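+// Record a detected visit race. In debug builds we also remember the race on the Heap, under a
+// lock, so that it can be inspected later.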
+void SlotVisitor::didRace(const VisitRaceKey& race)
+{
+    if (Options::verboseVisitRace())
+        dataLog(toCString("GC visit race: ", race, "\n"));
+    
+    if (!ASSERT_DISABLED) {
+        auto locker = holdLock(heap()->m_visitRaceLock);
+        heap()->m_visitRaces.add(race);
+    }
 }
 
-void SlotVisitor::dump(PrintStream&) const
+void SlotVisitor::dump(PrintStream& out) const
 {
-    for (const JSCell* cell : markStack())
-        dataLog(*cell, "\n");
+    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
 }
 
 } // namespace JSC