Source/JavaScriptCore/heap/SlotVisitor.cpp (WebKit, revision 84afa4594a88f62e45cb59686d8465419ae71fad)
/*
 * Copyright (C) 2012-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "BlockDirectoryInlines.h"
#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapAnalyzer.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include "MarkingConstraintSolver.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/ListDump.h>
#include <wtf/Lock.h>
#include <wtf/StdLibExtras.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    VM& vm = cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

void SlotVisitor::didStartMarking()
{
    auto scope = heap()->collectionScope();
    if (scope) {
        switch (*scope) {
        case CollectionScope::Eden:
            reset();
            break;
        case CollectionScope::Full:
            m_extraMemorySize = 0;
            break;
        }
    }

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapAnalyzer = heapProfiler->activeHeapAnalyzer();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapAnalyzer = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

void SlotVisitor::append(const ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

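// Entry point for conservative roots. A conservative root may point either at a proper JSCell or
// at an auxiliary (non-JSCell) allocation, so we dispatch on the cell kind. The validateCell
// lambda below checks the StructureID and, if the cell looks corrupt, dumps as much state as it
// can before crashing.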
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;

    ASSERT(!m_isCheckingForDefaultMarkViolation);

    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();

        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };

        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");

        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");

#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (!heap()->structureIDTable().isValid(structureID))
            die("GC scan found corrupt object: structureID is invalid!\n");
#endif
    };

    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && isJSCellKind(heapCell->cellKind()))
        validateCell(static_cast<JSCell*>(heapCell));

    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;

    switch (heapCell->cellKind()) {
    case HeapCell::JSCell:
    case HeapCell::JSCellWithInteriorPointers: {
        // We have ample budget to perform validation here.

        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);

        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }

    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapAnalyzer))
        m_heapAnalyzer->analyzeEdge(m_currentCell, cell, m_rootMarkReason);

    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif

    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

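// Marks the cell in its container (MarkedBlock or LargeAllocation). Only the first thread to set
// the mark bit in this cycle turns the cell grey and pushes it onto the mark stack; everyone else
// bails out at testAndSetMarked().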
template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;

    ASSERT(cell->structure());

    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);

    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

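// Every cell that makes it onto the collector mark stack is also accounted for here: the
// container is told about the new mark, and m_visitCount / m_bytesVisited are updated for this
// visitor.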
template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));
#if CPU(X86_64)
    if (Options::dumpZappedCellCrashData()) {
        if (UNLIKELY(cell->isZapped()))
            reportZappedCellAndCrash(cell);
    }
#endif
    ASSERT(!cell->isZapped());

    container.noteMarked();

    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);

    ASSERT(cell->heap() == heap());

    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;

    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.

    CellContainer container = cell->cellContainer();

    container.assertValidCell(vm(), cell);
    container.noteMarked();

    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

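// RAII helper that records which cell is currently being visited, so heap analysis can attribute
// outgoing edges to the right source object. The asserts ensure visits do not nest.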
class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(m_heap.isMarked(cell));

    SetCurrentCellScope currentCellScope(*this, cell);

    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }

    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.

    cell->setCellState(CellState::PossiblyBlack);

    WTF::storeLoadFence();

    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
#if CPU(X86_64)
        if (Options::dumpZappedCellCrashData()) {
            Structure* structure = cell->structure(vm());
            if (LIKELY(structure)) {
                const MethodTable* methodTable = &structure->classInfo()->methodTable;
                methodTable->visitChildren(const_cast<JSCell*>(cell), *this);
                break;
            }
            reportZappedCellAndCrash(const_cast<JSCell*>(cell));
        }
#endif
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }

    if (UNLIKELY(m_heapAnalyzer)) {
        if (m_isFirstVisit)
            m_heapAnalyzer->analyzeNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

inline void SlotVisitor::propagateExternalMemoryVisitedIfNecessary()
{
    if (m_isFirstVisit) {
        if (m_extraMemorySize.hasOverflowed())
            heap()->reportExtraMemoryVisited(std::numeric_limits<size_t>::max());
        else if (m_extraMemorySize)
            heap()->reportExtraMemoryVisited(m_extraMemorySize.unsafeGet());
        m_extraMemorySize = 0;
    }
}

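// Work donation: a visitor whose local stack holds more than one cell may hand some of it to the
// corresponding shared (global) stack so that idle marker threads can steal it. Every early
// return below is a heuristic that errs on the side of not taking the lock.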
void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

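// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Each pass
// picks one non-empty stack, visits up to minimumNumberOfScansBetweenRebalance() cells from it,
// then takes a safepoint and tries to donate work before looping again.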
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    auto locker = holdLock(m_rightToRun);

    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();

                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        propagateExternalMemoryVisitedIfNecessary();
        if (status == IterationStatus::Continue)
            break;

        m_rightToRun.safepoint();
        donateKnownParallel();
    }
}

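// Incremental draining: pull roughly bytesRequested worth of cells from the shared stacks into
// the local ones, visit until the byte budget (cell bytes plus non-cell bytes) is met, then
// donate any leftover work back to the shared stacks. Returns the number of bytes actually
// visited.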
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };

    {
        auto locker = holdLock(m_rightToRun);

        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();

                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            propagateExternalMemoryVisitedIfNecessary();
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();

    return bytesVisited();
}

bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker& locker)
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && !hasWork(locker);
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !isEmpty()
        || !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

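// The parallel drain loop. Each marker thread repeatedly updates the active/waiting marker
// counts, waits (as master or slave) for shared work, a bonus visitor task, termination, or a
// timeout, steals some cells from the global stacks, and drains them locally. The master returns
// as soon as termination is detected; slaves keep looping until m_parallelMarkersShouldExit is
// set.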
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        RefPtr<SharedTask<void(SlotVisitor&)>> bonusTask;

        {
            auto locker = holdLock(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }

                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;

                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();

                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_bonusVisitorTask
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);

                if (!hasWork(locker)
                    && m_heap.m_bonusVisitorTask)
                    bonusTask = m_heap.m_bonusVisitorTask;

                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            if (!bonusTask && isEmpty()) {
                forEachMarkStack(
                    [&] (MarkStackArray& stack) -> IterationStatus {
                        stack.stealSomeCellsFrom(
                            correspondingGlobalStack(stack),
                            m_heap.m_numberOfWaitingParallelMarkers);
                        return IterationStatus::Continue;
                    });
            }

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }

        if (bonusTask) {
            bonusTask->run(*this);

            // The main thread could still be running, and may run for a while. Unless we clear the task
            // ourselves, we will keep looping around trying to run the task.
            {
                auto locker = holdLock(m_heap.m_markingMutex);
                if (m_heap.m_bonusVisitorTask == bonusTask)
                    m_heap.m_bonusVisitorTask = nullptr;
                bonusTask = nullptr;
                m_heap.m_markingConditionVariable.notifyAll();
            }
        } else {
            RELEASE_ASSERT(!isEmpty());
            drain(timeout);
        }

        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

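// Passive variant of drainInParallel(): instead of marking on this thread, donate all local work
// to the shared stacks and simply wait for the parallel markers to reach termination. That only
// pays off with a concurrent mutator, so with a single marker, a waiting mutator, released heap
// access, or a stopped world we fall back to drainInParallel().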
SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.worldIsStopped()) {
        // Draining passively is an optimization over drainInParallel() only when we have a
        // concurrent mutator; otherwise it is not profitable.
        return drainInParallel(timeout);
    }

    donateAll(holdLock(m_heap.m_markingMutex));
    return waitForTermination(timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::waitForTermination(MonotonicTime timeout)
{
    auto locker = holdLock(m_heap.m_markingMutex);
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;

        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }

        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;

    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (Options::numberOfGCMarkers() == 1)
        return;

    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));

    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

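// Maps a local mark stack to the shared stack it drains into and steals from: the collector
// stack pairs with the heap's shared collector stack, the mutator stack with the shared mutator
// stack.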
MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

void SlotVisitor::addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
{
    RELEASE_ASSERT(m_currentSolver);
    RELEASE_ASSERT(m_currentConstraint);
    RELEASE_ASSERT(task);

    m_currentSolver->addParallelTask(task, *m_currentConstraint);
}

#if CPU(X86_64)
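// Crash-report helper for the dumpZappedCellCrashData option. When a zapped cell is encountered
// during marking, gather the cell's first two words, the MarkedBlock that contains it (if any),
// that block's allocation state, and whether the pointer is properly aligned within the block,
// then crash with all of that packed into CRASH_WITH_INFO.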
NEVER_INLINE NO_RETURN_DUE_TO_CRASH NOT_TAIL_CALLED void SlotVisitor::reportZappedCellAndCrash(JSCell* cell)
{
    MarkedBlock::Handle* foundBlockHandle = nullptr;
    uint64_t* cellWords = reinterpret_cast_ptr<uint64_t*>(cell);

    uintptr_t cellAddress = bitwise_cast<uintptr_t>(cell);
    uint64_t headerWord = cellWords[0];
    uint64_t zapReasonAndMore = cellWords[1];
    unsigned subspaceHash = 0;
    size_t cellSize = 0;

    m_heap.objectSpace().forEachBlock([&] (MarkedBlock::Handle* blockHandle) {
        if (blockHandle->contains(cell)) {
            foundBlockHandle = blockHandle;
            return IterationStatus::Done;
        }
        return IterationStatus::Continue;
    });

    uint64_t variousState = 0;
    MarkedBlock* foundBlock = nullptr;
    if (foundBlockHandle) {
        foundBlock = &foundBlockHandle->block();
        subspaceHash = StringHasher::computeHash(foundBlockHandle->subspace()->name());
        cellSize = foundBlockHandle->cellSize();

        variousState |= static_cast<uint64_t>(foundBlockHandle->isFreeListed()) << 0;
        variousState |= static_cast<uint64_t>(foundBlockHandle->isAllocated()) << 1;
        variousState |= static_cast<uint64_t>(foundBlockHandle->isEmpty()) << 2;
        variousState |= static_cast<uint64_t>(foundBlockHandle->needsDestruction()) << 3;
        variousState |= static_cast<uint64_t>(foundBlock->isNewlyAllocated(cell)) << 4;

        ptrdiff_t cellOffset = cellAddress - reinterpret_cast<uint64_t>(foundBlockHandle->start());
        bool cellIsProperlyAligned = !(cellOffset % cellSize);
        variousState |= static_cast<uint64_t>(cellIsProperlyAligned) << 5;
    }

    CRASH_WITH_INFO(cellAddress, headerWord, zapReasonAndMore, subspaceHash, cellSize, foundBlock, variousState);
}
#endif // CPU(X86_64)

} // namespace JSC