// Source/JavaScriptCore/heap/SlotVisitor.cpp
// From the WebKit change "CodeBlocks should be in IsoSubspaces".
/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/ListDump.h>
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n" , cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    VM& vm = *cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

void SlotVisitor::didStartMarking()
{
    if (heap()->collectionScope() == CollectionScope::Eden)
        reset();

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

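// Conservative roots may point at either real JSCells or auxiliary (butterfly / backing-store)
// allocations, so this path validates the cell where it can and then dispatches on the cell
// kind before marking.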
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;
    
    ASSERT(!m_isCheckingForDefaultMarkViolation);
    
    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();
        
        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };
        
        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");
        
        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");
        
#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (structureID >= heap()->structureIDTable().size())
            die("GC scan found corrupt object: structureID is out of bounds!\n");
#endif
    };
    
    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
        validateCell(static_cast<JSCell*>(heapCell));
    
    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;
    
    switch (heapCell->cellKind()) {
    case HeapCell::JSCell: {
        // We have ample budget to perform validation here.
    
        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);
        
        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }
        
    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, cell);
    
    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif
    
    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

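// Shared marking path for cells in MarkedBlocks and LargeAllocations: atomically mark the
// cell, and if this visitor is the one that marked it, grey it and push it for visiting.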
template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;
    
    ASSERT(cell->structure());
    
    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);
    
    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(Heap::isMarked(cell));
    ASSERT(!cell->isZapped());
    
    container.noteMarked();
    
    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);
    
    ASSERT(cell->heap() == heap());
    
    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;
    
    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.
    
    CellContainer container = cell->cellContainer();
    
    container.assertValidCell(vm(), cell);
    container.noteMarked();
    
    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

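// Blackens the cell, publishes that state with a store-load fence (so the concurrent write
// barrier observes it), and then dispatches to the type-specific visitChildren. The common
// types get a direct call; everything else goes through the method table.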
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(Heap::isMarked(cell));
    
    SetCurrentCellScope currentCellScope(*this, cell);
    
    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }
    
    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.
    
    cell->setCellState(CellState::PossiblyBlack);
    
    WTF::storeLoadFence();
    
    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }
    
    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (m_isFirstVisit)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

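// Donates part of a local mark stack to the corresponding shared stack so that idle marker
// threads can steal it. The early returns below are heuristics that keep donation cheap.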
void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.worldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

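// Drains this visitor's local mark stacks until they are empty or the timeout elapses,
// periodically hitting a safepoint on m_rightToRun and donating work to other markers.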
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    auto locker = holdLock(m_rightToRun);
    
    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();
                
                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        if (status == IterationStatus::Continue)
            break;
        
        m_rightToRun.safepoint();
        donateKnownParallel();
    }
}

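// Performs one increment of draining for incremental marking: it pulls roughly bytesRequested
// worth of cells from the shared stacks, visits cells until that budget is met (counting both
// cell bytes and auxiliary bytes), then donates any leftover work back and reports the total.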
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };
    
    {
        auto locker = holdLock(m_rightToRun);
        
        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();
                    
                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();

    return bytesVisited();
}

bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker& locker)
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && !hasWork(locker);
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !isEmpty()
        || !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

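// The main loop for parallel marking. In MasterDrain mode the calling thread coordinates
// termination; in SlaveDrain mode it waits for shared work (or a bonus visitor task),
// drains it, and goes back to waiting until the parallel markers are told to exit.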
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        RefPtr<SharedTask<void(SlotVisitor&)>> bonusTask;
        
        {
            auto locker = holdLock(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;
            
            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }
                    
                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;
                
                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();
                    
                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_bonusVisitorTask
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);
                
                if (!hasWork(locker)
                    && m_heap.m_bonusVisitorTask)
                    bonusTask = m_heap.m_bonusVisitorTask;
                
                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }
            
            if (!bonusTask && isEmpty()) {
                forEachMarkStack(
                    [&] (MarkStackArray& stack) -> IterationStatus {
                        stack.stealSomeCellsFrom(
                            correspondingGlobalStack(stack),
                            m_heap.m_numberOfWaitingParallelMarkers);
                        return IterationStatus::Continue;
                    });
            }

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }
        
        if (bonusTask) {
            bonusTask->run(*this);
            
            // The main thread could still be running, and may run for a while. Unless we clear the task
            // ourselves, we will keep looping around trying to run the task.
            {
                auto locker = holdLock(m_heap.m_markingMutex);
                if (m_heap.m_bonusVisitorTask == bonusTask)
                    m_heap.m_bonusVisitorTask = nullptr;
                bonusTask = nullptr;
                m_heap.m_markingConditionVariable.notifyAll();
            }
        } else {
            RELEASE_ASSERT(!isEmpty());
            drain(timeout);
        }
        
        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

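// Like drainInParallel(), but when the mutator is running concurrently this thread donates
// all of its work to the shared stacks and simply waits for the parallel markers to reach
// termination instead of draining itself.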
SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());
    
    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.worldIsStopped()) {
        // This is an optimization over drainInParallel() when we have a concurrent mutator but
        // otherwise it is not profitable.
        return drainInParallel(timeout);
    }

    donateAll(holdLock(m_heap.m_markingMutex));
    return waitForTermination(timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::waitForTermination(MonotonicTime timeout)
{
    auto locker = holdLock(m_heap.m_markingMutex);
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;
        
        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }
        
        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;
    
    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    if (Options::numberOfGCMarkers() == 1)
        return;
    
    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}

void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}

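// Called when two visitors race on the same cell during concurrent marking: the cell is
// re-greyed and pushed onto the heap's race mark stack so it is guaranteed to be revisited.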
void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));
    
    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

void SlotVisitor::addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
{
    RELEASE_ASSERT(m_currentSolver);
    RELEASE_ASSERT(m_currentConstraint);
    RELEASE_ASSERT(task);
    
    m_currentSolver->addParallelTask(task, *m_currentConstraint);
}

} // namespace JSC