/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    VM& vm = *cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

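// Called at the start of a marking phase. A full collection expects the opaque-root set to have
// been merged already; an eden collection resets this visitor's per-cycle state. This also caches
// the active heap-snapshot builder (if any) and the current marking version.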
void SlotVisitor::didStartMarking()
{
    if (heap()->collectionScope() == CollectionScope::Full)
        RELEASE_ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
    else
        reset();

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    RELEASE_ASSERT(!m_opaqueRoots.size());
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

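// Visits every pointer found by the conservative root scan. Each candidate may be either a JSCell
// or an auxiliary allocation, so it goes through appendJSCellOrAuxiliary().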
void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;

    ASSERT(!m_isCheckingForDefaultMarkViolation);

    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();

        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };

        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");

        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");

#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (structureID >= heap()->structureIDTable().size())
            die("GC scan found corrupt object: structureID is out of bounds!\n");
#endif
    };

    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
        validateCell(static_cast<JSCell*>(heapCell));

    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;

    switch (heapCell->cellKind()) {
    case HeapCell::JSCell: {
        // We have ample budget to perform validation here.

        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);

        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }

    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

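// Slow paths for append()/appendHidden(): mark the cell and push it onto the appropriate mark
// stack. The non-hidden path also records a heap-snapshot edge when a snapshot is being built;
// appendHiddenSlow() skips that edge.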
void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, cell);

    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif

    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;

    ASSERT(cell->structure());

    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);

    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    ASSERT(!cell->isZapped());

    container.noteMarked();

    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

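// Marks an auxiliary (non-JSCell) allocation, such as a butterfly, reached from a cell's
// visitChildren. Auxiliaries have no children of their own, so they are accounted for immediately
// rather than pushed onto a mark stack.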
void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);

    ASSERT(cell->heap() == heap());

    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;

    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.

    CellContainer container = cell->cellContainer();

    container.assertValidCell(vm(), cell);
    container.noteMarked();

    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

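// RAII helper that records which cell is currently being visited, so that appendSlow() can emit
// heap-snapshot edges from the correct source cell.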
class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

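// Blackens the cell and then dispatches to its visitChildren implementation. String, final object,
// and array types are dispatched directly as an optimization; everything else goes through the
// method table.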
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));

    SetCurrentCellScope currentCellScope(*this, cell);

    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }

    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.

    cell->setCellState(CellState::PossiblyBlack);

    WTF::storeLoadFence();

    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }

    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (m_isFirstVisit)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

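// Caches whether this visitor may treat the mutator as stopped: true only when the collector
// believes the world is stopped and this visitor has opted in via optimizeForStoppedMutator().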
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

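// Drains this visitor's local mark stacks, visiting the children of each popped cell, until the
// stacks are empty or the timeout elapses. Work is periodically donated to the shared stacks so
// other markers can steal it.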
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    auto locker = holdLock(m_rightToRun);

    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();

                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        if (status == IterationStatus::Continue)
            break;

        m_rightToRun.safepoint();
        donateKnownParallel();
    }

    mergeIfNecessary();
}

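// Performs a bounded increment of marking work: pulls up to bytesRequested worth of cells from the
// shared stacks, visits them (counting both cell bytes and auxiliary bytes), then donates any
// leftover local work back. Returns the number of bytes actually visited.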
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };

    {
        auto locker = holdLock(m_rightToRun);

        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();

                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();
    mergeIfNecessary();

    return bytesVisited();
}

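// Marking has terminated when this visitor's stacks are empty, no parallel markers are active, and
// both shared stacks are empty.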
bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker&)
{
    return isEmpty()
        && !m_heap.m_numberOfActiveParallelMarkers
        && m_heap.m_sharedCollectorMarkStack->isEmpty()
        && m_heap.m_sharedMutatorMarkStack->isEmpty();
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

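// Repeatedly steals work from the shared stacks and drains it. In MasterDrain mode this visitor
// waits for either work or termination and reports the result; in SlaveDrain mode it schedules the
// stop-if-necessary timer on termination and exits when parallel markers are told to exit.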
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        {
            LockHolder locker(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }

                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;

                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();

                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);

                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    stack.stealSomeCellsFrom(
                        correspondingGlobalStack(stack),
                        m_heap.m_numberOfWaitingParallelMarkers);
                    return IterationStatus::Continue;
                });

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }

        drain(timeout);
        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.collectorBelievesThatTheWorldIsStopped()) {
        // Draining passively is an optimization over drainInParallel() only when we have a
        // concurrent mutator; otherwise it is not profitable, so fall back to draining actively.
        return drainInParallel(timeout);
    }

    LockHolder locker(m_heap.m_markingMutex);
    donateAll(locker);

    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;

        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }

        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

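// Transfers everything on this visitor's local stacks to the shared stacks and wakes any waiting
// markers.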
void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;

    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::addOpaqueRoot(void* root)
{
    if (!root)
        return;

    if (m_ignoreNewOpaqueRoots)
        return;

    if (Options::numberOfGCMarkers() == 1) {
        // Put directly into the shared HashSet.
        m_heap.m_opaqueRoots.add(root);
        return;
    }
    // Put into the local set, but merge with the shared one every once in
    // a while to make sure that the local sets don't grow too large.
    mergeOpaqueRootsIfProfitable();
    m_opaqueRoots.add(root);
}

bool SlotVisitor::containsOpaqueRoot(void* root) const
{
    if (!root)
        return false;

    ASSERT(!m_isInParallelMode);
    return m_heap.m_opaqueRoots.contains(root);
}

TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
    if (!root)
        return FalseTriState;

    if (m_opaqueRoots.contains(root))
        return TrueTriState;
    std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
    if (m_heap.m_opaqueRoots.contains(root))
        return TrueTriState;
    return MixedTriState;
}

void SlotVisitor::mergeIfNecessary()
{
    if (m_opaqueRoots.isEmpty())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::mergeOpaqueRootsIfProfitable()
{
    if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (Options::numberOfGCMarkers() == 1)
        return;

    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

void SlotVisitor::mergeOpaqueRoots()
{
    {
        std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_heap.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}

void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}

void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));

    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

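// Maps one of this visitor's local mark stacks to its shared, heap-wide counterpart.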
MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

} // namespace JSC