/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "SlotVisitorInlines.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain.
    const ClassInfo* info = cell->classInfo();
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

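// Called at the start of a marking phase. For an eden collection we reset the per-cycle state;
// for a full collection the local opaque roots must already have been merged into the Heap. We
// also pick up the active heap snapshot builder (if any) and the current marking version.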
void SlotVisitor::didStartMarking()
{
    if (heap()->collectionScope() == CollectionScope::Full)
        ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
    else
        reset();

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    RELEASE_ASSERT(!m_opaqueRoots.size());
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    m_collectorStack.clear();
    m_mutatorStack.clear();
}

void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

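// Marks a cell discovered by conservative scanning. The pointer may refer either to a JSCell or
// to an auxiliary (butterfly) allocation, so we dispatch on the cell kind: JS cells are validated
// and pushed onto the mark stack, while auxiliaries are merely noted as live.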
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;
    
    ASSERT(!m_isCheckingForDefaultMarkViolation);
    
    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();
        
        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };
        
        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");
        
        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");
        
#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (structureID >= heap()->structureIDTable().size())
            die("GC scan found corrupt object: structureID is out of bounds!\n");
#endif
    };
    
    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
        validateCell(static_cast<JSCell*>(heapCell));
    
    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;
    
    switch (heapCell->cellKind()) {
    case HeapCell::JSCell: {
        // We have ample budget to perform validation here.
        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);
        
        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }
        
    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

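// appendUnbarriered() reports an outgoing reference from the cell currently being visited. If a
// heap snapshot is being built, an edge from m_currentCell is recorded; appendHidden() marks the
// value without recording a snapshot edge.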
void SlotVisitor::appendUnbarriered(JSValue value)
{
    if (!value || !value.isCell())
        return;

    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, value.asCell());

    setMarkedAndAppendToMarkStack(value.asCell());
}

void SlotVisitor::appendHidden(JSValue value)
{
    if (!value || !value.isCell())
        return;

    setMarkedAndAppendToMarkStack(value.asCell());
}

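// Sets the cell's mark bit and, if it was not already marked, turns the cell grey and pushes it
// onto the collector mark stack. The work is dispatched to a template specialized on the cell's
// container (MarkedBlock or LargeAllocation).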
void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
{
    SuperSamplerScope superSamplerScope(false);
    
    ASSERT(!m_isCheckingForDefaultMarkViolation);
    if (!cell)
        return;

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif
    
    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell)
{
    container.aboutToMark(m_markingVersion);
    
    if (container.testAndSetMarked(cell))
        return;
    
    ASSERT(cell->structure());
    
    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);
    
    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    ASSERT(!cell->isZapped());
    
    container.noteMarked();
    
    m_visitCount++;
    m_bytesVisited += container.cellSize();
    
    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

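// Auxiliary (butterfly) allocations have no children to scan, so marking one just sets its mark
// bit and accounts for its size; nothing is pushed onto a mark stack.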
void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);
    
    ASSERT(cell->heap() == heap());
    
    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;
    
    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.
    
    CellContainer container = cell->cellContainer();
    
    container.assertValidCell(vm(), cell);
    container.noteMarked();
    
    m_visitCount++;
    m_bytesVisited += container.cellSize();
}

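// RAII helper that records the cell currently being visited, so that outgoing edges reported via
// appendUnbarriered() can be attributed to it (the heap snapshot builder reads m_currentCell).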
class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

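// Visits one cell taken from a mark stack: the cell is turned black and its outgoing references
// are enumerated by the appropriate visitChildren() implementation, which appends them back to
// this visitor. Strings, final objects, and arrays are dispatched directly to avoid the
// method-table indirection.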
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    
    SetCurrentCellScope currentCellScope(*this, cell);
    
    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }
    
    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.
    
    cell->setCellState(CellState::PossiblyBlack);
    
    WTF::storeLoadFence();
    
    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }
    
    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (m_isFirstVisit)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

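// Donates some of the cells on a local mark stack to the corresponding shared mark stack so that
// idle marking threads can pick them up. The heuristics below deliberately err on the side of
// not donating.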
void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    donateKnownParallel(m_collectorStack, *m_heap.m_sharedCollectorMarkStack);
    donateKnownParallel(m_mutatorStack, *m_heap.m_sharedMutatorMarkStack);
}

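// m_mutatorIsStopped caches whether the collector currently believes the world is stopped and
// this visitor is allowed to take advantage of that (m_canOptimizeForStoppedMutator). The cache
// is refreshed under m_rightToRun when it goes stale.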
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

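// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Work from
// the collector stack counts as a first visit; work from the mutator (barrier) stack is a
// re-visit. After each batch of at most Options::minimumNumberOfScansBetweenRebalance() visits we
// hit a safepoint and consider donating work to other markers.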
void SlotVisitor::drain(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    auto locker = holdLock(m_rightToRun);
    
    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        if (!m_collectorStack.isEmpty()) {
            m_collectorStack.refill();
            m_isFirstVisit = true;
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_collectorStack.canRemoveLast() && countdown--;)
                visitChildren(m_collectorStack.removeLast());
        } else if (!m_mutatorStack.isEmpty()) {
            m_mutatorStack.refill();
            // We know for sure that we are visiting objects because of the barrier, not because of
            // marking. Marking will visit an object exactly once. The barrier will visit it
            // possibly many times, and always after it was already marked.
            m_isFirstVisit = false;
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_mutatorStack.canRemoveLast() && countdown--;)
                visitChildren(m_mutatorStack.removeLast());
        } else
            break;
        m_rightToRun.safepoint();
        donateKnownParallel();
    }
    
    mergeIfNecessary();
}

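// Marking has terminated when no marker is active and both shared mark stacks are empty. The
// unlocked overload additionally requires this visitor's own stacks to be empty.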
bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return isEmpty() && didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const LockHolder&)
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && m_heap.m_sharedCollectorMarkStack->isEmpty()
        && m_heap.m_sharedMutatorMarkStack->isEmpty();
}

bool SlotVisitor::hasWork(const LockHolder&)
{
    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

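// The parallel marking loop: repeatedly steal work from the shared mark stacks and drain it
// locally. In MasterDrain mode this returns when marking terminates or the timeout elapses; in
// SlaveDrain mode a helper thread also returns when the Heap asks parallel markers to exit.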
SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());
    
    {
        LockHolder locker(m_heap.m_markingMutex);
        m_heap.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            LockHolder locker(m_heap.m_markingMutex);
            m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;
                    
                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }
                    
                    if (hasWork(locker))
                        break;
                    
                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;
                
                if (didReachTermination(locker))
                    m_heap.m_markingConditionVariable.notifyAll();

                m_heap.m_markingConditionVariable.waitUntil(
                    m_heap.m_markingMutex, timeout,
                    [&] {
                        return hasWork(locker)
                            || m_heap.m_parallelMarkersShouldExit;
                    });
                
                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            m_collectorStack.stealSomeCellsFrom(
                *m_heap.m_sharedCollectorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
            m_mutatorStack.stealSomeCellsFrom(
                *m_heap.m_sharedMutatorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }
        
        drain(timeout);
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

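// Hands all local work to the shared mark stacks and then merely waits for the helper threads to
// finish it. This only pays off when there are enough markers and the mutator is running
// concurrently; otherwise we fall back to drainInParallel().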
SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());
    
    if (Options::numberOfGCMarkers() < 4
        || !m_heap.hasHeapAccess()
        || m_heap.collectorBelievesThatTheWorldIsStopped()) {
        // This is an optimization over drainInParallel() when we have a concurrent mutator;
        // otherwise it is not profitable.
        return drainInParallel(timeout);
    }

    LockHolder locker(m_heap.m_markingMutex);
    m_collectorStack.transferTo(*m_heap.m_sharedCollectorMarkStack);
    m_mutatorStack.transferTo(*m_heap.m_sharedMutatorMarkStack);
    m_heap.m_markingConditionVariable.notifyAll();
    
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;
        
        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }
        
        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

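// Opaque roots let visitors express reachability of entities that are not themselves JSCells.
// With a single marker the root goes straight into the Heap's shared set; with parallel markers
// it is buffered locally and merged periodically so the local sets do not grow too large.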
void SlotVisitor::addOpaqueRoot(void* root)
{
    if (!root)
        return;
    
    if (m_ignoreNewOpaqueRoots)
        return;
    
    if (Options::numberOfGCMarkers() == 1) {
        // Put directly into the shared HashSet.
        m_heap.m_opaqueRoots.add(root);
        return;
    }
    // Put into the local set, but merge with the shared one every once in
    // a while to make sure that the local sets don't grow too large.
    mergeOpaqueRootsIfProfitable();
    m_opaqueRoots.add(root);
}

bool SlotVisitor::containsOpaqueRoot(void* root) const
{
    if (!root)
        return false;
    
    ASSERT(!m_isInParallelMode);
    return m_heap.m_opaqueRoots.contains(root);
}

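// Parallel-safe query: returns TrueTriState if the root is known to be present in either the
// local or the shared set, and MixedTriState otherwise (the root may still be added by another
// marker later in the cycle).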
TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
    if (!root)
        return FalseTriState;
    
    if (m_opaqueRoots.contains(root))
        return TrueTriState;
    std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
    if (m_heap.m_opaqueRoots.contains(root))
        return TrueTriState;
    return MixedTriState;
}

void SlotVisitor::mergeIfNecessary()
{
    if (m_opaqueRoots.isEmpty())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::mergeOpaqueRootsIfProfitable()
{
    if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::donate()
{
    ASSERT(m_isInParallelMode);
    if (Options::numberOfGCMarkers() == 1)
        return;
    
    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

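// Merges this visitor's locally accumulated opaque roots into the Heap's shared set under the
// opaque-roots lock, then empties the local set.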
void SlotVisitor::mergeOpaqueRoots()
{
    {
        std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_heap.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}

void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}

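// Records a visit race detected during concurrent marking: the cell is turned grey again and put
// on the Heap's race mark stack so that it will be revisited.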
void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));
    
    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

} // namespace JSC