Get rid of HeapRootVisitor and make SlotVisitor less painful to use
[WebKit-https.git] / Source / JavaScriptCore / heap / SlotVisitor.cpp
/*
 * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "SlotVisitorInlines.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure and the cell's structure's structure should have the Structure
    // ClassInfo; in other words, the structure of a Structure is the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo();
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

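// Called at the start of each marking cycle. For a full collection the visitor's opaque roots
// must already have been merged into the Heap; for an eden collection the per-visitor counters
// are reset instead. This also picks up the active heap snapshot builder, if any, and snapshots
// the heap's current marking version.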
void SlotVisitor::didStartMarking()
{
    if (heap()->collectionScope() == CollectionScope::Full)
        ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
    else
        reset();

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    RELEASE_ASSERT(!m_opaqueRoots.size());
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    m_collectorStack.clear();
    m_mutatorStack.clear();
}

void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

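// Conservative roots may point at either proper JSCells or auxiliary (butterfly) allocations.
// This dispatches on the cell kind and, for JSCells, checks the StructureID aggressively so that
// a corrupt or half-initialized object is reported at the scan point rather than crashing later
// inside visitChildren().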
void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;
    
    ASSERT(!m_isCheckingForDefaultMarkViolation);
    
    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();
        
        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };
        
        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");
        
        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");
        
#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (structureID >= heap()->structureIDTable().size())
            die("GC scan found corrupt object: structureID is out of bounds!\n");
#endif
    };
    
    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
        validateCell(static_cast<JSCell*>(heapCell));
    
    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;
    
    switch (heapCell->cellKind()) {
    case HeapCell::JSCell: {
        // We have ample budget to perform validation here.
        
        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);
        
        jsCell->setCellState(CellState::Grey);

        appendToMarkStack(jsCell);
        return;
    }
        
    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

void SlotVisitor::appendUnbarriered(JSValue value)
{
    if (!value || !value.isCell())
        return;

    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, value.asCell());

    setMarkedAndAppendToMarkStack(value.asCell());
}

void SlotVisitor::appendHidden(JSValue value)
{
    if (!value || !value.isCell())
        return;

    setMarkedAndAppendToMarkStack(value.asCell());
}

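// Slow path shared by appendUnbarriered() and appendHidden(): marks the cell in its container
// (MarkedBlock or LargeAllocation) and, if this is the first time it has been marked in this
// cycle, turns it grey and pushes it onto the collector mark stack.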
void SlotVisitor::setMarkedAndAppendToMarkStack(JSCell* cell)
{
    SuperSamplerScope superSamplerScope(false);
    
    ASSERT(!m_isCheckingForDefaultMarkViolation);
    if (!cell)
        return;

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif
    
    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell)
{
    container.aboutToMark(m_markingVersion);
    
    if (container.testAndSetMarked(cell))
        return;
    
    ASSERT(cell->structure());
    
    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::Grey);
    
    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    ASSERT(!cell->isZapped());
    ASSERT(cell->cellState() == CellState::Grey);
    
    container.noteMarked();
    
    m_visitCount++;
    m_bytesVisited += container.cellSize();
    
    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);
    
    ASSERT(cell->heap() == heap());
    
    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;
    
    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.
    
    CellContainer container = cell->cellContainer();
    
    container.assertValidCell(vm(), cell);
    container.noteMarked();
    
    m_visitCount++;
    m_bytesVisited += container.cellSize();
}

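// RAII helper that records which cell is currently being visited so that heap snapshot edges
// (see appendUnbarriered()) can be attributed to the right parent node.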
class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

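// Visits a marked cell's outgoing references. The switch below special-cases the most common
// cell types (strings, final objects, arrays) so their visitChildren implementations are called
// directly; everything else falls back to the generic method table dispatch.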
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    
    SetCurrentCellScope currentCellScope(*this, cell);
    
    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (m_isVisitingMutatorStack)
            dataLog(" (mutator)");
        dataLog("\n");
    }
    
    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.
    
    cell->setCellState(CellState::AnthraciteOrBlack);
    
    WTF::storeLoadFence();
    
    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;
        
    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }
    
    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (!m_isVisitingMutatorStack)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    donateKnownParallel(m_collectorStack, *m_heap.m_sharedCollectorMarkStack);
    donateKnownParallel(m_mutatorStack, *m_heap.m_sharedMutatorMarkStack);
}

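// m_mutatorIsStopped caches, under m_rightToRun, whether this visitor may currently treat the
// mutator as stopped. That is only the case when the collector believes the world is stopped
// and the client has opted in via optimizeForStoppedMutator().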
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

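// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Between
// batches it re-checks whether the mutator is stopped, honors safepoint requests on m_rightToRun,
// and donates surplus cells to the shared stacks so that idle markers can steal them.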
void SlotVisitor::drain(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    auto locker = holdLock(m_rightToRun);
    
    while ((!m_collectorStack.isEmpty() || !m_mutatorStack.isEmpty()) && !hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        if (!m_collectorStack.isEmpty()) {
            m_collectorStack.refill();
            m_isVisitingMutatorStack = false;
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_collectorStack.canRemoveLast() && countdown--;)
                visitChildren(m_collectorStack.removeLast());
        } else if (!m_mutatorStack.isEmpty()) {
            m_mutatorStack.refill();
            // We know for sure that we are visiting objects because of the barrier, not because of
            // marking. Marking will visit an object exactly once. The barrier will visit it
            // possibly many times, and always after it was already marked.
            m_isVisitingMutatorStack = true;
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_mutatorStack.canRemoveLast() && countdown--;)
                visitChildren(m_mutatorStack.removeLast());
        }
        m_rightToRun.safepoint();
        donateKnownParallel();
    }
    
    mergeOpaqueRootsIfNecessary();
}

bool SlotVisitor::didReachTermination()
{
    return !m_heap.m_numberOfActiveParallelMarkers
        && m_heap.m_sharedCollectorMarkStack->isEmpty()
        && m_heap.m_sharedMutatorMarkStack->isEmpty();
}

bool SlotVisitor::hasWork()
{
    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

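// Parallel draining protocol: each marker alternates between stealing work from the shared mark
// stacks and draining it locally. A MasterDrain caller waits until either termination is reached
// (no active markers and empty shared stacks) or more shared work shows up; SlaveDrain callers
// wait until there is shared work or the parallel markers are told to exit.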
SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());
    
    {
        LockHolder locker(m_heap.m_markingMutex);
        m_heap.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            LockHolder locker(m_heap.m_markingMutex);
            m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;
                    
                    if (didReachTermination()) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }
                    
                    if (hasWork())
                        break;
                    
                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;
                
                if (didReachTermination())
                    m_heap.m_markingConditionVariable.notifyAll();

                m_heap.m_markingConditionVariable.waitUntil(
                    m_heap.m_markingMutex, timeout,
                    [this] {
                        return hasWork()
                            || m_heap.m_parallelMarkersShouldExit;
                    });
                
                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            m_collectorStack.stealSomeCellsFrom(
                *m_heap.m_sharedCollectorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
            m_mutatorStack.stealSomeCellsFrom(
                *m_heap.m_sharedMutatorMarkStack, m_heap.m_numberOfWaitingParallelMarkers);
            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }
        
        drain(timeout);
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

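// Passive variant of drainInParallel(): this thread hands all of its local work to the shared
// mark stacks and then merely waits for the parallel markers to reach termination, instead of
// draining anything itself.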
SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);
    
    ASSERT(Options::numberOfGCMarkers());
    
    if (Options::numberOfGCMarkers() < 4
        || !m_heap.hasHeapAccess()
        || m_heap.collectorBelievesThatTheWorldIsStopped()) {
        // Draining passively is only an optimization over drainInParallel() when we have enough
        // markers and a concurrent mutator; otherwise it is not profitable, so fall back.
        return drainInParallel(timeout);
    }

    LockHolder locker(m_heap.m_markingMutex);
    m_collectorStack.transferTo(*m_heap.m_sharedCollectorMarkStack);
    m_mutatorStack.transferTo(*m_heap.m_sharedMutatorMarkStack);
    m_heap.m_markingConditionVariable.notifyAll();
    
    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;
        
        if (didReachTermination()) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }
        
        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

void SlotVisitor::addOpaqueRoot(void* root)
{
    if (!root)
        return;
    
    if (Options::numberOfGCMarkers() == 1) {
        // Put directly into the shared HashSet.
        m_heap.m_opaqueRoots.add(root);
        return;
    }
    // Put into the local set, but merge with the shared one every once in
    // a while to make sure that the local sets don't grow too large.
    mergeOpaqueRootsIfProfitable();
    m_opaqueRoots.add(root);
}

bool SlotVisitor::containsOpaqueRoot(void* root) const
{
    if (!root)
        return false;
    
    ASSERT(!m_isInParallelMode);
    return m_heap.m_opaqueRoots.contains(root);
}

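// May be called while other markers are still running. A root found in either the local or the
// shared set is definitely an opaque root, but a miss is only "mixed": another marker's local
// set could still contain the root and simply not have been merged yet.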
TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
    if (!root)
        return FalseTriState;
    
    if (m_opaqueRoots.contains(root))
        return TrueTriState;
    std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
    if (m_heap.m_opaqueRoots.contains(root))
        return TrueTriState;
    return MixedTriState;
}

void SlotVisitor::mergeOpaqueRootsIfNecessary()
{
    if (m_opaqueRoots.isEmpty())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::mergeOpaqueRootsIfProfitable()
{
    if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::donate()
{
    ASSERT(m_isInParallelMode);
    if (Options::numberOfGCMarkers() == 1)
        return;
    
    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

void SlotVisitor::mergeOpaqueRoots()
{
    {
        std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_heap.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}

void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}

void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));
    
    if (!ASSERT_DISABLED) {
        auto locker = holdLock(heap()->m_visitRaceLock);
        heap()->m_visitRaces.add(race);
    }
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

} // namespace JSC