/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SlotVisitor.h"

#include "CPU.h"
#include "ConservativeRoots.h"
#include "GCSegmentedArrayInlines.h"
#include "HeapCellInlines.h"
#include "HeapProfiler.h"
#include "HeapSnapshotBuilder.h"
#include "JSArray.h"
#include "JSDestructibleObject.h"
#include "JSObject.h"
#include "JSString.h"
#include "JSCInlines.h"
#include "SlotVisitorInlines.h"
#include "StopIfNecessaryTimer.h"
#include "SuperSampler.h"
#include "VM.h"
#include <wtf/ListDump.h>
#include <wtf/Lock.h>

namespace JSC {

#if ENABLE(GC_VALIDATION)
static void validate(JSCell* cell)
{
    RELEASE_ASSERT(cell);

    if (!cell->structure()) {
        dataLogF("cell at %p has a null structure\n" , cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    VM& vm = *cell->vm();
    if (cell->structure()->structure()->JSCell::classInfo(vm) != cell->structure()->JSCell::classInfo(vm)) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo(vm))
            parentClassName = cell->structure()->structure()->JSCell::classInfo(vm)->className;
        if (cell->structure()->JSCell::classInfo(vm))
            ourClassName = cell->structure()->JSCell::classInfo(vm)->className;
        dataLogF("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
            cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }

    // Make sure we can walk the ClassInfo chain
    const ClassInfo* info = cell->classInfo(vm);
    do { } while ((info = info->parentClass));
}
#endif

SlotVisitor::SlotVisitor(Heap& heap, CString codeName)
    : m_bytesVisited(0)
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_markingVersion(MarkedSpace::initialVersion)
    , m_heap(heap)
    , m_codeName(codeName)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
{
}

SlotVisitor::~SlotVisitor()
{
    clearMarkStacks();
}

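// Called at the start of a marking phase. For a full collection, this visitor's opaque roots
// must already have been merged into the shared set; for an eden collection the per-visitor
// counters are simply reset. Also picks up the active heap snapshot builder (if any) and the
// current marking version.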
void SlotVisitor::didStartMarking()
{
    if (heap()->collectionScope() == CollectionScope::Full)
        RELEASE_ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
    else
        reset();

    if (HeapProfiler* heapProfiler = vm().heapProfiler())
        m_heapSnapshotBuilder = heapProfiler->activeSnapshotBuilder();

    m_markingVersion = heap()->objectSpace().markingVersion();
}

void SlotVisitor::reset()
{
    RELEASE_ASSERT(!m_opaqueRoots.size());
    m_bytesVisited = 0;
    m_visitCount = 0;
    m_heapSnapshotBuilder = nullptr;
    RELEASE_ASSERT(!m_currentCell);
}

void SlotVisitor::clearMarkStacks()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.clear();
            return IterationStatus::Continue;
        });
}

void SlotVisitor::append(ConservativeRoots& conservativeRoots)
{
    HeapCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        appendJSCellOrAuxiliary(roots[i]);
}

void SlotVisitor::appendJSCellOrAuxiliary(HeapCell* heapCell)
{
    if (!heapCell)
        return;

    ASSERT(!m_isCheckingForDefaultMarkViolation);

    auto validateCell = [&] (JSCell* jsCell) {
        StructureID structureID = jsCell->structureID();

        auto die = [&] (const char* text) {
            WTF::dataFile().atomically(
                [&] (PrintStream& out) {
                    out.print(text);
                    out.print("GC type: ", heap()->collectionScope(), "\n");
                    out.print("Object at: ", RawPointer(jsCell), "\n");
#if USE(JSVALUE64)
                    out.print("Structure ID: ", structureID, " (0x", format("%x", structureID), ")\n");
                    out.print("Structure ID table size: ", heap()->structureIDTable().size(), "\n");
#else
                    out.print("Structure: ", RawPointer(structureID), "\n");
#endif
                    out.print("Object contents:");
                    for (unsigned i = 0; i < 2; ++i)
                        out.print(" ", format("0x%016llx", bitwise_cast<uint64_t*>(jsCell)[i]));
                    out.print("\n");
                    CellContainer container = jsCell->cellContainer();
                    out.print("Is marked: ", container.isMarked(jsCell), "\n");
                    out.print("Is newly allocated: ", container.isNewlyAllocated(jsCell), "\n");
                    if (container.isMarkedBlock()) {
                        MarkedBlock& block = container.markedBlock();
                        out.print("Block: ", RawPointer(&block), "\n");
                        block.handle().dumpState(out);
                        out.print("\n");
                        out.print("Is marked raw: ", block.isMarkedRaw(jsCell), "\n");
                        out.print("Marking version: ", block.markingVersion(), "\n");
                        out.print("Heap marking version: ", heap()->objectSpace().markingVersion(), "\n");
                        out.print("Is newly allocated raw: ", block.handle().isNewlyAllocated(jsCell), "\n");
                        out.print("Newly allocated version: ", block.handle().newlyAllocatedVersion(), "\n");
                        out.print("Heap newly allocated version: ", heap()->objectSpace().newlyAllocatedVersion(), "\n");
                    }
                    UNREACHABLE_FOR_PLATFORM();
                });
        };

        // It's not OK for the structure to be null at any GC scan point. We must not GC while
        // an object is not fully initialized.
        if (!structureID)
            die("GC scan found corrupt object: structureID is zero!\n");

        // It's not OK for the structure to be nuked at any GC scan point.
        if (isNuked(structureID))
            die("GC scan found object in bad state: structureID is nuked!\n");

#if USE(JSVALUE64)
        // This detects the worst of the badness.
        if (structureID >= heap()->structureIDTable().size())
            die("GC scan found corrupt object: structureID is out of bounds!\n");
#endif
    };

    // In debug mode, we validate before marking since this makes it clearer what the problem
    // was. It's also slower, so we don't do it normally.
    if (!ASSERT_DISABLED && heapCell->cellKind() == HeapCell::JSCell)
        validateCell(static_cast<JSCell*>(heapCell));

    if (Heap::testAndSetMarked(m_markingVersion, heapCell))
        return;

    switch (heapCell->cellKind()) {
    case HeapCell::JSCell: {
        // We have ample budget to perform validation here.

        JSCell* jsCell = static_cast<JSCell*>(heapCell);
        validateCell(jsCell);

        jsCell->setCellState(CellState::PossiblyGrey);

        appendToMarkStack(jsCell);
        return;
    }

    case HeapCell::Auxiliary: {
        noteLiveAuxiliaryCell(heapCell);
        return;
    } }
}

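// Slow path for append(): if a heap snapshot is being built, record the edge from the cell
// currently being visited, then mark the target cell and push it onto a mark stack.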
void SlotVisitor::appendSlow(JSCell* cell, Dependency dependency)
{
    if (UNLIKELY(m_heapSnapshotBuilder))
        m_heapSnapshotBuilder->appendEdge(m_currentCell, cell);

    appendHiddenSlowImpl(cell, dependency);
}

void SlotVisitor::appendHiddenSlow(JSCell* cell, Dependency dependency)
{
    appendHiddenSlowImpl(cell, dependency);
}

ALWAYS_INLINE void SlotVisitor::appendHiddenSlowImpl(JSCell* cell, Dependency dependency)
{
    ASSERT(!m_isCheckingForDefaultMarkViolation);

#if ENABLE(GC_VALIDATION)
    validate(cell);
#endif

    if (cell->isLargeAllocation())
        setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell, dependency);
    else
        setMarkedAndAppendToMarkStack(cell->markedBlock(), cell, dependency);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::setMarkedAndAppendToMarkStack(ContainerType& container, JSCell* cell, Dependency dependency)
{
    if (container.testAndSetMarked(cell, dependency))
        return;

    ASSERT(cell->structure());

    // Indicate that the object is grey and that:
    // In case of concurrent GC: it's the first time it is grey in this GC cycle.
    // In case of eden collection: it's a new object that became grey rather than an old remembered object.
    cell->setCellState(CellState::PossiblyGrey);

    appendToMarkStack(container, cell);
}

void SlotVisitor::appendToMarkStack(JSCell* cell)
{
    if (cell->isLargeAllocation())
        appendToMarkStack(cell->largeAllocation(), cell);
    else
        appendToMarkStack(cell->markedBlock(), cell);
}

template<typename ContainerType>
ALWAYS_INLINE void SlotVisitor::appendToMarkStack(ContainerType& container, JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));
    ASSERT(!cell->isZapped());

    container.noteMarked();

    m_visitCount++;
    m_bytesVisited += container.cellSize();

    m_collectorStack.append(cell);
}

void SlotVisitor::appendToMutatorMarkStack(const JSCell* cell)
{
    m_mutatorStack.append(cell);
}

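// Marks an auxiliary (non-JSCell) allocation, given a pointer to its base. Already-marked
// cells are skipped; otherwise the cell is accounted for as live.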
void SlotVisitor::markAuxiliary(const void* base)
{
    HeapCell* cell = bitwise_cast<HeapCell*>(base);

    ASSERT(cell->heap() == heap());

    if (Heap::testAndSetMarked(m_markingVersion, cell))
        return;

    noteLiveAuxiliaryCell(cell);
}

void SlotVisitor::noteLiveAuxiliaryCell(HeapCell* cell)
{
    // We get here once per GC under these circumstances:
    //
    // Eden collection: if the cell was allocated since the last collection and is live somehow.
    //
    // Full collection: if the cell is live somehow.

    CellContainer container = cell->cellContainer();

    container.assertValidCell(vm(), cell);
    container.noteMarked();

    m_visitCount++;

    size_t cellSize = container.cellSize();
    m_bytesVisited += cellSize;
    m_nonCellVisitCount += cellSize;
}

class SetCurrentCellScope {
public:
    SetCurrentCellScope(SlotVisitor& visitor, const JSCell* cell)
        : m_visitor(visitor)
    {
        ASSERT(!m_visitor.m_currentCell);
        m_visitor.m_currentCell = const_cast<JSCell*>(cell);
    }

    ~SetCurrentCellScope()
    {
        ASSERT(m_visitor.m_currentCell);
        m_visitor.m_currentCell = nullptr;
    }

private:
    SlotVisitor& m_visitor;
};

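// Visits all outgoing references of an already-marked cell. Strings, final objects, and arrays
// get dedicated fast paths; everything else dispatches through the method table. On the first
// visit of a cell, the node is also recorded in the heap snapshot, if one is being built.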
ALWAYS_INLINE void SlotVisitor::visitChildren(const JSCell* cell)
{
    ASSERT(Heap::isMarkedConcurrently(cell));

    SetCurrentCellScope currentCellScope(*this, cell);

    if (false) {
        dataLog("Visiting ", RawPointer(cell));
        if (!m_isFirstVisit)
            dataLog(" (subsequent)");
        dataLog("\n");
    }

    // Funny story: it's possible for the object to be black already, if we barrier the object at
    // about the same time that it's marked. That's fine. It's a gnarly and super-rare race. It's
    // not clear to me that it would be correct or profitable to bail here if the object is already
    // black.

    cell->setCellState(CellState::PossiblyBlack);

    WTF::storeLoadFence();

    switch (cell->type()) {
    case StringType:
        JSString::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case FinalObjectType:
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    case ArrayType:
        JSArray::visitChildren(const_cast<JSCell*>(cell), *this);
        break;

    default:
        // FIXME: This could be so much better.
        // https://bugs.webkit.org/show_bug.cgi?id=162462
        cell->methodTable(vm())->visitChildren(const_cast<JSCell*>(cell), *this);
        break;
    }

    if (UNLIKELY(m_heapSnapshotBuilder)) {
        if (m_isFirstVisit)
            m_heapSnapshotBuilder->appendNode(const_cast<JSCell*>(cell));
    }
}

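// Visits a cell on behalf of a marking constraint. The visit is flagged as not being the first
// so that the heap snapshot builder does not record the node a second time.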
void SlotVisitor::visitAsConstraint(const JSCell* cell)
{
    m_isFirstVisit = false;
    visitChildren(cell);
}

void SlotVisitor::donateKnownParallel(MarkStackArray& from, MarkStackArray& to)
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (from.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (to.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    std::unique_lock<Lock> lock(m_heap.m_markingMutex, std::try_to_lock);
    if (!lock.owns_lock())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    from.donateSomeCellsTo(to);

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::donateKnownParallel()
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            donateKnownParallel(stack, correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });
}

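// Caches whether this visitor may treat the mutator as stopped. The flag is true only while the
// collector believes the world is stopped and this visitor has opted in via
// optimizeForStoppedMutator().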
void SlotVisitor::updateMutatorIsStopped(const AbstractLocker&)
{
    m_mutatorIsStopped = (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::updateMutatorIsStopped()
{
    if (mutatorIsStoppedIsUpToDate())
        return;
    updateMutatorIsStopped(holdLock(m_rightToRun));
}

bool SlotVisitor::hasAcknowledgedThatTheMutatorIsResumed() const
{
    return !m_mutatorIsStopped;
}

bool SlotVisitor::mutatorIsStoppedIsUpToDate() const
{
    return m_mutatorIsStopped == (m_heap.collectorBelievesThatTheWorldIsStopped() & m_canOptimizeForStoppedMutator);
}

void SlotVisitor::optimizeForStoppedMutator()
{
    m_canOptimizeForStoppedMutator = true;
}

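// Drains this visitor's local mark stacks until they are empty or the timeout elapses. Between
// batches of Options::minimumNumberOfScansBetweenRebalance() cells, it hits a safepoint and
// donates work to other markers.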
NEVER_INLINE void SlotVisitor::drain(MonotonicTime timeout)
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: attempting to drain when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    auto locker = holdLock(m_rightToRun);

    while (!hasElapsed(timeout)) {
        updateMutatorIsStopped(locker);
        IterationStatus status = forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                if (stack.isEmpty())
                    return IterationStatus::Continue;

                stack.refill();

                m_isFirstVisit = (&stack == &m_collectorStack);

                for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); stack.canRemoveLast() && countdown--;)
                    visitChildren(stack.removeLast());
                return IterationStatus::Done;
            });
        if (status == IterationStatus::Continue)
            break;

        m_rightToRun.safepoint();
        donateKnownParallel();
    }

    mergeIfNecessary();
}

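// Pulls roughly bytesRequested worth of cells from the shared mark stacks into the local stacks,
// visits them (counting both cell bytes and auxiliary bytes), then donates any leftover work back
// to the shared stacks. Returns the number of bytes visited.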
size_t SlotVisitor::performIncrementOfDraining(size_t bytesRequested)
{
    RELEASE_ASSERT(m_isInParallelMode);

    size_t cellsRequested = bytesRequested / MarkedBlock::atomSize;
    {
        auto locker = holdLock(m_heap.m_markingMutex);
        forEachMarkStack(
            [&] (MarkStackArray& stack) -> IterationStatus {
                cellsRequested -= correspondingGlobalStack(stack).transferTo(stack, cellsRequested);
                return cellsRequested ? IterationStatus::Continue : IterationStatus::Done;
            });
    }

    size_t cellBytesVisited = 0;
    m_nonCellVisitCount = 0;

    auto bytesVisited = [&] () -> size_t {
        return cellBytesVisited + m_nonCellVisitCount;
    };

    auto isDone = [&] () -> bool {
        return bytesVisited() >= bytesRequested;
    };

    {
        auto locker = holdLock(m_rightToRun);

        while (!isDone()) {
            updateMutatorIsStopped(locker);
            IterationStatus status = forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    if (stack.isEmpty() || isDone())
                        return IterationStatus::Continue;

                    stack.refill();

                    m_isFirstVisit = (&stack == &m_collectorStack);

                    unsigned countdown = Options::minimumNumberOfScansBetweenRebalance();
                    while (countdown && stack.canRemoveLast() && !isDone()) {
                        const JSCell* cell = stack.removeLast();
                        cellBytesVisited += cell->cellSize();
                        visitChildren(cell);
                        countdown--;
                    }
                    return IterationStatus::Done;
                });
            if (status == IterationStatus::Continue)
                break;
            m_rightToRun.safepoint();
            donateKnownParallel();
        }
    }

    donateAll();
    mergeIfNecessary();

    return bytesVisited();
}

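// Marking has terminated when this visitor is empty, no parallel markers are active, and both
// shared mark stacks are empty.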
bool SlotVisitor::didReachTermination()
{
    LockHolder locker(m_heap.m_markingMutex);
    return didReachTermination(locker);
}

bool SlotVisitor::didReachTermination(const AbstractLocker&)
{
    return isEmpty()
        && !m_heap.m_numberOfActiveParallelMarkers
        && m_heap.m_sharedCollectorMarkStack->isEmpty()
        && m_heap.m_sharedMutatorMarkStack->isEmpty();
}

bool SlotVisitor::hasWork(const AbstractLocker&)
{
    return !m_heap.m_sharedCollectorMarkStack->isEmpty()
        || !m_heap.m_sharedMutatorMarkStack->isEmpty();
}

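// Main loop of a parallel marker: repeatedly steal cells from the shared mark stacks and drain
// them locally. In MasterDrain mode this visitor is also responsible for detecting termination;
// in SlaveDrain mode it waits for more work (or for permission to exit) and, on termination,
// schedules the stop-if-necessary timer so the mutator notices.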
NEVER_INLINE SlotVisitor::SharedDrainResult SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode, MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool isActive = false;
    while (true) {
        {
            LockHolder locker(m_heap.m_markingMutex);
            if (isActive)
                m_heap.m_numberOfActiveParallelMarkers--;
            m_heap.m_numberOfWaitingParallelMarkers++;

            if (sharedDrainMode == MasterDrain) {
                while (true) {
                    if (hasElapsed(timeout))
                        return SharedDrainResult::TimedOut;

                    if (didReachTermination(locker)) {
                        m_heap.m_markingConditionVariable.notifyAll();
                        return SharedDrainResult::Done;
                    }

                    if (hasWork(locker))
                        break;

                    m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                if (hasElapsed(timeout))
                    return SharedDrainResult::TimedOut;

                if (didReachTermination(locker)) {
                    m_heap.m_markingConditionVariable.notifyAll();

                    // If we're in concurrent mode, then we know that the mutator will eventually do
                    // the right thing because:
                    // - It's possible that the collector has the conn. In that case, the collector will
                    //   wake up from the notification above. This will happen if the app released heap
                    //   access. Native apps can spend a lot of time with heap access released.
                    // - It's possible that the mutator will allocate soon. Then it will check if we
                    //   reached termination. This is the most likely outcome in programs that allocate
                    //   a lot.
                    // - WebCore never releases access. But WebCore has a runloop. The runloop will check
                    //   if we reached termination.
                    // So, this tells the runloop that it's got things to do.
                    m_heap.m_stopIfNecessaryTimer->scheduleSoon();
                }

                auto isReady = [&] () -> bool {
                    return hasWork(locker)
                        || m_heap.m_parallelMarkersShouldExit;
                };

                m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout, isReady);

                if (m_heap.m_parallelMarkersShouldExit)
                    return SharedDrainResult::Done;
            }

            forEachMarkStack(
                [&] (MarkStackArray& stack) -> IterationStatus {
                    stack.stealSomeCellsFrom(
                        correspondingGlobalStack(stack),
                        m_heap.m_numberOfWaitingParallelMarkers);
                    return IterationStatus::Continue;
                });

            m_heap.m_numberOfActiveParallelMarkers++;
            m_heap.m_numberOfWaitingParallelMarkers--;
        }

        drain(timeout);
        isActive = true;
    }
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallel(MonotonicTime timeout)
{
    donateAndDrain(timeout);
    return drainFromShared(MasterDrain, timeout);
}

SlotVisitor::SharedDrainResult SlotVisitor::drainInParallelPassively(MonotonicTime timeout)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    if (Options::numberOfGCMarkers() == 1
        || (m_heap.m_worldState.load() & Heap::mutatorWaitingBit)
        || !m_heap.hasHeapAccess()
        || m_heap.collectorBelievesThatTheWorldIsStopped()) {
        // This is an optimization over drainInParallel() when we have a concurrent mutator but
        // otherwise it is not profitable.
        return drainInParallel(timeout);
    }

    LockHolder locker(m_heap.m_markingMutex);
    donateAll(locker);

    for (;;) {
        if (hasElapsed(timeout))
            return SharedDrainResult::TimedOut;

        if (didReachTermination(locker)) {
            m_heap.m_markingConditionVariable.notifyAll();
            return SharedDrainResult::Done;
        }

        m_heap.m_markingConditionVariable.waitUntil(m_heap.m_markingMutex, timeout);
    }
}

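// Moves everything in the local mark stacks to the shared stacks and wakes any waiting markers.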
void SlotVisitor::donateAll()
{
    if (isEmpty())
        return;

    donateAll(holdLock(m_heap.m_markingMutex));
}

void SlotVisitor::donateAll(const AbstractLocker&)
{
    forEachMarkStack(
        [&] (MarkStackArray& stack) -> IterationStatus {
            stack.transferTo(correspondingGlobalStack(stack));
            return IterationStatus::Continue;
        });

    m_heap.m_markingConditionVariable.notifyAll();
}

void SlotVisitor::addOpaqueRoot(void* root)
{
    if (!root)
        return;

    if (m_ignoreNewOpaqueRoots)
        return;

    if (Options::numberOfGCMarkers() == 1) {
        // Put directly into the shared HashSet.
        m_heap.m_opaqueRoots.add(root);
        return;
    }
    // Put into the local set, but merge with the shared one every once in
    // a while to make sure that the local sets don't grow too large.
    mergeOpaqueRootsIfProfitable();
    m_opaqueRoots.add(root);
}

bool SlotVisitor::containsOpaqueRoot(void* root) const
{
    if (!root)
        return false;

    ASSERT(!m_isInParallelMode);
    return m_heap.m_opaqueRoots.contains(root);
}

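// Variant of containsOpaqueRoot() that is usable during parallel marking: returns True if the
// root is in this visitor's local set or in the shared set, and Mixed otherwise, since another
// visitor's local set may still contain it.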
TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
{
    if (!root)
        return FalseTriState;

    if (m_opaqueRoots.contains(root))
        return TrueTriState;
    std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
    if (m_heap.m_opaqueRoots.contains(root))
        return TrueTriState;
    return MixedTriState;
}

void SlotVisitor::mergeIfNecessary()
{
    if (m_opaqueRoots.isEmpty())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::mergeOpaqueRootsIfProfitable()
{
    if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold())
        return;
    mergeOpaqueRoots();
}

void SlotVisitor::donate()
{
    if (!m_isInParallelMode) {
        dataLog("FATAL: Attempting to donate when not in parallel mode.\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (Options::numberOfGCMarkers() == 1)
        return;

    donateKnownParallel();
}

void SlotVisitor::donateAndDrain(MonotonicTime timeout)
{
    donate();
    drain(timeout);
}

void SlotVisitor::mergeOpaqueRoots()
{
    {
        std::lock_guard<Lock> lock(m_heap.m_opaqueRootsMutex);
        for (auto* root : m_opaqueRoots)
            m_heap.m_opaqueRoots.add(root);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester)
{
    m_heap.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester);
}

void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer)
{
    m_heap.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer);
}

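// Called when a visit race on the given cell has been detected: the cell is re-greyed and pushed
// onto the heap's race mark stack so that it will be revisited.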
void SlotVisitor::didRace(const VisitRaceKey& race)
{
    if (Options::verboseVisitRace())
        dataLog(toCString("GC visit race: ", race, "\n"));

    auto locker = holdLock(heap()->m_raceMarkStackLock);
    JSCell* cell = race.cell();
    cell->setCellState(CellState::PossiblyGrey);
    heap()->m_raceMarkStack->append(cell);
}

void SlotVisitor::dump(PrintStream& out) const
{
    out.print("Collector: [", pointerListDump(collectorMarkStack()), "], Mutator: [", pointerListDump(mutatorMarkStack()), "]");
}

MarkStackArray& SlotVisitor::correspondingGlobalStack(MarkStackArray& stack)
{
    if (&stack == &m_collectorStack)
        return *m_heap.m_sharedCollectorMarkStack;
    RELEASE_ASSERT(&stack == &m_mutatorStack);
    return *m_heap.m_sharedMutatorMarkStack;
}

} // namespace JSC