[WebKit-https.git] Source/JavaScriptCore/heap/Heap.cpp @ df713c5e2e4ba8b3ccf40cf7b120daf762fad458
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "MachineStackMarker.h"
51 #include "MarkedSpaceInlines.h"
52 #include "MarkingConstraintSet.h"
53 #include "PreventCollectionScope.h"
54 #include "SamplingProfiler.h"
55 #include "ShadowChicken.h"
56 #include "SpaceTimeMutatorScheduler.h"
57 #include "SuperSampler.h"
58 #include "StochasticSpaceTimeMutatorScheduler.h"
59 #include "StopIfNecessaryTimer.h"
60 #include "SweepingScope.h"
61 #include "SynchronousStopTheWorldMutatorScheduler.h"
62 #include "TypeProfilerLog.h"
63 #include "UnlinkedCodeBlock.h"
64 #include "VM.h"
65 #include "WeakSetInlines.h"
66 #include <algorithm>
67 #include <wtf/CurrentTime.h>
68 #include <wtf/MainThread.h>
69 #include <wtf/ParallelVectorIterator.h>
70 #include <wtf/ProcessID.h>
71 #include <wtf/RAMSize.h>
72 #include <wtf/SimpleStats.h>
73
74 #if USE(FOUNDATION)
75 #if __has_include(<objc/objc-internal.h>)
76 #include <objc/objc-internal.h>
77 #else
78 extern "C" void* objc_autoreleasePoolPush(void);
79 extern "C" void objc_autoreleasePoolPop(void *context);
80 #endif
81 #endif // USE(FOUNDATION)
82
83 using namespace std;
84
85 namespace JSC {
86
87 namespace {
88
89 bool verboseStop = false;
90
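// Tracks the largest GC pause observed so far (in milliseconds); used only for Options::logGC() output.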
91 double maxPauseMS(double thisPauseMS)
92 {
93     static double maxPauseMS;
94     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
95     return maxPauseMS;
96 }
97
98 size_t minHeapSize(HeapType heapType, size_t ramSize)
99 {
100     if (heapType == LargeHeap) {
101         double result = min(
102             static_cast<double>(Options::largeHeapSize()),
103             ramSize * Options::smallHeapRAMFraction());
104         return static_cast<size_t>(result);
105     }
106     return Options::smallHeapSize();
107 }
108
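// Worked example (illustrative only; the fractions and growth factors below are
// assumed stand-ins for the real Options values): with smallHeapRAMFraction = 1/4,
// mediumHeapRAMFraction = 1/2, and growth factors of 2.0 / 1.5 / 1.25 for the
// small / medium / large bands, on an 8GB machine a 1GB heap (< 2GB) gets a 2GB
// target, a 3GB heap (< 4GB) gets a 4.5GB target, and a 5GB heap gets a 6.25GB
// target.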
109 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
110 {
111     if (heapSize < ramSize * Options::smallHeapRAMFraction())
112         return Options::smallHeapGrowthFactor() * heapSize;
113     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
114         return Options::mediumHeapGrowthFactor() * heapSize;
115     return Options::largeHeapGrowthFactor() * heapSize;
116 }
117
118 bool isValidSharedInstanceThreadState(VM* vm)
119 {
120     return vm->currentThreadIsHoldingAPILock();
121 }
122
123 bool isValidThreadState(VM* vm)
124 {
125     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
126         return false;
127
128     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
129         return false;
130
131     return true;
132 }
133
134 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
135 {
136     const char* typeName = "[unknown]";
137     const ClassInfo* info = cell->classInfo(vm);
138     if (info && info->className)
139         typeName = info->className;
140     set.add(typeName);
141 }
142
143 bool measurePhaseTiming()
144 {
145     return false;
146 }
147
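// Lazily creates the stats map on first use (thread-safe via std::call_once) and
// never destroys it, which sidesteps static destruction order issues at shutdown.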
148 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
149 {
150     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
151     static std::once_flag once;
152     std::call_once(
153         once,
154         [] {
155             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
156         });
157     return *result;
158 }
159
160 SimpleStats& timingStats(const char* name, CollectionScope scope)
161 {
162     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
163 }
164
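// RAII helper for timing a GC phase when measurePhaseTiming() is turned on. Typical
// usage, as in Heap::beginMarking() below:
//
//     TimingScope timingScope(*this, "Heap::beginMarking");
//
// The destructor records the elapsed time under the given name and collection scope
// and logs a running average.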
165 class TimingScope {
166 public:
167     TimingScope(std::optional<CollectionScope> scope, const char* name)
168         : m_scope(scope)
169         , m_name(name)
170     {
171         if (measurePhaseTiming())
172             m_before = monotonicallyIncreasingTimeMS();
173     }
174     
175     TimingScope(Heap& heap, const char* name)
176         : TimingScope(heap.collectionScope(), name)
177     {
178     }
179     
180     void setScope(std::optional<CollectionScope> scope)
181     {
182         m_scope = scope;
183     }
184     
185     void setScope(Heap& heap)
186     {
187         setScope(heap.collectionScope());
188     }
189     
190     ~TimingScope()
191     {
192         if (measurePhaseTiming()) {
193             double after = monotonicallyIncreasingTimeMS();
194             double timing = after - m_before;
195             SimpleStats& stats = timingStats(m_name, *m_scope);
196             stats.add(timing);
197             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
198         }
199     }
200 private:
201     std::optional<CollectionScope> m_scope;
202     double m_before;
203     const char* m_name;
204 };
205
206 } // anonymous namespace
207
208 class Heap::Thread : public AutomaticThread {
209 public:
210     Thread(const AbstractLocker& locker, Heap& heap)
211         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
212         , m_heap(heap)
213     {
214     }
215     
216 protected:
217     PollResult poll(const AbstractLocker& locker) override
218     {
219         if (m_heap.m_threadShouldStop) {
220             m_heap.notifyThreadStopping(locker);
221             return PollResult::Stop;
222         }
223         if (m_heap.shouldCollectInCollectorThread(locker))
224             return PollResult::Work;
225         return PollResult::Wait;
226     }
227     
228     WorkResult work() override
229     {
230         m_heap.collectInCollectorThread();
231         return WorkResult::Continue;
232     }
233     
234     void threadDidStart() override
235     {
236         WTF::registerGCThread(GCThreadType::Main);
237     }
238
239 private:
240     Heap& m_heap;
241 };
242
243 Heap::Heap(VM* vm, HeapType heapType)
244     : m_heapType(heapType)
245     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
246     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
247     , m_sizeAfterLastCollect(0)
248     , m_sizeAfterLastFullCollect(0)
249     , m_sizeBeforeLastFullCollect(0)
250     , m_sizeAfterLastEdenCollect(0)
251     , m_sizeBeforeLastEdenCollect(0)
252     , m_bytesAllocatedThisCycle(0)
253     , m_bytesAbandonedSinceLastFullCollect(0)
254     , m_maxEdenSize(m_minBytesPerCycle)
255     , m_maxHeapSize(m_minBytesPerCycle)
256     , m_shouldDoFullCollection(false)
257     , m_totalBytesVisited(0)
258     , m_objectSpace(this)
259     , m_extraMemorySize(0)
260     , m_deprecatedExtraMemorySize(0)
261     , m_machineThreads(std::make_unique<MachineThreads>())
262     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
263     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
264     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
265     , m_raceMarkStack(std::make_unique<MarkStackArray>())
266     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
267     , m_handleSet(vm)
268     , m_codeBlocks(std::make_unique<CodeBlockSet>())
269     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
270     , m_isSafeToCollect(false)
271     , m_vm(vm)
272     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
273     // schedule the timer if we've never done a collection.
274     , m_lastFullGCLength(0.01)
275     , m_lastEdenGCLength(0.01)
276 #if USE(CF)
277     , m_runLoop(CFRunLoopGetCurrent())
278 #endif // USE(CF)
279     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
280     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
281     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
282     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
283     , m_deferralDepth(0)
284 #if USE(FOUNDATION)
285     , m_delayedReleaseRecursionCount(0)
286 #endif
287     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
288     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
289     , m_helperClient(&heapHelperPool())
290     , m_threadLock(Box<Lock>::create())
291     , m_threadCondition(AutomaticThreadCondition::create())
292 {
293     m_worldState.store(0);
294     
295     if (Options::useConcurrentGC()) {
296         if (Options::useStochasticMutatorScheduler())
297             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
298         else
299             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
300     } else {
301         // We simulate turning off concurrent GC by making the scheduler say that the world
302         // should always be stopped when the collector is running.
303         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
304     }
305     
306     if (Options::verifyHeap())
307         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
308     
309     m_collectorSlotVisitor->optimizeForStoppedMutator();
310
311     LockHolder locker(*m_threadLock);
312     m_thread = adoptRef(new Thread(locker, *this));
313 }
314
315 Heap::~Heap()
316 {
317     forEachSlotVisitor(
318         [&] (SlotVisitor& visitor) {
319             visitor.clearMarkStacks();
320         });
321     m_mutatorMarkStack->clear();
322     m_raceMarkStack->clear();
323     
324     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
325         WeakBlock::destroy(*this, block);
326 }
327
328 bool Heap::isPagedOut(double deadline)
329 {
330     return m_objectSpace.isPagedOut(deadline);
331 }
332
333 // The VM is being destroyed and the collector will never run again.
334 // Run all pending finalizers now because we won't get another chance.
335 void Heap::lastChanceToFinalize()
336 {
337     MonotonicTime before;
338     if (Options::logGC()) {
339         before = MonotonicTime::now();
340         dataLog("[GC<", RawPointer(this), ">: shutdown ");
341     }
342     
343     RELEASE_ASSERT(!m_vm->entryScope);
344     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
345     
346     if (m_collectContinuouslyThread) {
347         {
348             LockHolder locker(m_collectContinuouslyLock);
349             m_shouldStopCollectingContinuously = true;
350             m_collectContinuouslyCondition.notifyOne();
351         }
352         waitForThreadCompletion(m_collectContinuouslyThread);
353     }
354     
355     if (Options::logGC())
356         dataLog("1");
357     
358     // Prevent new collections from being started. This is probably not even necessary, since we're not
359     // going to call into anything that starts collections. Still, this makes the algorithm more
360     // obviously sound.
361     m_isSafeToCollect = false;
362     
363     if (Options::logGC())
364         dataLog("2");
365
366     bool isCollecting;
367     {
368         auto locker = holdLock(*m_threadLock);
369         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
370         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
371     }
372     if (isCollecting) {
373         if (Options::logGC())
374             dataLog("...]\n");
375         
376         // Wait for the current collection to finish.
377         waitForCollector(
378             [&] (const AbstractLocker&) -> bool {
379                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
380                 return m_lastServedTicket == m_lastGrantedTicket;
381             });
382         
383         if (Options::logGC())
384             dataLog("[GC<", RawPointer(this), ">: shutdown ");
385     }
386     if (Options::logGC())
387         dataLog("3");
388
389     RELEASE_ASSERT(m_requests.isEmpty());
390     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
391     
392     // Carefully bring the thread down.
393     bool stopped = false;
394     {
395         LockHolder locker(*m_threadLock);
396         stopped = m_thread->tryStop(locker);
397         m_threadShouldStop = true;
398         if (!stopped)
399             m_threadCondition->notifyOne(locker);
400     }
401
402     if (Options::logGC())
403         dataLog("4");
404     
405     if (!stopped)
406         m_thread->join();
407     
408     if (Options::logGC())
409         dataLog("5 ");
410     
411     m_arrayBuffers.lastChanceToFinalize();
412     m_codeBlocks->lastChanceToFinalize(*m_vm);
413     m_objectSpace.stopAllocating();
414     m_objectSpace.lastChanceToFinalize();
415     releaseDelayedReleasedObjects();
416
417     sweepAllLogicallyEmptyWeakBlocks();
418     
419     if (Options::logGC())
420         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
421 }
422
423 void Heap::releaseDelayedReleasedObjects()
424 {
425 #if USE(FOUNDATION)
426     // We need to guard against the case that releasing an object can create more objects due to the
427     // release calling into JS. When those JS calls exit and all locks are dropped, we end up
428     // back here and could try to recursively release objects. We guard against that with a recursive entry
429     // count. Only the initial call will release objects; recursive calls simply return and let the
430     // initial call to the function take care of any objects created during release time.
431     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
432     // and use a temp Vector for the actual releasing.
433     if (!m_delayedReleaseRecursionCount++) {
434         while (!m_delayedReleaseObjects.isEmpty()) {
435             ASSERT(m_vm->currentThreadIsHoldingAPILock());
436
437             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
438
439             {
440                 // We need to drop locks before calling out to arbitrary code.
441                 JSLock::DropAllLocks dropAllLocks(m_vm);
442
443                 void* context = objc_autoreleasePoolPush();
444                 objectsToRelease.clear();
445                 objc_autoreleasePoolPop(context);
446             }
447         }
448     }
449     m_delayedReleaseRecursionCount--;
450 #endif
451 }
452
453 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
454 {
455     didAllocate(size);
456     collectIfNecessaryOrDefer();
457 }
458
459 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
460 {
461     m_deprecatedExtraMemorySize += size;
462     reportExtraMemoryAllocatedSlowCase(size);
463 }
464
465 void Heap::reportAbandonedObjectGraph()
466 {
467     // Our clients don't know exactly how much memory they
468     // are abandoning so we just guess for them.
469     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
470
471     // We want to accelerate the next collection. Because memory has just 
472     // been abandoned, the next collection has the potential to 
473     // be more profitable. Since allocation is the trigger for collection, 
474     // we hasten the next collection by pretending that we've allocated more memory. 
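    // The sum below approximates the bytes allocated and abandoned since the last
    // full collection, which is the signal the full-collection timer responds to.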
475     if (m_fullActivityCallback) {
476         m_fullActivityCallback->didAllocate(
477             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
478     }
479     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
480 }
481
482 void Heap::protect(JSValue k)
483 {
484     ASSERT(k);
485     ASSERT(m_vm->currentThreadIsHoldingAPILock());
486
487     if (!k.isCell())
488         return;
489
490     m_protectedValues.add(k.asCell());
491 }
492
493 bool Heap::unprotect(JSValue k)
494 {
495     ASSERT(k);
496     ASSERT(m_vm->currentThreadIsHoldingAPILock());
497
498     if (!k.isCell())
499         return false;
500
501     return m_protectedValues.remove(k.asCell());
502 }
503
504 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
505 {
506     if (m_arrayBuffers.addReference(cell, buffer)) {
507         collectIfNecessaryOrDefer();
508         didAllocate(buffer->gcSizeEstimateInBytes());
509     }
510 }
511
512 void Heap::finalizeUnconditionalFinalizers()
513 {
514     while (m_unconditionalFinalizers.hasNext()) {
515         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
516         finalizer->finalizeUnconditionally();
517     }
518 }
519
520 void Heap::willStartIterating()
521 {
522     m_objectSpace.willStartIterating();
523 }
524
525 void Heap::didFinishIterating()
526 {
527     m_objectSpace.didFinishIterating();
528 }
529
530 void Heap::completeAllJITPlans()
531 {
532 #if ENABLE(JIT)
533     JITWorklist::instance()->completeAllForVM(*m_vm);
534 #endif // ENABLE(JIT)
535     DFG::completeAllPlansForVM(*m_vm);
536 }
537
538 template<typename Func>
539 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
540 {
541     m_codeBlocks->iterateCurrentlyExecuting(func);
542     DFG::iterateCodeBlocksForGC(*m_vm, func);
543 }
544
545 template<typename Func>
546 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
547 {
548     Vector<CodeBlock*, 256> codeBlocks;
549     iterateExecutingAndCompilingCodeBlocks(
550         [&] (CodeBlock* codeBlock) {
551             codeBlocks.append(codeBlock);
552         });
553     for (CodeBlock* codeBlock : codeBlocks)
554         func(codeBlock);
555 }
556
557 void Heap::assertSharedMarkStacksEmpty()
558 {
559     bool ok = true;
560     
561     if (!m_sharedCollectorMarkStack->isEmpty()) {
562         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
563         ok = false;
564     }
565     
566     if (!m_sharedMutatorMarkStack->isEmpty()) {
567         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
568         ok = false;
569     }
570     
571     RELEASE_ASSERT(ok);
572 }
573
574 void Heap::gatherStackRoots(ConservativeRoots& roots)
575 {
576     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
577 }
578
579 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
580 {
581 #if !ENABLE(JIT)
582     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
583 #else
584     UNUSED_PARAM(roots);
585 #endif
586 }
587
588 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
589 {
590 #if ENABLE(DFG_JIT)
591     m_vm->gatherConservativeRoots(roots);
592 #else
593     UNUSED_PARAM(roots);
594 #endif
595 }
596
597 void Heap::beginMarking()
598 {
599     TimingScope timingScope(*this, "Heap::beginMarking");
600     if (m_collectionScope == CollectionScope::Full)
601         m_codeBlocks->clearMarksForFullCollection();
602     m_jitStubRoutines->clearMarks();
603     m_objectSpace.beginMarking();
604     setMutatorShouldBeFenced(true);
605 }
606
607 void Heap::removeDeadCompilerWorklistEntries()
608 {
609 #if ENABLE(DFG_JIT)
610     for (unsigned i = DFG::numberOfWorklists(); i--;)
611         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
612 #endif
613 }
614
615 bool Heap::isHeapSnapshotting() const
616 {
617     HeapProfiler* heapProfiler = m_vm->heapProfiler();
618     if (UNLIKELY(heapProfiler))
619         return heapProfiler->activeSnapshotBuilder();
620     return false;
621 }
622
623 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
624     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
625         : m_builder(builder)
626     {
627     }
628
629     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
630     {
631         if (kind == HeapCell::JSCell) {
632             JSCell* cell = static_cast<JSCell*>(heapCell);
633             cell->methodTable()->heapSnapshot(cell, m_builder);
634         }
635         return IterationStatus::Continue;
636     }
637
638     HeapSnapshotBuilder& m_builder;
639 };
640
641 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
642 {
643     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
644         HeapIterationScope heapIterationScope(*this);
645         GatherHeapSnapshotData functor(*builder);
646         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
647     }
648 }
649
650 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
651     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
652         : m_snapshot(snapshot)
653     {
654     }
655
656     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
657     {
658         if (kind == HeapCell::JSCell)
659             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
660         return IterationStatus::Continue;
661     }
662
663     HeapSnapshot& m_snapshot;
664 };
665
666 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
667 {
668     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
669         HeapIterationScope heapIterationScope(*this);
670         RemoveDeadHeapSnapshotNodes functor(*snapshot);
671         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
672         snapshot->shrinkToFit();
673     }
674 }
675
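// Accumulates the number of bytes visited by the marker. A full collection resets the
// running total; an eden collection adds this cycle's visited bytes on top of it.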
676 void Heap::updateObjectCounts()
677 {
678     if (m_collectionScope == CollectionScope::Full)
679         m_totalBytesVisited = 0;
680
681     m_totalBytesVisitedThisCycle = bytesVisited();
682     
683     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
684 }
685
686 void Heap::endMarking()
687 {
688     forEachSlotVisitor(
689         [&] (SlotVisitor& visitor) {
690             visitor.reset();
691         });
692
693     assertSharedMarkStacksEmpty();
694     m_weakReferenceHarvesters.removeAll();
695
696     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
697     
698     m_objectSpace.endMarking();
699     setMutatorShouldBeFenced(Options::forceFencedBarrier());
700 }
701
702 size_t Heap::objectCount()
703 {
704     return m_objectSpace.objectCount();
705 }
706
707 size_t Heap::extraMemorySize()
708 {
709     return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
710 }
711
712 size_t Heap::size()
713 {
714     return m_objectSpace.size() + extraMemorySize();
715 }
716
717 size_t Heap::capacity()
718 {
719     return m_objectSpace.capacity() + extraMemorySize();
720 }
721
722 size_t Heap::protectedGlobalObjectCount()
723 {
724     size_t result = 0;
725     forEachProtectedCell(
726         [&] (JSCell* cell) {
727             if (cell->isObject() && asObject(cell)->isGlobalObject())
728                 result++;
729         });
730     return result;
731 }
732
733 size_t Heap::globalObjectCount()
734 {
735     HeapIterationScope iterationScope(*this);
736     size_t result = 0;
737     m_objectSpace.forEachLiveCell(
738         iterationScope,
739         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
740             if (kind != HeapCell::JSCell)
741                 return IterationStatus::Continue;
742             JSCell* cell = static_cast<JSCell*>(heapCell);
743             if (cell->isObject() && asObject(cell)->isGlobalObject())
744                 result++;
745             return IterationStatus::Continue;
746         });
747     return result;
748 }
749
750 size_t Heap::protectedObjectCount()
751 {
752     size_t result = 0;
753     forEachProtectedCell(
754         [&] (JSCell*) {
755             result++;
756         });
757     return result;
758 }
759
760 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
761 {
762     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
763     forEachProtectedCell(
764         [&] (JSCell* cell) {
765             recordType(*vm(), *result, cell);
766         });
767     return result;
768 }
769
770 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
771 {
772     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
773     HeapIterationScope iterationScope(*this);
774     m_objectSpace.forEachLiveCell(
775         iterationScope,
776         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
777             if (kind == HeapCell::JSCell)
778                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
779             return IterationStatus::Continue;
780         });
781     return result;
782 }
783
784 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
785 {
786     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
787         return;
788     
789     PreventCollectionScope preventCollectionScope(*this);
790     
791     // If JavaScript is running, it's not safe to delete all JavaScript code, since
792     // we'll end up returning to deleted code.
793     RELEASE_ASSERT(!m_vm->entryScope);
794     RELEASE_ASSERT(!m_collectionScope);
795
796     completeAllJITPlans();
797
798     for (ExecutableBase* executable : m_executables)
799         executable->clearCode();
800 }
801
802 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
803 {
804     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
805         return;
806     
807     PreventCollectionScope preventCollectionScope(*this);
808
809     RELEASE_ASSERT(!m_collectionScope);
810     
811     for (ExecutableBase* current : m_executables) {
812         if (!current->isFunctionExecutable())
813             continue;
814         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
815     }
816 }
817
818 void Heap::clearUnmarkedExecutables()
819 {
820     for (unsigned i = m_executables.size(); i--;) {
821         ExecutableBase* current = m_executables[i];
822         if (isMarked(current))
823             continue;
824
825         // Eagerly dereference the Executable's JITCode in order to run watchpoint
826         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
827         current->clearCode();
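        // Swap-remove: replace this entry with the last one and shrink, since the
        // order of m_executables does not matter.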
828         std::swap(m_executables[i], m_executables.last());
829         m_executables.removeLast();
830     }
831
832     m_executables.shrinkToFit();
833 }
834
835 void Heap::deleteUnmarkedCompiledCode()
836 {
837     clearUnmarkedExecutables();
838     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
839     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
840 }
841
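// Slow path of the write barrier: called when the mutator stores into a cell whose
// state says the collector may already have visited it, so the cell has to be
// appended to the mutator mark stack again (or re-whitened, see below).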
842 void Heap::addToRememberedSet(const JSCell* constCell)
843 {
844     JSCell* cell = const_cast<JSCell*>(constCell);
845     ASSERT(cell);
846     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
847     m_barriersExecuted++;
848     if (m_mutatorShouldBeFenced) {
849         WTF::loadLoadFence();
850         if (!isMarkedConcurrently(cell)) {
851             // During a full collection, a store into an unmarked object that had survived past
852             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
853             // object gets marked at some time after this then it will go down the normal marking
854             // path. So, we don't have to remember this object. We could return here. But we go
855             // further and attempt to re-white the object.
856             
857             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
858             
859             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
860                 // Now we protect against this race:
861                 //
862                 //     1) Object starts out black + unmarked.
863                 //     --> We do isMarkedConcurrently here.
864                 //     2) Object is marked and greyed.
865                 //     3) Object is scanned and blacked.
866                 //     --> We do atomicCompareExchangeCellStateStrong here.
867                 //
868                 // In this case we would have made the object white again, even though it should
869                 // be black. This check lets us correct our mistake. This relies on the fact that
870                 // isMarkedConcurrently converges monotonically to true.
871                 if (isMarkedConcurrently(cell)) {
872                     // It's difficult to work out whether the object should be grey or black at
873                     // this point. We say black conservatively.
874                     cell->setCellState(CellState::PossiblyBlack);
875                 }
876                 
877                 // Either way, we can return. Most likely, the object was not marked, and so the
878                 // object is now labeled white. This means that future barrier executions will not
879                 // fire. In the unlikely event that the object had become marked, we can still
880                 // return anyway, since we proved that the object was not marked at the time that
881                 // we executed this slow path.
882             }
883             
884             return;
885         }
886     } else
887         ASSERT(Heap::isMarkedConcurrently(cell));
888     // It could be that the object was *just* marked. This means that the collector may set the
889     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
890     // race with the collector here. If we win then this is accurate because the object _will_
891     // get scanned again. If we lose then someone else will barrier the object again. That would
892     // be unfortunate but not the end of the world.
893     cell->setCellState(CellState::PossiblyGrey);
894     m_mutatorMarkStack->append(cell);
895 }
896
897 void Heap::sweepSynchronously()
898 {
899     double before = 0;
900     if (Options::logGC()) {
901         dataLog("Full sweep: ", capacity() / 1024, "kb ");
902         before = currentTimeMS();
903     }
904     m_objectSpace.sweep();
905     m_objectSpace.shrink();
906     if (Options::logGC()) {
907         double after = currentTimeMS();
908         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
909     }
910 }
911
912 void Heap::collectAllGarbage()
913 {
914     if (!m_isSafeToCollect)
915         return;
916     
917     collectSync(CollectionScope::Full);
918
919     DeferGCForAWhile deferGC(*this);
920     if (UNLIKELY(Options::useImmortalObjects()))
921         sweeper()->stopSweeping();
922
923     bool alreadySweptInCollectSync = Options::sweepSynchronously();
924     if (!alreadySweptInCollectSync) {
925         if (Options::logGC())
926             dataLog("[GC<", RawPointer(this), ">: ");
927         sweepSynchronously();
928         if (Options::logGC())
929             dataLog("]\n");
930     }
931     m_objectSpace.assertNoUnswept();
932
933     sweepAllLogicallyEmptyWeakBlocks();
934 }
935
936 void Heap::collectAsync(std::optional<CollectionScope> scope)
937 {
938     if (!m_isSafeToCollect)
939         return;
940
941     bool alreadyRequested = false;
942     {
943         LockHolder locker(*m_threadLock);
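        // Skip this request if an already-queued request subsumes it: an Eden request is
        // subsumed by any queued request, a Full request only by a queued Full request,
        // and an unspecified request by a queued Full or unspecified request.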
944         for (std::optional<CollectionScope> request : m_requests) {
945             if (scope) {
946                 if (scope == CollectionScope::Eden) {
947                     alreadyRequested = true;
948                     break;
949                 } else {
950                     RELEASE_ASSERT(scope == CollectionScope::Full);
951                     if (request == CollectionScope::Full) {
952                         alreadyRequested = true;
953                         break;
954                     }
955                 }
956             } else {
957                 if (!request || request == CollectionScope::Full) {
958                     alreadyRequested = true;
959                     break;
960                 }
961             }
962         }
963     }
964     if (alreadyRequested)
965         return;
966
967     requestCollection(scope);
968 }
969
970 void Heap::collectSync(std::optional<CollectionScope> scope)
971 {
972     if (!m_isSafeToCollect)
973         return;
974     
975     waitForCollection(requestCollection(scope));
976 }
977
978 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
979 {
980     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
981     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
982     
983     if (false)
984         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
985     
986     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
987 }
988
989 void Heap::collectInCollectorThread()
990 {
991     for (;;) {
992         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
993         switch (result) {
994         case RunCurrentPhaseResult::Finished:
995             return;
996         case RunCurrentPhaseResult::Continue:
997             break;
998         case RunCurrentPhaseResult::NeedCurrentThreadState:
999             RELEASE_ASSERT_NOT_REACHED();
1000             break;
1001         }
1002     }
1003 }
1004
1005 void Heap::checkConn(GCConductor conn)
1006 {
1007     switch (conn) {
1008     case GCConductor::Mutator:
1009         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1010         return;
1011     case GCConductor::Collector:
1012         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1013         return;
1014     }
1015     RELEASE_ASSERT_NOT_REACHED();
1016 }
1017
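// Runs one step of the collector phase machine. Phases advance as
// NotRunning -> Begin -> Fixpoint, with Fixpoint handing off to Concurrent and
// Reloop handing back to Fixpoint until marking converges, then End -> NotRunning.
// Either the collector thread or the mutator (whichever holds the conn) drives this.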
1018 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1019 {
1020     checkConn(conn);
1021     m_currentThreadState = currentThreadState;
1022     
1023     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1024     if (!finishChangingPhase(conn)) {
1025         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1026         // this, but it's probably not the end of the world if it does happen.
1027         if (false)
1028             dataLog("Conn bounce-back.\n");
1029         return RunCurrentPhaseResult::Finished;
1030     }
1031     
1032     bool result = false;
1033     switch (m_currentPhase) {
1034     case CollectorPhase::NotRunning:
1035         result = runNotRunningPhase(conn);
1036         break;
1037         
1038     case CollectorPhase::Begin:
1039         result = runBeginPhase(conn);
1040         break;
1041         
1042     case CollectorPhase::Fixpoint:
1043         if (!currentThreadState && conn == GCConductor::Mutator)
1044             return RunCurrentPhaseResult::NeedCurrentThreadState;
1045         
1046         result = runFixpointPhase(conn);
1047         break;
1048         
1049     case CollectorPhase::Concurrent:
1050         result = runConcurrentPhase(conn);
1051         break;
1052         
1053     case CollectorPhase::Reloop:
1054         result = runReloopPhase(conn);
1055         break;
1056         
1057     case CollectorPhase::End:
1058         result = runEndPhase(conn);
1059         break;
1060     }
1061
1062     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1063 }
1064
1065 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1066 {
1067     // Check m_requests since the mutator calls this to poll what's going on.
1068     {
1069         auto locker = holdLock(*m_threadLock);
1070         if (m_requests.isEmpty())
1071             return false;
1072     }
1073     
1074     return changePhase(conn, CollectorPhase::Begin);
1075 }
1076
1077 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1078 {
1079     m_currentGCStartTime = MonotonicTime::now();
1080         
1081     std::optional<CollectionScope> scope;
1082     {
1083         LockHolder locker(*m_threadLock);
1084         RELEASE_ASSERT(!m_requests.isEmpty());
1085         scope = m_requests.first();
1086     }
1087         
1088     if (Options::logGC())
1089         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1090
1091     m_beforeGC = MonotonicTime::now();
1092
1093     if (m_collectionScope) {
1094         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1095         RELEASE_ASSERT_NOT_REACHED();
1096     }
1097         
1098     willStartCollection(scope);
1099         
1100     if (UNLIKELY(m_verifier)) {
1101         // Verify that live objects from the last GC cycle haven't been corrupted by
1102         // mutators before we begin this new GC cycle.
1103         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1104             
1105         m_verifier->startGC();
1106         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1107     }
1108         
1109     prepareForMarking();
1110         
1111     if (m_collectionScope == CollectionScope::Full) {
1112         m_opaqueRoots.clear();
1113         m_collectorSlotVisitor->clearMarkStacks();
1114         m_mutatorMarkStack->clear();
1115     }
1116
1117     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1118
1119     beginMarking();
1120
1121     forEachSlotVisitor(
1122         [&] (SlotVisitor& visitor) {
1123             visitor.didStartMarking();
1124         });
1125
1126     m_parallelMarkersShouldExit = false;
1127
1128     m_helperClient.setFunction(
1129         [this] () {
1130             SlotVisitor* slotVisitor;
1131             {
1132                 LockHolder locker(m_parallelSlotVisitorLock);
1133                 if (m_availableParallelSlotVisitors.isEmpty()) {
1134                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1135                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1136                     
1137                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1138                         newVisitor->optimizeForStoppedMutator();
1139                     
1140                     newVisitor->didStartMarking();
1141                     
1142                     slotVisitor = newVisitor.get();
1143                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1144                 } else
1145                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1146             }
1147
1148             WTF::registerGCThread(GCThreadType::Helper);
1149
1150             {
1151                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1152                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1153             }
1154
1155             {
1156                 LockHolder locker(m_parallelSlotVisitorLock);
1157                 m_availableParallelSlotVisitors.append(slotVisitor);
1158             }
1159         });
1160
1161     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1162
1163     m_constraintSet->didStartMarking();
1164     
1165     m_scheduler->beginCollection();
1166     if (Options::logGC())
1167         m_scheduler->log();
1168     
1169     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1170     // checks because bootstrap would have put things into the visitor. So, we should fall
1171     // through to draining.
1172     
1173     if (!slotVisitor.didReachTermination()) {
1174         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1175         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1176         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1177         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1178         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1179         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1180         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1181         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1182         RELEASE_ASSERT_NOT_REACHED();
1183     }
1184         
1185     return changePhase(conn, CollectorPhase::Fixpoint);
1186 }
1187
1188 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1189 {
1190     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1191     
1192     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1193     
1194     if (Options::logGC()) {
1195         HashMap<const char*, size_t> visitMap;
1196         forEachSlotVisitor(
1197             [&] (SlotVisitor& slotVisitor) {
1198                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1199             });
1200         
1201         auto perVisitorDump = sortedMapDump(
1202             visitMap,
1203             [] (const char* a, const char* b) -> bool {
1204                 return strcmp(a, b) < 0;
1205             },
1206             ":", " ");
1207         
1208         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1209     }
1210         
1211     if (slotVisitor.didReachTermination()) {
1212         m_scheduler->didReachTermination();
1213             
1214         assertSharedMarkStacksEmpty();
1215             
1216         slotVisitor.mergeIfNecessary();
1217         for (auto& parallelVisitor : m_parallelSlotVisitors)
1218             parallelVisitor->mergeIfNecessary();
1219             
1220         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1221         // we don't have to execute root constraints again unless the mutator did run. At a
1222         // minimum, we could use this for work estimates - but it's probably more than just an
1223         // estimate.
1224         // https://bugs.webkit.org/show_bug.cgi?id=166828
1225             
1226         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1227         // into play if we're executing constraints for the first time. But that will matter
1228         // when we have deep stacks or a lot of DOM stuff.
1229         // https://bugs.webkit.org/show_bug.cgi?id=166831
1230             
1231         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1232         // add their own using Heap::addMarkingConstraint().
1233         bool converged =
1234             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1235         if (converged && slotVisitor.isEmpty()) {
1236             assertSharedMarkStacksEmpty();
1237             return changePhase(conn, CollectorPhase::End);
1238         }
1239             
1240         m_scheduler->didExecuteConstraints();
1241     }
1242         
1243     if (Options::logGC())
1244         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1245         
1246     {
1247         ParallelModeEnabler enabler(slotVisitor);
1248         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1249     }
1250         
1251     m_scheduler->synchronousDrainingDidStall();
1252
1253     if (slotVisitor.didReachTermination())
1254         return true; // This is like relooping to the top of runFixpointPhase().
1255         
1256     if (!m_scheduler->shouldResume())
1257         return true;
1258
1259     m_scheduler->willResume();
1260         
1261     if (Options::logGC()) {
1262         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1263         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1264     }
1265
1266     // Forgive the mutator for its past failures to keep up.
1267     // FIXME: Figure out if moving this to different places results in perf changes.
1268     m_incrementBalance = 0;
1269         
1270     return changePhase(conn, CollectorPhase::Concurrent);
1271 }
1272
1273 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1274 {
1275     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1276
1277     switch (conn) {
1278     case GCConductor::Mutator: {
1279         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1280         // stopIfNecessary(), i.e. on every allocation slow path. When that happens, we check whether
1281         // it's time to stop and do some work.
1282         if (slotVisitor.didReachTermination()
1283             || m_scheduler->shouldStop())
1284             return changePhase(conn, CollectorPhase::Reloop);
1285         
1286         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1287         // everything. This is super cheap if the SlotVisitor is already empty.
1288         slotVisitor.donateAll();
1289         return false;
1290     }
1291     case GCConductor::Collector: {
1292         {
1293             ParallelModeEnabler enabler(slotVisitor);
1294             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1295         }
1296         return changePhase(conn, CollectorPhase::Reloop);
1297     } }
1298     
1299     RELEASE_ASSERT_NOT_REACHED();
1300     return false;
1301 }
1302
1303 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1304 {
1305     if (Options::logGC())
1306         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1307     
1308     m_scheduler->didStop();
1309     
1310     if (Options::logGC())
1311         m_scheduler->log();
1312     
1313     return changePhase(conn, CollectorPhase::Fixpoint);
1314 }
1315
1316 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1317 {
1318     m_scheduler->endCollection();
1319         
1320     {
1321         auto locker = holdLock(m_markingMutex);
1322         m_parallelMarkersShouldExit = true;
1323         m_markingConditionVariable.notifyAll();
1324     }
1325     m_helperClient.finish();
1326     
1327     iterateExecutingAndCompilingCodeBlocks(
1328         [&] (CodeBlock* codeBlock) {
1329             writeBarrier(codeBlock);
1330         });
1331         
1332     updateObjectCounts();
1333     endMarking();
1334         
1335     if (UNLIKELY(m_verifier)) {
1336         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1337         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1338     }
1339         
1340     if (vm()->typeProfiler())
1341         vm()->typeProfiler()->invalidateTypeSetCache();
1342         
1343     reapWeakHandles();
1344     pruneStaleEntriesFromWeakGCMaps();
1345     sweepArrayBuffers();
1346     snapshotUnswept();
1347     finalizeUnconditionalFinalizers();
1348     removeDeadCompilerWorklistEntries();
1349     notifyIncrementalSweeper();
1350     
1351     m_codeBlocks->iterateCurrentlyExecuting(
1352         [&] (CodeBlock* codeBlock) {
1353             writeBarrier(codeBlock);
1354         });
1355     m_codeBlocks->clearCurrentlyExecuting();
1356         
1357     m_objectSpace.prepareForAllocation();
1358     updateAllocationLimits();
1359
1360     if (UNLIKELY(m_verifier)) {
1361         m_verifier->trimDeadCells();
1362         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1363     }
1364
1365     didFinishCollection();
1366
1367     if (false) {
1368         dataLog("Heap state after GC:\n");
1369         m_objectSpace.dumpBits();
1370     }
1371     
1372     if (Options::logGC()) {
1373         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1374         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1375     }
1376     
1377     {
1378         auto locker = holdLock(*m_threadLock);
1379         m_requests.removeFirst();
1380         m_lastServedTicket++;
1381         clearMutatorWaiting();
1382     }
1383     ParkingLot::unparkAll(&m_worldState);
1384
1385     if (false)
1386         dataLog("GC END!\n");
1387
1388     setNeedFinalize();
1389
1390     m_lastGCStartTime = m_currentGCStartTime;
1391     m_lastGCEndTime = MonotonicTime::now();
1392         
1393     return changePhase(conn, CollectorPhase::NotRunning);
1394 }
1395
1396 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1397 {
1398     checkConn(conn);
1399
1400     m_nextPhase = nextPhase;
1401
1402     return finishChangingPhase(conn);
1403 }
1404
1405 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1406 {
1407     checkConn(conn);
1408     
1409     if (m_nextPhase == m_currentPhase)
1410         return true;
1411
1412     if (false)
1413         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1414     
1415     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1416     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1417     
1418     if (suspendedBefore != suspendedAfter) {
1419         if (suspendedBefore) {
1420             RELEASE_ASSERT(!suspendedAfter);
1421             
1422             resumeThePeriphery();
1423             if (conn == GCConductor::Collector)
1424                 resumeTheMutator();
1425             else
1426                 handleNeedFinalize();
1427         } else {
1428             RELEASE_ASSERT(!suspendedBefore);
1429             RELEASE_ASSERT(suspendedAfter);
1430             
1431             if (conn == GCConductor::Collector) {
1432                 waitWhileNeedFinalize();
1433                 if (!stopTheMutator()) {
1434                     if (false)
1435                         dataLog("Returning false.\n");
1436                     return false;
1437                 }
1438             } else {
1439                 sanitizeStackForVM(m_vm);
1440                 handleNeedFinalize();
1441             }
1442             stopThePeriphery(conn);
1443         }
1444     }
1445     
1446     m_currentPhase = m_nextPhase;
1447     return true;
1448 }
1449
1450 void Heap::stopThePeriphery(GCConductor conn)
1451 {
1452     if (m_collectorBelievesThatTheWorldIsStopped) {
1453         dataLog("FATAL: world already stopped.\n");
1454         RELEASE_ASSERT_NOT_REACHED();
1455     }
1456     
1457     if (m_mutatorDidRun)
1458         m_mutatorExecutionVersion++;
1459     
1460     m_mutatorDidRun = false;
1461
1462     suspendCompilerThreads();
1463     m_collectorBelievesThatTheWorldIsStopped = true;
1464
1465     forEachSlotVisitor(
1466         [&] (SlotVisitor& slotVisitor) {
1467             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1468         });
1469
1470 #if ENABLE(JIT)
1471     {
1472         DeferGCForAWhile awhile(*this);
1473         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1474             && conn == GCConductor::Collector)
1475             setGCDidJIT();
1476     }
1477 #else
1478     UNUSED_PARAM(conn);
1479 #endif // ENABLE(JIT)
1480     
1481     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1482     
1483     m_structureIDTable.flushOldTables();
1484     m_objectSpace.stopAllocating();
1485     
1486     m_stopTime = MonotonicTime::now();
1487 }
1488
1489 NEVER_INLINE void Heap::resumeThePeriphery()
1490 {
1491     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1492     // collection cycle or this is just a concurrent phase within a collection cycle:
1493     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1494     //   last active block.
1495     // - During collection cycle: it reinstates the last active block.
1496     m_objectSpace.resumeAllocating();
1497     
1498     m_barriersExecuted = 0;
1499     
1500     if (!m_collectorBelievesThatTheWorldIsStopped) {
1501         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1502 #if OS(DARWIN)
1503         // FIXME: Remove this when no longer needed.
1504         // https://bugs.webkit.org/show_bug.cgi?id=170094
1505 #if CPU(X86_64)
1506         unsigned worldState = m_worldState.load();
1507         asm volatile(
1508             "int3"
1509             :
1510             : "a"(m_currentPhase), "b"(m_nextPhase), "c"(worldState), "S"(m_lastServedTicket), "D"(m_lastGrantedTicket)
1511             : "memory");
1512 #elif CPU(ARM64)
1513         unsigned worldState = m_worldState.load();
1514         asm volatile(
1515             "ldrb w0, %0\n"
1516             "ldrb w1, %1\n"
1517             "ldr w2, %2\n"
1518             "ldr x3, %3\n"
1519             "ldr x4, %4\n"
1520             "brk #0"
1521             :
1522             : "m"(m_currentPhase), "m"(m_nextPhase), "m"(worldState), "m"(m_lastServedTicket), "m"(m_lastGrantedTicket)
1523             : "memory");
1524 #endif
1525 #endif // OS(DARWIN)
1526         RELEASE_ASSERT_NOT_REACHED();
1527     }
1528     m_collectorBelievesThatTheWorldIsStopped = false;
1529     
1530     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1531     // become available. We basically want a lockAny() method that will lock whatever lock is available
1532     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1533     // queues at once, which is totally achievable - it would just require memory allocation, which is
1534     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1535     // with a DLG-style handshake mechanism, but that seems not as general.
1536     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1537
1538     forEachSlotVisitor(
1539         [&] (SlotVisitor& slotVisitor) {
1540             slotVisitorsToUpdate.append(&slotVisitor);
1541         });
1542     
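    // Make a bounded number of passes trying to cooperatively update each visitor while
    // holding its rightToRun lock; any visitor still left after that is updated
    // unconditionally below.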
1543     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1544         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1545             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1546             bool remove = false;
1547             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1548                 remove = true;
1549             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1550                 slotVisitor.updateMutatorIsStopped(locker);
1551                 remove = true;
1552             }
1553             if (remove) {
1554                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1555                 slotVisitorsToUpdate.takeLast();
1556             }
1557         }
1558         std::this_thread::yield();
1559     }
1560     
1561     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1562         slotVisitor->updateMutatorIsStopped();
1563     
1564     resumeCompilerThreads();
1565 }
1566
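// Tries to bring the mutator to a stop. Returns true if the world is now stopped
// (either it already was, or no thread had heap access so we could set the stopped
// bit directly). If the mutator has, or is handed, the conn, we return false and let
// the mutator drive the collection instead.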
1567 bool Heap::stopTheMutator()
1568 {
1569     for (;;) {
1570         unsigned oldState = m_worldState.load();
1571         if (oldState & stoppedBit) {
1572             RELEASE_ASSERT(!(oldState & hasAccessBit));
1573             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1574             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1575             return true;
1576         }
1577         
1578         if (oldState & mutatorHasConnBit) {
1579             RELEASE_ASSERT(!(oldState & hasAccessBit));
1580             RELEASE_ASSERT(!(oldState & stoppedBit));
1581             return false;
1582         }
1583
1584         if (!(oldState & hasAccessBit)) {
1585             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1586             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1587             // We can stop the world instantly.
1588             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1589                 return true;
1590             continue;
1591         }
1592         
1593         // Transfer the conn to the mutator and bail.
1594         RELEASE_ASSERT(oldState & hasAccessBit);
1595         RELEASE_ASSERT(!(oldState & stoppedBit));
1596         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1597         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1598             if (false)
1599                 dataLog("Handed off the conn.\n");
1600             m_stopIfNecessaryTimer->scheduleSoon();
1601             ParkingLot::unparkAll(&m_worldState);
1602             return false;
1603         }
1604     }
1605 }
1606
1607 NEVER_INLINE void Heap::resumeTheMutator()
1608 {
1609     if (false)
1610         dataLog("Resuming the mutator.\n");
1611     for (;;) {
1612         unsigned oldState = m_worldState.load();
1613         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1614             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1615             RELEASE_ASSERT_NOT_REACHED();
1616         }
1617         if (oldState & mutatorHasConnBit) {
1618             dataLog("Fatal: mutator has the conn.\n");
1619             RELEASE_ASSERT_NOT_REACHED();
1620         }
1621         
1622         if (!(oldState & stoppedBit)) {
1623             if (false)
1624                 dataLog("Returning because not stopped.\n");
1625             return;
1626         }
1627         
1628         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1629             if (false)
1630                 dataLog("CASing and returning.\n");
1631             ParkingLot::unparkAll(&m_worldState);
1632             return;
1633         }
1634     }
1635 }
1636
1637 void Heap::stopIfNecessarySlow()
1638 {
1639     while (stopIfNecessarySlow(m_worldState.load())) { }
1640     
1641     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1642     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1643     
1644     handleGCDidJIT();
1645     handleNeedFinalize();
1646     m_mutatorDidRun = true;
1647 }
1648
1649 bool Heap::stopIfNecessarySlow(unsigned oldState)
1650 {
1651     RELEASE_ASSERT(oldState & hasAccessBit);
1652     RELEASE_ASSERT(!(oldState & stoppedBit));
1653     
1654     // It's possible for us to wake up with finalization already requested but the world not yet
1655     // resumed. If that happens, we can't run finalization yet.
1656     if (handleNeedFinalize(oldState))
1657         return true;
1658
1659     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1660     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1661     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1662     // and there would be some other bit indicating whether we were in some GC phase other than the
1663     // NotRunning or Concurrent ones.
1664     if (oldState & mutatorHasConnBit)
1665         collectInMutatorThread();
1666     
1667     return false;
1668 }
1669
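     // Runs GC phases on the mutator's thread while the mutator holds the conn. When a phase needs
     // the current thread's stack and register state, we capture it with callWithCurrentThreadState()
     // and keep running phases from inside that callback.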
1670 NEVER_INLINE void Heap::collectInMutatorThread()
1671 {
1672     CollectingScope collectingScope(*this);
1673     for (;;) {
1674         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1675         switch (result) {
1676         case RunCurrentPhaseResult::Finished:
1677             return;
1678         case RunCurrentPhaseResult::Continue:
1679             break;
1680         case RunCurrentPhaseResult::NeedCurrentThreadState:
1681             sanitizeStackForVM(m_vm);
1682             auto lambda = [&] (CurrentThreadState& state) {
1683                 for (;;) {
1684                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1685                     switch (result) {
1686                     case RunCurrentPhaseResult::Finished:
1687                         return;
1688                     case RunCurrentPhaseResult::Continue:
1689                         break;
1690                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1691                         RELEASE_ASSERT_NOT_REACHED();
1692                         break;
1693                     }
1694                 }
1695             };
1696             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1697             return;
1698         }
1699     }
1700 }
1701
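     // Blocks until func returns true. func is called with m_threadLock held; while it keeps
     // returning false we park on the world state, but each iteration still services stop-the-world
     // and finalization requests.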
1702 template<typename Func>
1703 void Heap::waitForCollector(const Func& func)
1704 {
1705     for (;;) {
1706         bool done;
1707         {
1708             LockHolder locker(*m_threadLock);
1709             done = func(locker);
1710             if (!done) {
1711                 setMutatorWaiting();
1712                 
1713                 // At this point, the collector knows that we intend to wait, and it will clear the
1714                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1715                 // prevents us from parking unless there is also a stop-the-world request. Unparking after
1716                 // clearing means that if the clearing happens after we park, then we will still unpark.
1717             }
1718         }
1719         
1720         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1721         unsigned oldState = m_worldState.load();
1722         if (stopIfNecessarySlow(oldState))
1723             continue;
1724         
1725         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1726         // do the collection.
1727         relinquishConn();
1728         
1729         if (done) {
1730             clearMutatorWaiting(); // Clean up just in case.
1731             return;
1732         }
1733         
1734         // If mutatorWaitingBit is still set then we want to wait.
1735         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1736     }
1737 }
1738
1739 void Heap::acquireAccessSlow()
1740 {
1741     for (;;) {
1742         unsigned oldState = m_worldState.load();
1743         RELEASE_ASSERT(!(oldState & hasAccessBit));
1744         
1745         if (oldState & stoppedBit) {
1746             if (verboseStop) {
1747                 dataLog("Stopping in acquireAccess!\n");
1748                 WTFReportBacktrace();
1749             }
1750             // Wait until we're not stopped anymore.
1751             ParkingLot::compareAndPark(&m_worldState, oldState);
1752             continue;
1753         }
1754         
1755         RELEASE_ASSERT(!(oldState & stoppedBit));
1756         unsigned newState = oldState | hasAccessBit;
1757         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1758             handleGCDidJIT();
1759             handleNeedFinalize();
1760             m_mutatorDidRun = true;
1761             stopIfNecessary();
1762             return;
1763         }
1764     }
1765 }
1766
1767 void Heap::releaseAccessSlow()
1768 {
1769     for (;;) {
1770         unsigned oldState = m_worldState.load();
1771         if (!(oldState & hasAccessBit)) {
1772             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1773             RELEASE_ASSERT_NOT_REACHED();
1774         }
1775         if (oldState & stoppedBit) {
1776             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1777             RELEASE_ASSERT_NOT_REACHED();
1778         }
1779         
1780         if (handleNeedFinalize(oldState))
1781             continue;
1782         
1783         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1784         
1785         if ((oldState & mutatorHasConnBit)
1786             && m_nextPhase != m_currentPhase) {
1787             // This means that the collector thread had given us the conn so that we would do something
1788             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1789             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1790             // sure to have work to do.
1791             newState |= stoppedBit;
1792         }
1793
1794         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1795             if (oldState & mutatorHasConnBit)
1796                 finishRelinquishingConn();
1797             return;
1798         }
1799     }
1800 }
1801
1802 bool Heap::relinquishConn(unsigned oldState)
1803 {
1804     RELEASE_ASSERT(oldState & hasAccessBit);
1805     RELEASE_ASSERT(!(oldState & stoppedBit));
1806     
1807     if (!(oldState & mutatorHasConnBit))
1808         return false; // Done.
1809     
1810     if (m_threadShouldStop)
1811         return false;
1812     
1813     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1814         return true; // Loop around.
1815     
1816     finishRelinquishingConn();
1817     return true;
1818 }
1819
1820 void Heap::finishRelinquishingConn()
1821 {
1822     if (false)
1823         dataLog("Relinquished the conn.\n");
1824     
1825     sanitizeStackForVM(m_vm);
1826     
1827     auto locker = holdLock(*m_threadLock);
1828     if (!m_requests.isEmpty())
1829         m_threadCondition->notifyOne(locker);
1830     ParkingLot::unparkAll(&m_worldState);
1831 }
1832
1833 void Heap::relinquishConn()
1834 {
1835     while (relinquishConn(m_worldState.load())) { }
1836 }
1837
1838 bool Heap::handleGCDidJIT(unsigned oldState)
1839 {
1840     RELEASE_ASSERT(oldState & hasAccessBit);
1841     if (!(oldState & gcDidJITBit))
1842         return false;
1843     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1844         WTF::crossModifyingCodeFence();
1845         return true;
1846     }
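         // Returning true even when the CAS fails makes handleGCDidJIT() reload the world state and retry.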
1847     return true;
1848 }
1849
1850 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1851 {
1852     RELEASE_ASSERT(oldState & hasAccessBit);
1853     RELEASE_ASSERT(!(oldState & stoppedBit));
1854     
1855     if (!(oldState & needFinalizeBit))
1856         return false;
1857     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1858         finalize();
1859         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1860         // which case they would be waiting for us to release heap access.
1861         ParkingLot::unparkAll(&m_worldState);
1862         return true;
1863     }
1864     return true;
1865 }
1866
1867 void Heap::handleGCDidJIT()
1868 {
1869     while (handleGCDidJIT(m_worldState.load())) { }
1870 }
1871
1872 void Heap::handleNeedFinalize()
1873 {
1874     while (handleNeedFinalize(m_worldState.load())) { }
1875 }
1876
1877 void Heap::setGCDidJIT()
1878 {
1879     m_worldState.transaction(
1880         [&] (unsigned& state) -> bool {
1881             RELEASE_ASSERT(state & stoppedBit);
1882             state |= gcDidJITBit;
1883             return true;
1884         });
1885 }
1886
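     // Requests finalization: wake anything parked on the world state and schedule the
     // stop-if-necessary timer so that a thread with heap access runs handleNeedFinalize() soon.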
1887 void Heap::setNeedFinalize()
1888 {
1889     m_worldState.exchangeOr(needFinalizeBit);
1890     ParkingLot::unparkAll(&m_worldState);
1891     m_stopIfNecessaryTimer->scheduleSoon();
1892 }
1893
1894 void Heap::waitWhileNeedFinalize()
1895 {
1896     for (;;) {
1897         unsigned oldState = m_worldState.load();
1898         if (!(oldState & needFinalizeBit)) {
1899             // This means that either there was no finalize request or the main thread will finalize
1900             // with heap access, so a subsequent call to stopTheWorld() will return only when
1901             // finalize finishes.
1902             return;
1903         }
1904         ParkingLot::compareAndPark(&m_worldState, oldState);
1905     }
1906 }
1907
1908 void Heap::setMutatorWaiting()
1909 {
1910     m_worldState.exchangeOr(mutatorWaitingBit);
1911 }
1912
1913 void Heap::clearMutatorWaiting()
1914 {
1915     m_worldState.exchangeAnd(~mutatorWaitingBit);
1916 }
1917
1918 void Heap::notifyThreadStopping(const AbstractLocker&)
1919 {
1920     m_threadIsStopping = true;
1921     clearMutatorWaiting();
1922     ParkingLot::unparkAll(&m_worldState);
1923 }
1924
1925 void Heap::finalize()
1926 {
1927     MonotonicTime before;
1928     if (Options::logGC()) {
1929         before = MonotonicTime::now();
1930         dataLog("[GC<", RawPointer(this), ">: finalize ");
1931     }
1932     
1933     {
1934         SweepingScope helpingGCScope(*this);
1935         deleteUnmarkedCompiledCode();
1936         deleteSourceProviderCaches();
1937         sweepLargeAllocations();
1938     }
1939     
1940     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1941         cache->clear();
1942     
1943     if (Options::sweepSynchronously())
1944         sweepSynchronously();
1945
1946     if (Options::logGC()) {
1947         MonotonicTime after = MonotonicTime::now();
1948         dataLog((after - before).milliseconds(), "ms]\n");
1949     }
1950 }
1951
1952 Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
1953 {
1954     stopIfNecessary();
1955     
1956     ASSERT(vm()->currentThreadIsHoldingAPILock());
1957     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1958     
1959     LockHolder locker(*m_threadLock);
1960     // We may be able to steal the conn. That only works if the collector is definitely not running
1961     // right now. This is an optimization that prevents the collector thread from ever starting in most
1962     // cases.
1963     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1964     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
1965         if (false)
1966             dataLog("Taking the conn.\n");
1967         m_worldState.exchangeOr(mutatorHasConnBit);
1968     }
1969     
1970     m_requests.append(scope);
1971     m_lastGrantedTicket++;
1972     if (!(m_worldState.load() & mutatorHasConnBit))
1973         m_threadCondition->notifyOne(locker);
1974     return m_lastGrantedTicket;
1975 }
1976
1977 void Heap::waitForCollection(Ticket ticket)
1978 {
1979     waitForCollector(
1980         [&] (const AbstractLocker&) -> bool {
1981             return m_lastServedTicket >= ticket;
1982         });
1983 }
1984
1985 void Heap::sweepLargeAllocations()
1986 {
1987     m_objectSpace.sweepLargeAllocations();
1988 }
1989
1990 void Heap::suspendCompilerThreads()
1991 {
1992 #if ENABLE(DFG_JIT)
1993     // We ensure the worklists so that it's not possible for the mutator to start a new worklist
1994     // after we have suspended the ones that it had started before. That's not very expensive since
1995     // the worklists use AutomaticThreads anyway.
1996     for (unsigned i = DFG::numberOfWorklists(); i--;)
1997         DFG::ensureWorklistForIndex(i).suspendAllThreads();
1998 #endif
1999 }
2000
2001 void Heap::willStartCollection(std::optional<CollectionScope> scope)
2002 {
2003     if (Options::logGC())
2004         dataLog("=> ");
2005     
2006     if (shouldDoFullCollection(scope)) {
2007         m_collectionScope = CollectionScope::Full;
2008         m_shouldDoFullCollection = false;
2009         if (Options::logGC())
2010             dataLog("FullCollection, ");
2011         if (false)
2012             dataLog("Full collection!\n");
2013     } else {
2014         m_collectionScope = CollectionScope::Eden;
2015         if (Options::logGC())
2016             dataLog("EdenCollection, ");
2017         if (false)
2018             dataLog("Eden collection!\n");
2019     }
2020     if (m_collectionScope == CollectionScope::Full) {
2021         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2022         m_extraMemorySize = 0;
2023         m_deprecatedExtraMemorySize = 0;
2024 #if ENABLE(RESOURCE_USAGE)
2025         m_externalMemorySize = 0;
2026 #endif
2027
2028         if (m_fullActivityCallback)
2029             m_fullActivityCallback->willCollect();
2030     } else {
2031         ASSERT(m_collectionScope == CollectionScope::Eden);
2032         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2033     }
2034
2035     if (m_edenActivityCallback)
2036         m_edenActivityCallback->willCollect();
2037
2038     for (auto* observer : m_observers)
2039         observer->willGarbageCollect();
2040 }
2041
2042 void Heap::prepareForMarking()
2043 {
2044     m_objectSpace.prepareForMarking();
2045 }
2046
2047 void Heap::reapWeakHandles()
2048 {
2049     m_objectSpace.reapWeakSets();
2050 }
2051
2052 void Heap::pruneStaleEntriesFromWeakGCMaps()
2053 {
2054     if (m_collectionScope != CollectionScope::Full)
2055         return;
2056     for (auto& pruneCallback : m_weakGCMaps.values())
2057         pruneCallback();
2058 }
2059
2060 void Heap::sweepArrayBuffers()
2061 {
2062     m_arrayBuffers.sweep();
2063 }
2064
2065 void Heap::snapshotUnswept()
2066 {
2067     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2068     m_objectSpace.snapshotUnswept();
2069 }
2070
2071 void Heap::deleteSourceProviderCaches()
2072 {
2073     if (*m_lastCollectionScope == CollectionScope::Full)
2074         m_vm->clearSourceProviderCaches();
2075 }
2076
2077 void Heap::notifyIncrementalSweeper()
2078 {
2079     if (m_collectionScope == CollectionScope::Full) {
2080         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2081             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2082     }
2083
2084     m_sweeper->startSweeping();
2085 }
2086
2087 void Heap::updateAllocationLimits()
2088 {
2089     static const bool verbose = false;
2090     
2091     if (verbose) {
2092         dataLog("\n");
2093         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2094     }
2095     
2096     // Calculate our current heap size threshold for the purpose of figuring out when we should
2097     // run another collection. This isn't the same as either size() or capacity(), though it should
2098     // be somewhere between the two. The key is to match the size calculations involved in calls to
2099     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2100     // fragmentation, we may have size() much smaller than capacity().
2101     size_t currentHeapSize = 0;
2102
2103     // For marked space, we use the total number of bytes visited. This matches the logic for
2104     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2105     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2106     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2107     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2108     currentHeapSize += m_totalBytesVisited;
2109     if (verbose)
2110         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2111
2112     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2113     // extra memory reporting.
2114     currentHeapSize += extraMemorySize();
2115
2116     if (verbose)
2117         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2118     
2119     if (m_collectionScope == CollectionScope::Full) {
2120         // To avoid pathological GC churn in very small and very large heaps, we set
2121         // the new allocation limit based on the current size of the heap, with a
2122         // fixed minimum.
2123         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2124         if (verbose)
2125             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2126         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2127         if (verbose)
2128             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2129         m_sizeAfterLastFullCollect = currentHeapSize;
2130         if (verbose)
2131             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2132         m_bytesAbandonedSinceLastFullCollect = 0;
2133         if (verbose)
2134             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2135     } else {
2136         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2137         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2138         // But we are sloppy, so we have to defend against the overflow.
2139         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2140         if (verbose)
2141             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2142         m_sizeAfterLastEdenCollect = currentHeapSize;
2143         if (verbose)
2144             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2145         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2146         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2147         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2148             m_shouldDoFullCollection = true;
2149         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2150         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2151         if (verbose)
2152             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2153         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2154         if (verbose)
2155             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2156         if (m_fullActivityCallback) {
2157             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2158             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2159         }
2160     }
2161
2162     m_sizeAfterLastCollect = currentHeapSize;
2163     if (verbose)
2164         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2165     m_bytesAllocatedThisCycle = 0;
2166
2167     if (Options::logGC())
2168         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2169 }
2170
2171 void Heap::didFinishCollection()
2172 {
2173     m_afterGC = MonotonicTime::now();
2174     CollectionScope scope = *m_collectionScope;
2175     if (scope == CollectionScope::Full)
2176         m_lastFullGCLength = m_afterGC - m_beforeGC;
2177     else
2178         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2179
2180 #if ENABLE(RESOURCE_USAGE)
2181     ASSERT(externalMemorySize() <= extraMemorySize());
2182 #endif
2183
2184     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2185         gatherExtraHeapSnapshotData(*heapProfiler);
2186         removeDeadHeapSnapshotNodes(*heapProfiler);
2187     }
2188
2189     if (UNLIKELY(m_verifier))
2190         m_verifier->endGC();
2191
2192     RELEASE_ASSERT(m_collectionScope);
2193     m_lastCollectionScope = m_collectionScope;
2194     m_collectionScope = std::nullopt;
2195
2196     for (auto* observer : m_observers)
2197         observer->didGarbageCollect(scope);
2198 }
2199
2200 void Heap::resumeCompilerThreads()
2201 {
2202 #if ENABLE(DFG_JIT)
2203     for (unsigned i = DFG::numberOfWorklists(); i--;)
2204         DFG::existingWorklistForIndex(i).resumeAllThreads();
2205 #endif
2206 }
2207
2208 GCActivityCallback* Heap::fullActivityCallback()
2209 {
2210     return m_fullActivityCallback.get();
2211 }
2212
2213 GCActivityCallback* Heap::edenActivityCallback()
2214 {
2215     return m_edenActivityCallback.get();
2216 }
2217
2218 IncrementalSweeper* Heap::sweeper()
2219 {
2220     return m_sweeper.get();
2221 }
2222
2223 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2224 {
2225     if (m_fullActivityCallback)
2226         m_fullActivityCallback->setEnabled(enable);
2227     if (m_edenActivityCallback)
2228         m_edenActivityCallback->setEnabled(enable);
2229 }
2230
2231 void Heap::didAllocate(size_t bytes)
2232 {
2233     if (m_edenActivityCallback)
2234         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2235     m_bytesAllocatedThisCycle += bytes;
2236     performIncrement(bytes);
2237 }
2238
2239 bool Heap::isValidAllocation(size_t)
2240 {
2241     if (!isValidThreadState(m_vm))
2242         return false;
2243
2244     if (isCurrentThreadBusy())
2245         return false;
2246     
2247     return true;
2248 }
2249
2250 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2251 {
2252     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2253 }
2254
2255 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2256 {
2257     HandleSlot slot = handle.slot();
2258     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2259     finalizer(slot->asCell());
2260     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2261 }
2262
2263 void Heap::addExecutable(ExecutableBase* executable)
2264 {
2265     m_executables.append(executable);
2266 }
2267
2268 void Heap::collectAllGarbageIfNotDoneRecently()
2269 {
2270     if (!m_fullActivityCallback) {
2271         collectAllGarbage();
2272         return;
2273     }
2274
2275     if (m_fullActivityCallback->didSyncGCRecently()) {
2276         // A synchronous GC was already requested recently so we merely accelerate next collection.
2277         reportAbandonedObjectGraph();
2278         return;
2279     }
2280
2281     m_fullActivityCallback->setDidSyncGCRecently();
2282     collectAllGarbage();
2283 }
2284
2285 bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
2286 {
2287     if (!Options::useGenerationalGC())
2288         return true;
2289
2290     if (!scope)
2291         return m_shouldDoFullCollection;
2292     return *scope == CollectionScope::Full;
2293 }
2294
2295 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2296 {
2297     m_logicallyEmptyWeakBlocks.append(block);
2298 }
2299
2300 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2301 {
2302     if (m_logicallyEmptyWeakBlocks.isEmpty())
2303         return;
2304
2305     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2306     while (sweepNextLogicallyEmptyWeakBlock()) { }
2307 }
2308
2309 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2310 {
2311     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2312         return false;
2313
2314     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2315
2316     block->sweep();
2317     if (block->isEmpty()) {
2318         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2319         m_logicallyEmptyWeakBlocks.removeLast();
2320         WeakBlock::destroy(*this, block);
2321     } else
2322         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2323
2324     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2325         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2326         return false;
2327     }
2328
2329     return true;
2330 }
2331
2332 size_t Heap::visitCount()
2333 {
2334     size_t result = 0;
2335     forEachSlotVisitor(
2336         [&] (SlotVisitor& visitor) {
2337             result += visitor.visitCount();
2338         });
2339     return result;
2340 }
2341
2342 size_t Heap::bytesVisited()
2343 {
2344     size_t result = 0;
2345     forEachSlotVisitor(
2346         [&] (SlotVisitor& visitor) {
2347             result += visitor.bytesVisited();
2348         });
2349     return result;
2350 }
2351
2352 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2353 {
2354     // We don't know the full set of CodeBlocks until compilation has terminated.
2355     completeAllJITPlans();
2356
2357     return m_codeBlocks->iterate(func);
2358 }
2359
2360 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2361 {
2362     return m_codeBlocks->iterate(locker, func);
2363 }
2364
2365 void Heap::writeBarrierSlowPath(const JSCell* from)
2366 {
2367     if (UNLIKELY(mutatorShouldBeFenced())) {
2368         // In this case, the barrierThreshold is the tautological threshold, so 'from' may still
2369         // not be black. But we can't know for sure until we fire off a fence.
2370         WTF::storeLoadFence();
2371         if (from->cellState() != CellState::PossiblyBlack)
2372             return;
2373     }
2374     
2375     addToRememberedSet(from);
2376 }
2377
2378 bool Heap::isCurrentThreadBusy()
2379 {
2380     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2381 }
2382
2383 void Heap::reportExtraMemoryVisited(size_t size)
2384 {
2385     size_t* counter = &m_extraMemorySize;
2386     
2387     for (;;) {
2388         size_t oldSize = *counter;
2389         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2390             return;
2391     }
2392 }
2393
2394 #if ENABLE(RESOURCE_USAGE)
2395 void Heap::reportExternalMemoryVisited(size_t size)
2396 {
2397     size_t* counter = &m_externalMemorySize;
2398
2399     for (;;) {
2400         size_t oldSize = *counter;
2401         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2402             return;
2403     }
2404 }
2405 #endif
2406
2407 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2408 {
2409     ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
2410
2411     if (!m_isSafeToCollect)
2412         return;
2413     switch (mutatorState()) {
2414     case MutatorState::Running:
2415     case MutatorState::Allocating:
2416         break;
2417     case MutatorState::Sweeping:
2418     case MutatorState::Collecting:
2419         return;
2420     }
2421     if (!Options::useGC())
2422         return;
2423     
2424     if (mayNeedToStop()) {
2425         if (deferralContext)
2426             deferralContext->m_shouldGC = true;
2427         else if (isDeferred())
2428             m_didDeferGCWork = true;
2429         else
2430             stopIfNecessary();
2431     }
2432     
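         // Decide whether this cycle has allocated enough to warrant a collection: compare against
         // the gcMaxHeapSize() override if set, otherwise against the Eden budget computed by
         // updateAllocationLimits().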
2433     if (UNLIKELY(Options::gcMaxHeapSize())) {
2434         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2435             return;
2436     } else {
2437         if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
2438             return;
2439     }
2440
2441     if (deferralContext)
2442         deferralContext->m_shouldGC = true;
2443     else if (isDeferred())
2444         m_didDeferGCWork = true;
2445     else {
2446         collectAsync();
2447         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2448     }
2449 }
2450
2451 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2452 {
2453     // Can't do anything if we're still deferred.
2454     if (m_deferralDepth)
2455         return;
2456     
2457     ASSERT(!isDeferred());
2458     
2459     m_didDeferGCWork = false;
2460     // FIXME: Bring back something like the DeferGCProbability mode.
2461     // https://bugs.webkit.org/show_bug.cgi?id=166627
2462     collectIfNecessaryOrDefer();
2463 }
2464
2465 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2466 {
2467     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2468 }
2469
2470 void Heap::unregisterWeakGCMap(void* weakGCMap)
2471 {
2472     m_weakGCMaps.remove(weakGCMap);
2473 }
2474
2475 void Heap::didAllocateBlock(size_t capacity)
2476 {
2477 #if ENABLE(RESOURCE_USAGE)
2478     m_blockBytesAllocated += capacity;
2479 #else
2480     UNUSED_PARAM(capacity);
2481 #endif
2482 }
2483
2484 void Heap::didFreeBlock(size_t capacity)
2485 {
2486 #if ENABLE(RESOURCE_USAGE)
2487     m_blockBytesAllocated -= capacity;
2488 #else
2489     UNUSED_PARAM(capacity);
2490 #endif
2491 }
2492
2493 #if USE(CF)
2494 void Heap::setRunLoop(CFRunLoopRef runLoop)
2495 {
2496     m_runLoop = runLoop;
2497     m_fullActivityCallback->setRunLoop(runLoop);
2498     m_edenActivityCallback->setRunLoop(runLoop);
2499     m_sweeper->setRunLoop(runLoop);
2500 }
2501 #endif // USE(CF)
2502
2503 void Heap::addCoreConstraints()
2504 {
2505     m_constraintSet->add(
2506         "Cs", "Conservative Scan",
2507         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2508             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2509             m_objectSpace.prepareForConservativeScan();
2510             ConservativeRoots conservativeRoots(*this);
2511             SuperSamplerScope superSamplerScope(false);
2512             gatherStackRoots(conservativeRoots);
2513             gatherJSStackRoots(conservativeRoots);
2514             gatherScratchBufferRoots(conservativeRoots);
2515             slotVisitor.append(conservativeRoots);
2516         },
2517         ConstraintVolatility::GreyedByExecution);
2518     
2519     m_constraintSet->add(
2520         "Msr", "Misc Small Roots",
2521         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2522 #if JSC_OBJC_API_ENABLED
2523             scanExternalRememberedSet(*m_vm, slotVisitor);
2524 #endif
2525
2526             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2527                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2528             
2529             for (auto& pair : m_protectedValues)
2530                 slotVisitor.appendUnbarriered(pair.key);
2531             
2532             if (m_markListSet && m_markListSet->size())
2533                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2534             
2535             slotVisitor.appendUnbarriered(m_vm->exception());
2536             slotVisitor.appendUnbarriered(m_vm->lastException());
2537         },
2538         ConstraintVolatility::GreyedByExecution);
2539     
2540     m_constraintSet->add(
2541         "Sh", "Strong Handles",
2542         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2543             m_handleSet.visitStrongHandles(slotVisitor);
2544             m_handleStack.visit(slotVisitor);
2545         },
2546         ConstraintVolatility::GreyedByExecution);
2547     
2548     m_constraintSet->add(
2549         "D", "Debugger",
2550         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2551 #if ENABLE(SAMPLING_PROFILER)
2552             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2553                 LockHolder locker(samplingProfiler->getLock());
2554                 samplingProfiler->processUnverifiedStackTraces();
2555                 samplingProfiler->visit(slotVisitor);
2556                 if (Options::logGC() == GCLogging::Verbose)
2557                     dataLog("Sampling Profiler data:\n", slotVisitor);
2558             }
2559 #endif // ENABLE(SAMPLING_PROFILER)
2560             
2561             if (m_vm->typeProfiler())
2562                 m_vm->typeProfilerLog()->visit(slotVisitor);
2563             
2564             m_vm->shadowChicken().visitChildren(slotVisitor);
2565         },
2566         ConstraintVolatility::GreyedByExecution);
2567     
2568     m_constraintSet->add(
2569         "Jsr", "JIT Stub Routines",
2570         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2571             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2572         },
2573         ConstraintVolatility::GreyedByExecution);
2574     
2575     m_constraintSet->add(
2576         "Ws", "Weak Sets",
2577         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2578             m_objectSpace.visitWeakSets(slotVisitor);
2579         },
2580         ConstraintVolatility::GreyedByMarking);
2581     
2582     m_constraintSet->add(
2583         "Wrh", "Weak Reference Harvesters",
2584         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2585             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2586                 current->visitWeakReferences(slotVisitor);
2587         },
2588         ConstraintVolatility::GreyedByMarking);
2589     
2590 #if ENABLE(DFG_JIT)
2591     m_constraintSet->add(
2592         "Dw", "DFG Worklists",
2593         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2594             for (unsigned i = DFG::numberOfWorklists(); i--;)
2595                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2596             
2597             // FIXME: This is almost certainly unnecessary.
2598             // https://bugs.webkit.org/show_bug.cgi?id=166829
2599             DFG::iterateCodeBlocksForGC(
2600                 *m_vm,
2601                 [&] (CodeBlock* codeBlock) {
2602                     slotVisitor.appendUnbarriered(codeBlock);
2603                 });
2604             
2605             if (Options::logGC() == GCLogging::Verbose)
2606                 dataLog("DFG Worklists:\n", slotVisitor);
2607         },
2608         ConstraintVolatility::GreyedByMarking);
2609 #endif
2610     
2611     m_constraintSet->add(
2612         "Cb", "CodeBlocks",
2613         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2614             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2615                 [&] (CodeBlock* codeBlock) {
2616                     // Visit the CodeBlock as a constraint only if it's black.
2617                     if (Heap::isMarked(codeBlock)
2618                         && codeBlock->cellState() == CellState::PossiblyBlack)
2619                         slotVisitor.visitAsConstraint(codeBlock);
2620                 });
2621         },
2622         ConstraintVolatility::SeldomGreyed);
2623     
2624     m_constraintSet->add(
2625         "Mrms", "Mutator+Race Mark Stack",
2626         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2627             // Indicate to the fixpoint that we introduced work!
2628             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2629             slotVisitor.addToVisitCount(size);
2630             
2631             if (Options::logGC())
2632                 dataLog("(", size, ")");
2633             
2634             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2635             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2636         },
2637         [this] (SlotVisitor&) -> double {
2638             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2639         },
2640         ConstraintVolatility::GreyedByExecution);
2641 }
2642
2643 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2644 {
2645     PreventCollectionScope preventCollectionScope(*this);
2646     m_constraintSet->add(WTFMove(constraint));
2647 }
2648
2649 void Heap::notifyIsSafeToCollect()
2650 {
2651     MonotonicTime before;
2652     if (Options::logGC()) {
2653         before = MonotonicTime::now();
2654         dataLog("[GC<", RawPointer(this), ">: starting ");
2655     }
2656     
2657     addCoreConstraints();
2658     
2659     m_isSafeToCollect = true;
2660     
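         // Debug aid: when collectContinuously is enabled, spin up a thread that keeps requesting
         // collections on a fixed period until the heap is told to stop.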
2661     if (Options::collectContinuously()) {
2662         m_collectContinuouslyThread = createThread(
2663             "JSC DEBUG Continuous GC",
2664             [this] () {
2665                 MonotonicTime initialTime = MonotonicTime::now();
2666                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2667                 while (!m_shouldStopCollectingContinuously) {
2668                     {
2669                         LockHolder locker(*m_threadLock);
2670                         if (m_requests.isEmpty()) {
2671                             m_requests.append(std::nullopt);
2672                             m_lastGrantedTicket++;
2673                             m_threadCondition->notifyOne(locker);
2674                         }
2675                     }
2676                     
2677                     {
2678                         LockHolder locker(m_collectContinuouslyLock);
2679                         Seconds elapsed = MonotonicTime::now() - initialTime;
2680                         Seconds elapsedInPeriod = elapsed % period;
2681                         MonotonicTime timeToWakeUp =
2682                             initialTime + elapsed - elapsedInPeriod + period;
2683                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2684                             m_collectContinuouslyCondition.waitUntil(
2685                                 m_collectContinuouslyLock, timeToWakeUp);
2686                         }
2687                     }
2688                 }
2689             });
2690     }
2691     
2692     if (Options::logGC())
2693         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2694 }
2695
2696 void Heap::preventCollection()
2697 {
2698     if (!m_isSafeToCollect)
2699         return;
2700     
2701     // This prevents the collectContinuously thread from starting a collection.
2702     m_collectContinuouslyLock.lock();
2703     
2704     // Wait for all collections to finish.
2705     waitForCollector(
2706         [&] (const AbstractLocker&) -> bool {
2707             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2708             return m_lastServedTicket == m_lastGrantedTicket;
2709         });
2710     
2711     // Now a collection can only start if this thread starts it.
2712     RELEASE_ASSERT(!m_collectionScope);
2713 }
2714
2715 void Heap::allowCollection()
2716 {
2717     if (!m_isSafeToCollect)
2718         return;
2719     
2720     m_collectContinuouslyLock.unlock();
2721 }
2722
2723 template<typename Func>
2724 void Heap::forEachSlotVisitor(const Func& func)
2725 {
2726     auto locker = holdLock(m_parallelSlotVisitorLock);
2727     func(*m_collectorSlotVisitor);
2728     func(*m_mutatorSlotVisitor);
2729     for (auto& slotVisitor : m_parallelSlotVisitors)
2730         func(*slotVisitor);
2731 }
2732
2733 void Heap::setMutatorShouldBeFenced(bool value)
2734 {
2735     m_mutatorShouldBeFenced = value;
2736     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2737 }
2738
2739 void Heap::performIncrement(size_t bytes)
2740 {
2741     if (!m_objectSpace.isMarking())
2742         return;
2743
2744     m_incrementBalance += bytes * Options::gcIncrementScale();
2745
2746     // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
2747     // state when the double goes wild.
2748     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2749         m_incrementBalance = 0;
2750     
2751     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2752         return;
2753
2754     double targetBytes = m_incrementBalance;
2755     if (targetBytes <= 0)
2756         return;
2757     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2758
2759     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2760     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2761     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2762     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2763     m_incrementBalance -= bytesVisited;
2764 }
2765
2766 } // namespace JSC