[WebKit-https.git] / Source / JavaScriptCore / heap / Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "MachineStackMarker.h"
51 #include "MarkedSpaceInlines.h"
52 #include "MarkingConstraintSet.h"
53 #include "PreventCollectionScope.h"
54 #include "SamplingProfiler.h"
55 #include "ShadowChicken.h"
56 #include "SpaceTimeMutatorScheduler.h"
57 #include "SuperSampler.h"
58 #include "StochasticSpaceTimeMutatorScheduler.h"
59 #include "StopIfNecessaryTimer.h"
60 #include "SweepingScope.h"
61 #include "SynchronousStopTheWorldMutatorScheduler.h"
62 #include "TypeProfilerLog.h"
63 #include "UnlinkedCodeBlock.h"
64 #include "VM.h"
65 #include "WeakSetInlines.h"
66 #include <algorithm>
67 #include <wtf/CurrentTime.h>
68 #include <wtf/MainThread.h>
69 #include <wtf/ParallelVectorIterator.h>
70 #include <wtf/ProcessID.h>
71 #include <wtf/RAMSize.h>
72 #include <wtf/SimpleStats.h>
73
74 #if USE(FOUNDATION)
75 #if __has_include(<objc/objc-internal.h>)
76 #include <objc/objc-internal.h>
77 #else
78 extern "C" void* objc_autoreleasePoolPush(void);
79 extern "C" void objc_autoreleasePoolPop(void *context);
80 #endif
81 #endif // USE(FOUNDATION)
82
83 using namespace std;
84
85 namespace JSC {
86
87 namespace {
88
89 bool verboseStop = false;
90
91 double maxPauseMS(double thisPauseMS)
92 {
93     static double maxPauseMS;
94     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
95     return maxPauseMS;
96 }
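// Keeps a process-wide running maximum of GC pause times; within this file it is only
// consulted from the Options::logGC() logging paths further below.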
97
98 size_t minHeapSize(HeapType heapType, size_t ramSize)
99 {
100     if (heapType == LargeHeap) {
101         double result = min(
102             static_cast<double>(Options::largeHeapSize()),
103             ramSize * Options::smallHeapRAMFraction());
104         return static_cast<size_t>(result);
105     }
106     return Options::smallHeapSize();
107 }
108
109 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
110 {
111     if (heapSize < ramSize * Options::smallHeapRAMFraction())
112         return Options::smallHeapGrowthFactor() * heapSize;
113     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
114         return Options::mediumHeapGrowthFactor() * heapSize;
115     return Options::largeHeapGrowthFactor() * heapSize;
116 }
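// Worked example (hypothetical values; the real thresholds and factors come from
// Options): with ramSize = 8GB and smallHeapRAMFraction() = 0.25, a 100MB heap is
// under the 2GB threshold, so the next full collection is deferred until the heap
// reaches smallHeapGrowthFactor() * 100MB. A heap between 25% of RAM and
// mediumHeapRAMFraction() of RAM grows by mediumHeapGrowthFactor(), and anything
// larger grows only by largeHeapGrowthFactor().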
117
118 bool isValidSharedInstanceThreadState(VM* vm)
119 {
120     return vm->currentThreadIsHoldingAPILock();
121 }
122
123 bool isValidThreadState(VM* vm)
124 {
125     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
126         return false;
127
128     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
129         return false;
130
131     return true;
132 }
133
134 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
135 {
136     const char* typeName = "[unknown]";
137     const ClassInfo* info = cell->classInfo(vm);
138     if (info && info->className)
139         typeName = info->className;
140     set.add(typeName);
141 }
142
143 bool measurePhaseTiming()
144 {
145     return false;
146 }
147
148 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
149 {
150     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
151     static std::once_flag once;
152     std::call_once(
153         once,
154         [] {
155             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
156         });
157     return *result;
158 }
159
160 SimpleStats& timingStats(const char* name, CollectionScope scope)
161 {
162     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
163 }
164
165 class TimingScope {
166 public:
167     TimingScope(std::optional<CollectionScope> scope, const char* name)
168         : m_scope(scope)
169         , m_name(name)
170     {
171         if (measurePhaseTiming())
172             m_before = monotonicallyIncreasingTimeMS();
173     }
174     
175     TimingScope(Heap& heap, const char* name)
176         : TimingScope(heap.collectionScope(), name)
177     {
178     }
179     
180     void setScope(std::optional<CollectionScope> scope)
181     {
182         m_scope = scope;
183     }
184     
185     void setScope(Heap& heap)
186     {
187         setScope(heap.collectionScope());
188     }
189     
190     ~TimingScope()
191     {
192         if (measurePhaseTiming()) {
193             double after = monotonicallyIncreasingTimeMS();
194             double timing = after - m_before;
195             SimpleStats& stats = timingStats(m_name, *m_scope);
196             stats.add(timing);
197             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
198         }
199     }
200 private:
201     std::optional<CollectionScope> m_scope;
202     double m_before;
203     const char* m_name;
204 };
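// Typical usage, as in Heap::beginMarking() below: put a TimingScope on the stack at
// the top of a phase and let its destructor record and log the elapsed time. Timing is
// only gathered when measurePhaseTiming() above is changed to return true.
//
//     TimingScope timingScope(*this, "Heap::beginMarking");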
205
206 } // anonymous namespace
207
208 class Heap::Thread : public AutomaticThread {
209 public:
210     Thread(const AbstractLocker& locker, Heap& heap)
211         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
212         , m_heap(heap)
213     {
214     }
215     
216 protected:
217     PollResult poll(const AbstractLocker& locker) override
218     {
219         if (m_heap.m_threadShouldStop) {
220             m_heap.notifyThreadStopping(locker);
221             return PollResult::Stop;
222         }
223         if (m_heap.shouldCollectInCollectorThread(locker))
224             return PollResult::Work;
225         return PollResult::Wait;
226     }
227     
228     WorkResult work() override
229     {
230         m_heap.collectInCollectorThread();
231         return WorkResult::Continue;
232     }
233     
234     void threadDidStart() override
235     {
236         WTF::registerGCThread(GCThreadType::Main);
237     }
238
239 private:
240     Heap& m_heap;
241 };
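// The collector thread: poll() runs with the thread lock held and decides whether the
// thread should stop, collect, or sleep; returning PollResult::Work makes the
// AutomaticThread call work(), which runs collectInCollectorThread() and then re-polls.
// The thread is created at the end of the Heap constructor and torn down in
// lastChanceToFinalize().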
242
243 Heap::Heap(VM* vm, HeapType heapType)
244     : m_heapType(heapType)
245     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
246     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
247     , m_sizeAfterLastCollect(0)
248     , m_sizeAfterLastFullCollect(0)
249     , m_sizeBeforeLastFullCollect(0)
250     , m_sizeAfterLastEdenCollect(0)
251     , m_sizeBeforeLastEdenCollect(0)
252     , m_bytesAllocatedThisCycle(0)
253     , m_bytesAbandonedSinceLastFullCollect(0)
254     , m_maxEdenSize(m_minBytesPerCycle)
255     , m_maxHeapSize(m_minBytesPerCycle)
256     , m_shouldDoFullCollection(false)
257     , m_totalBytesVisited(0)
258     , m_objectSpace(this)
259     , m_extraMemorySize(0)
260     , m_deprecatedExtraMemorySize(0)
261     , m_machineThreads(std::make_unique<MachineThreads>())
262     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
263     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
264     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
265     , m_raceMarkStack(std::make_unique<MarkStackArray>())
266     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
267     , m_handleSet(vm)
268     , m_codeBlocks(std::make_unique<CodeBlockSet>())
269     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
270     , m_isSafeToCollect(false)
271     , m_vm(vm)
272     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
273     // schedule the timer if we've never done a collection.
274     , m_lastFullGCLength(0.01)
275     , m_lastEdenGCLength(0.01)
276 #if USE(CF)
277     , m_runLoop(CFRunLoopGetCurrent())
278 #endif // USE(CF)
279     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
280     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
281     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
282     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
283     , m_deferralDepth(0)
284 #if USE(FOUNDATION)
285     , m_delayedReleaseRecursionCount(0)
286 #endif
287     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
288     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
289     , m_helperClient(&heapHelperPool())
290     , m_threadLock(Box<Lock>::create())
291     , m_threadCondition(AutomaticThreadCondition::create())
292 {
293     m_worldState.store(0);
294     
295     if (Options::useConcurrentGC()) {
296         if (Options::useStochasticMutatorScheduler())
297             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
298         else
299             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
300     } else {
301         // We simulate turning off concurrent GC by making the scheduler say that the world
302         // should always be stopped when the collector is running.
303         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
304     }
305     
306     if (Options::verifyHeap())
307         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
308     
309     m_collectorSlotVisitor->optimizeForStoppedMutator();
310
311     LockHolder locker(*m_threadLock);
312     m_thread = adoptRef(new Thread(locker, *this));
313 }
314
315 Heap::~Heap()
316 {
317     forEachSlotVisitor(
318         [&] (SlotVisitor& visitor) {
319             visitor.clearMarkStacks();
320         });
321     m_mutatorMarkStack->clear();
322     m_raceMarkStack->clear();
323     
324     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
325         WeakBlock::destroy(*this, block);
326 }
327
328 bool Heap::isPagedOut(double deadline)
329 {
330     return m_objectSpace.isPagedOut(deadline);
331 }
332
333 // The VM is being destroyed and the collector will never run again.
334 // Run all pending finalizers now because we won't get another chance.
335 void Heap::lastChanceToFinalize()
336 {
337     MonotonicTime before;
338     if (Options::logGC()) {
339         before = MonotonicTime::now();
340         dataLog("[GC<", RawPointer(this), ">: shutdown ");
341     }
342     
343     RELEASE_ASSERT(!m_vm->entryScope);
344     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
345     
346     if (m_collectContinuouslyThread) {
347         {
348             LockHolder locker(m_collectContinuouslyLock);
349             m_shouldStopCollectingContinuously = true;
350             m_collectContinuouslyCondition.notifyOne();
351         }
352         waitForThreadCompletion(m_collectContinuouslyThread);
353     }
354     
355     if (Options::logGC())
356         dataLog("1");
357     
358     // Prevent new collections from being started. This is probably not even necessary, since we're not
359     // going to call into anything that starts collections. Still, this makes the algorithm more
360     // obviously sound.
361     m_isSafeToCollect = false;
362     
363     if (Options::logGC())
364         dataLog("2");
365
366     bool isCollecting;
367     {
368         auto locker = holdLock(*m_threadLock);
369         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
370         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
371     }
372     if (isCollecting) {
373         if (Options::logGC())
374             dataLog("...]\n");
375         
376         // Wait for the current collection to finish.
377         waitForCollector(
378             [&] (const AbstractLocker&) -> bool {
379                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
380                 return m_lastServedTicket == m_lastGrantedTicket;
381             });
382         
383         if (Options::logGC())
384             dataLog("[GC<", RawPointer(this), ">: shutdown ");
385     }
386     if (Options::logGC())
387         dataLog("3");
388
389     RELEASE_ASSERT(m_requests.isEmpty());
390     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
391     
392     // Carefully bring the thread down.
393     bool stopped = false;
394     {
395         LockHolder locker(*m_threadLock);
396         stopped = m_thread->tryStop(locker);
397         m_threadShouldStop = true;
398         if (!stopped)
399             m_threadCondition->notifyOne(locker);
400     }
401
402     if (Options::logGC())
403         dataLog("4");
404     
405     if (!stopped)
406         m_thread->join();
407     
408     if (Options::logGC())
409         dataLog("5 ");
410     
411     m_arrayBuffers.lastChanceToFinalize();
412     m_codeBlocks->lastChanceToFinalize(*m_vm);
413     m_objectSpace.stopAllocating();
414     m_objectSpace.lastChanceToFinalize();
415     releaseDelayedReleasedObjects();
416
417     sweepAllLogicallyEmptyWeakBlocks();
418     
419     if (Options::logGC())
420         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
421 }
422
423 void Heap::releaseDelayedReleasedObjects()
424 {
425 #if USE(FOUNDATION)
426     // We need to guard against the case that releasing an object can create more objects due to the
427     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
428     // back here and could try to recursively release objects. We guard that with a recursive entry
429     // count. Only the initial call will release objects; recursive calls simply return and let
430     // the initial call to the function take care of any objects created during release time.
431     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
432     // and use a temp Vector for the actual releasing.
433     if (!m_delayedReleaseRecursionCount++) {
434         while (!m_delayedReleaseObjects.isEmpty()) {
435             ASSERT(m_vm->currentThreadIsHoldingAPILock());
436
437             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
438
439             {
440                 // We need to drop locks before calling out to arbitrary code.
441                 JSLock::DropAllLocks dropAllLocks(m_vm);
442
443                 void* context = objc_autoreleasePoolPush();
444                 objectsToRelease.clear();
445                 objc_autoreleasePoolPop(context);
446             }
447         }
448     }
449     m_delayedReleaseRecursionCount--;
450 #endif
451 }
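// A minimal sketch of the recursion-guard pattern used above (names here are
// illustrative, not part of this file):
//
//     if (!m_recursionCount++) {
//         while (!m_pending.isEmpty()) {
//             auto batch = WTFMove(m_pending); // snapshot; callees may append to m_pending again
//             releaseWithoutLocks(batch);      // may re-enter; the guard turns re-entry into a no-op
//         }
//     }
//     m_recursionCount--;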
452
453 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
454 {
455     didAllocate(size);
456     collectIfNecessaryOrDefer();
457 }
458
459 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
460 {
461     // FIXME: Change this to use SaturatedArithmetic when available.
462     // https://bugs.webkit.org/show_bug.cgi?id=170411
463     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
464     checkedNewSize += size;
465     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
466     reportExtraMemoryAllocatedSlowCase(size);
467 }
468
469 void Heap::reportAbandonedObjectGraph()
470 {
471     // Our clients don't know exactly how much memory they
472     // are abandoning so we just guess for them.
473     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
474
475     // We want to accelerate the next collection. Because memory has just 
476     // been abandoned, the next collection has the potential to 
477     // be more profitable. Since allocation is the trigger for collection, 
478     // we hasten the next collection by pretending that we've allocated more memory. 
479     if (m_fullActivityCallback) {
480         m_fullActivityCallback->didAllocate(
481             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
482     }
483     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
484 }
485
486 void Heap::protect(JSValue k)
487 {
488     ASSERT(k);
489     ASSERT(m_vm->currentThreadIsHoldingAPILock());
490
491     if (!k.isCell())
492         return;
493
494     m_protectedValues.add(k.asCell());
495 }
496
497 bool Heap::unprotect(JSValue k)
498 {
499     ASSERT(k);
500     ASSERT(m_vm->currentThreadIsHoldingAPILock());
501
502     if (!k.isCell())
503         return false;
504
505     return m_protectedValues.remove(k.asCell());
506 }
507
508 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
509 {
510     if (m_arrayBuffers.addReference(cell, buffer)) {
511         collectIfNecessaryOrDefer();
512         didAllocate(buffer->gcSizeEstimateInBytes());
513     }
514 }
515
516 void Heap::finalizeUnconditionalFinalizers()
517 {
518     while (m_unconditionalFinalizers.hasNext()) {
519         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
520         finalizer->finalizeUnconditionally();
521     }
522 }
523
524 void Heap::willStartIterating()
525 {
526     m_objectSpace.willStartIterating();
527 }
528
529 void Heap::didFinishIterating()
530 {
531     m_objectSpace.didFinishIterating();
532 }
533
534 void Heap::completeAllJITPlans()
535 {
536 #if ENABLE(JIT)
537     JITWorklist::instance()->completeAllForVM(*m_vm);
538 #endif // ENABLE(JIT)
539     DFG::completeAllPlansForVM(*m_vm);
540 }
541
542 template<typename Func>
543 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
544 {
545     m_codeBlocks->iterateCurrentlyExecuting(func);
546     DFG::iterateCodeBlocksForGC(*m_vm, func);
547 }
548
549 template<typename Func>
550 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
551 {
552     Vector<CodeBlock*, 256> codeBlocks;
553     iterateExecutingAndCompilingCodeBlocks(
554         [&] (CodeBlock* codeBlock) {
555             codeBlocks.append(codeBlock);
556         });
557     for (CodeBlock* codeBlock : codeBlocks)
558         func(codeBlock);
559 }
560
561 void Heap::assertSharedMarkStacksEmpty()
562 {
563     bool ok = true;
564     
565     if (!m_sharedCollectorMarkStack->isEmpty()) {
566         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
567         ok = false;
568     }
569     
570     if (!m_sharedMutatorMarkStack->isEmpty()) {
571         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
572         ok = false;
573     }
574     
575     RELEASE_ASSERT(ok);
576 }
577
578 void Heap::gatherStackRoots(ConservativeRoots& roots)
579 {
580     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
581 }
582
583 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
584 {
585 #if !ENABLE(JIT)
586     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
587 #else
588     UNUSED_PARAM(roots);
589 #endif
590 }
591
592 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
593 {
594 #if ENABLE(DFG_JIT)
595     m_vm->gatherConservativeRoots(roots);
596 #else
597     UNUSED_PARAM(roots);
598 #endif
599 }
600
601 void Heap::beginMarking()
602 {
603     TimingScope timingScope(*this, "Heap::beginMarking");
604     if (m_collectionScope == CollectionScope::Full)
605         m_codeBlocks->clearMarksForFullCollection();
606     m_jitStubRoutines->clearMarks();
607     m_objectSpace.beginMarking();
608     setMutatorShouldBeFenced(true);
609 }
610
611 void Heap::removeDeadCompilerWorklistEntries()
612 {
613 #if ENABLE(DFG_JIT)
614     for (unsigned i = DFG::numberOfWorklists(); i--;)
615         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
616 #endif
617 }
618
619 bool Heap::isHeapSnapshotting() const
620 {
621     HeapProfiler* heapProfiler = m_vm->heapProfiler();
622     if (UNLIKELY(heapProfiler))
623         return heapProfiler->activeSnapshotBuilder();
624     return false;
625 }
626
627 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
628     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
629         : m_builder(builder)
630     {
631     }
632
633     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
634     {
635         if (kind == HeapCell::JSCell) {
636             JSCell* cell = static_cast<JSCell*>(heapCell);
637             cell->methodTable()->heapSnapshot(cell, m_builder);
638         }
639         return IterationStatus::Continue;
640     }
641
642     HeapSnapshotBuilder& m_builder;
643 };
644
645 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
646 {
647     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
648         HeapIterationScope heapIterationScope(*this);
649         GatherHeapSnapshotData functor(*builder);
650         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
651     }
652 }
653
654 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
655     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
656         : m_snapshot(snapshot)
657     {
658     }
659
660     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
661     {
662         if (kind == HeapCell::JSCell)
663             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
664         return IterationStatus::Continue;
665     }
666
667     HeapSnapshot& m_snapshot;
668 };
669
670 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
671 {
672     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
673         HeapIterationScope heapIterationScope(*this);
674         RemoveDeadHeapSnapshotNodes functor(*snapshot);
675         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
676         snapshot->shrinkToFit();
677     }
678 }
679
680 void Heap::updateObjectCounts()
681 {
682     if (m_collectionScope == CollectionScope::Full)
683         m_totalBytesVisited = 0;
684
685     m_totalBytesVisitedThisCycle = bytesVisited();
686     
687     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
688 }
689
690 void Heap::endMarking()
691 {
692     forEachSlotVisitor(
693         [&] (SlotVisitor& visitor) {
694             visitor.reset();
695         });
696
697     assertSharedMarkStacksEmpty();
698     m_weakReferenceHarvesters.removeAll();
699
700     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
701     
702     m_objectSpace.endMarking();
703     setMutatorShouldBeFenced(Options::forceFencedBarrier());
704 }
705
706 size_t Heap::objectCount()
707 {
708     return m_objectSpace.objectCount();
709 }
710
711 size_t Heap::extraMemorySize()
712 {
713     // FIXME: Change this to use SaturatedArithmetic when available.
714     // https://bugs.webkit.org/show_bug.cgi?id=170411
715     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
716     checkedTotal += m_deprecatedExtraMemorySize;
717     checkedTotal += m_arrayBuffers.size();
718     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
719
720     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
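    // Clamp so that size() and capacity() below can add m_objectSpace.size() or
    // m_objectSpace.capacity() to this total without overflowing size_t.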
721     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
722 }
723
724 size_t Heap::size()
725 {
726     return m_objectSpace.size() + extraMemorySize();
727 }
728
729 size_t Heap::capacity()
730 {
731     return m_objectSpace.capacity() + extraMemorySize();
732 }
733
734 size_t Heap::protectedGlobalObjectCount()
735 {
736     size_t result = 0;
737     forEachProtectedCell(
738         [&] (JSCell* cell) {
739             if (cell->isObject() && asObject(cell)->isGlobalObject())
740                 result++;
741         });
742     return result;
743 }
744
745 size_t Heap::globalObjectCount()
746 {
747     HeapIterationScope iterationScope(*this);
748     size_t result = 0;
749     m_objectSpace.forEachLiveCell(
750         iterationScope,
751         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
752             if (kind != HeapCell::JSCell)
753                 return IterationStatus::Continue;
754             JSCell* cell = static_cast<JSCell*>(heapCell);
755             if (cell->isObject() && asObject(cell)->isGlobalObject())
756                 result++;
757             return IterationStatus::Continue;
758         });
759     return result;
760 }
761
762 size_t Heap::protectedObjectCount()
763 {
764     size_t result = 0;
765     forEachProtectedCell(
766         [&] (JSCell*) {
767             result++;
768         });
769     return result;
770 }
771
772 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
773 {
774     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
775     forEachProtectedCell(
776         [&] (JSCell* cell) {
777             recordType(*vm(), *result, cell);
778         });
779     return result;
780 }
781
782 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
783 {
784     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
785     HeapIterationScope iterationScope(*this);
786     m_objectSpace.forEachLiveCell(
787         iterationScope,
788         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
789             if (kind == HeapCell::JSCell)
790                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
791             return IterationStatus::Continue;
792         });
793     return result;
794 }
795
796 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
797 {
798     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
799         return;
800     
801     PreventCollectionScope preventCollectionScope(*this);
802     
803     // If JavaScript is running, it's not safe to delete all JavaScript code, since
804     // we'll end up returning to deleted code.
805     RELEASE_ASSERT(!m_vm->entryScope);
806     RELEASE_ASSERT(!m_collectionScope);
807
808     completeAllJITPlans();
809
810     for (ExecutableBase* executable : m_executables)
811         executable->clearCode();
812 }
813
814 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
815 {
816     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
817         return;
818     
819     PreventCollectionScope preventCollectionScope(*this);
820
821     RELEASE_ASSERT(!m_collectionScope);
822     
823     for (ExecutableBase* current : m_executables) {
824         if (!current->isFunctionExecutable())
825             continue;
826         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
827     }
828 }
829
830 void Heap::clearUnmarkedExecutables()
831 {
832     for (unsigned i = m_executables.size(); i--;) {
833         ExecutableBase* current = m_executables[i];
834         if (isMarked(current))
835             continue;
836
837         // Eagerly dereference the Executable's JITCode in order to run watchpoint
838         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
839         current->clearCode();
840         std::swap(m_executables[i], m_executables.last());
841         m_executables.removeLast();
842     }
843
844     m_executables.shrinkToFit();
845 }
846
847 void Heap::deleteUnmarkedCompiledCode()
848 {
849     clearUnmarkedExecutables();
850     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
851     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
852 }
853
854 void Heap::addToRememberedSet(const JSCell* constCell)
855 {
856     JSCell* cell = const_cast<JSCell*>(constCell);
857     ASSERT(cell);
858     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
859     m_barriersExecuted++;
860     if (m_mutatorShouldBeFenced) {
861         WTF::loadLoadFence();
862         if (!isMarkedConcurrently(cell)) {
863             // During a full collection a store into an unmarked object that had survived past
864             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
865             // object gets marked at some time after this then it will go down the normal marking
866             // path. So, we don't have to remember this object. We could return here. But we go
867             // further and attempt to re-white the object.
868             
869             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
870             
871             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
872                 // Now we protect against this race:
873                 //
874                 //     1) Object starts out black + unmarked.
875                 //     --> We do isMarkedConcurrently here.
876                 //     2) Object is marked and greyed.
877                 //     3) Object is scanned and blacked.
878                 //     --> We do atomicCompareExchangeCellStateStrong here.
879                 //
880                 // In this case we would have made the object white again, even though it should
881                 // be black. This check lets us correct our mistake. This relies on the fact that
882                 // isMarkedConcurrently converges monotonically to true.
883                 if (isMarkedConcurrently(cell)) {
884                     // It's difficult to work out whether the object should be grey or black at
885                     // this point. We say black conservatively.
886                     cell->setCellState(CellState::PossiblyBlack);
887                 }
888                 
889                 // Either way, we can return. Most likely, the object was not marked, and so the
890                 // object is now labeled white. This means that future barrier executions will not
891                 // fire. In the unlikely event that the object had become marked, we can still
892                 // return anyway, since we proved that the object was not marked at the time that
893                 // we executed this slow path.
894             }
895             
896             return;
897         }
898     } else
899         ASSERT(Heap::isMarkedConcurrently(cell));
900     // It could be that the object was *just* marked. This means that the collector may set the
901     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
902     // race with the collector here. If we win then this is accurate because the object _will_
903     // get scanned again. If we lose then someone else will barrier the object again. That would
904     // be unfortunate but not the end of the world.
905     cell->setCellState(CellState::PossiblyGrey);
906     m_mutatorMarkStack->append(cell);
907 }
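// Rough cell-state summary for the barrier above (see CellState.h for the
// authoritative definitions): DefinitelyWhite cells have not been visited,
// PossiblyGrey cells are (or may be) queued on a mark stack for scanning, and
// PossiblyBlack cells have been scanned and will not be revisited unless a barrier
// re-greys them. This slow path exists to push stores into already-scanned objects
// onto m_mutatorMarkStack so the collector rescans them.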
908
909 void Heap::sweepSynchronously()
910 {
911     double before = 0;
912     if (Options::logGC()) {
913         dataLog("Full sweep: ", capacity() / 1024, "kb ");
914         before = currentTimeMS();
915     }
916     m_objectSpace.sweep();
917     m_objectSpace.shrink();
918     if (Options::logGC()) {
919         double after = currentTimeMS();
920         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
921     }
922 }
923
924 void Heap::collectAllGarbage()
925 {
926     if (!m_isSafeToCollect)
927         return;
928     
929     collectSync(CollectionScope::Full);
930
931     DeferGCForAWhile deferGC(*this);
932     if (UNLIKELY(Options::useImmortalObjects()))
933         sweeper()->stopSweeping();
934
935     bool alreadySweptInCollectSync = Options::sweepSynchronously();
936     if (!alreadySweptInCollectSync) {
937         if (Options::logGC())
938             dataLog("[GC<", RawPointer(this), ">: ");
939         sweepSynchronously();
940         if (Options::logGC())
941             dataLog("]\n");
942     }
943     m_objectSpace.assertNoUnswept();
944
945     sweepAllLogicallyEmptyWeakBlocks();
946 }
947
948 void Heap::collectAsync(std::optional<CollectionScope> scope)
949 {
950     if (!m_isSafeToCollect)
951         return;
952
953     bool alreadyRequested = false;
954     {
955         LockHolder locker(*m_threadLock);
956         for (std::optional<CollectionScope> request : m_requests) {
957             if (scope) {
958                 if (scope == CollectionScope::Eden) {
959                     alreadyRequested = true;
960                     break;
961                 } else {
962                     RELEASE_ASSERT(scope == CollectionScope::Full);
963                     if (request == CollectionScope::Full) {
964                         alreadyRequested = true;
965                         break;
966                     }
967                 }
968             } else {
969                 if (!request || request == CollectionScope::Full) {
970                     alreadyRequested = true;
971                     break;
972                 }
973             }
974         }
975     }
976     if (alreadyRequested)
977         return;
978
979     requestCollection(scope);
980 }
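// Note how collectAsync() coalesces requests: an Eden request is satisfied by any
// pending request, a Full request only by a pending Full request, and an unscoped
// request by a pending Full or unscoped request.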
981
982 void Heap::collectSync(std::optional<CollectionScope> scope)
983 {
984     if (!m_isSafeToCollect)
985         return;
986     
987     waitForCollection(requestCollection(scope));
988 }
989
990 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
991 {
992     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
993     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
994     
995     if (false)
996         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
997     
998     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
999 }
1000
1001 void Heap::collectInCollectorThread()
1002 {
1003     for (;;) {
1004         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1005         switch (result) {
1006         case RunCurrentPhaseResult::Finished:
1007             return;
1008         case RunCurrentPhaseResult::Continue:
1009             break;
1010         case RunCurrentPhaseResult::NeedCurrentThreadState:
1011             RELEASE_ASSERT_NOT_REACHED();
1012             break;
1013         }
1014     }
1015 }
1016
1017 void Heap::checkConn(GCConductor conn)
1018 {
1019     switch (conn) {
1020     case GCConductor::Mutator:
1021         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1022         return;
1023     case GCConductor::Collector:
1024         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1025         return;
1026     }
1027     RELEASE_ASSERT_NOT_REACHED();
1028 }
1029
1030 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1031 {
1032     checkConn(conn);
1033     m_currentThreadState = currentThreadState;
1034     
1035     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1036     if (!finishChangingPhase(conn)) {
1037         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1038         // this, but it's probably not the end of the world if it did happen.
1039         if (false)
1040             dataLog("Conn bounce-back.\n");
1041         return RunCurrentPhaseResult::Finished;
1042     }
1043     
1044     bool result = false;
1045     switch (m_currentPhase) {
1046     case CollectorPhase::NotRunning:
1047         result = runNotRunningPhase(conn);
1048         break;
1049         
1050     case CollectorPhase::Begin:
1051         result = runBeginPhase(conn);
1052         break;
1053         
1054     case CollectorPhase::Fixpoint:
1055         if (!currentThreadState && conn == GCConductor::Mutator)
1056             return RunCurrentPhaseResult::NeedCurrentThreadState;
1057         
1058         result = runFixpointPhase(conn);
1059         break;
1060         
1061     case CollectorPhase::Concurrent:
1062         result = runConcurrentPhase(conn);
1063         break;
1064         
1065     case CollectorPhase::Reloop:
1066         result = runReloopPhase(conn);
1067         break;
1068         
1069     case CollectorPhase::End:
1070         result = runEndPhase(conn);
1071         break;
1072     }
1073
1074     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1075 }
1076
1077 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1078 {
1079     // Check m_requests since the mutator calls this to poll what's going on.
1080     {
1081         auto locker = holdLock(*m_threadLock);
1082         if (m_requests.isEmpty())
1083             return false;
1084     }
1085     
1086     return changePhase(conn, CollectorPhase::Begin);
1087 }
1088
1089 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1090 {
1091     m_currentGCStartTime = MonotonicTime::now();
1092         
1093     std::optional<CollectionScope> scope;
1094     {
1095         LockHolder locker(*m_threadLock);
1096         RELEASE_ASSERT(!m_requests.isEmpty());
1097         scope = m_requests.first();
1098     }
1099         
1100     if (Options::logGC())
1101         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1102
1103     m_beforeGC = MonotonicTime::now();
1104
1105     if (m_collectionScope) {
1106         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1107         RELEASE_ASSERT_NOT_REACHED();
1108     }
1109         
1110     willStartCollection(scope);
1111         
1112     if (UNLIKELY(m_verifier)) {
1113         // Verify that live objects from the last GC cycle haven't been corrupted by
1114         // mutators before we begin this new GC cycle.
1115         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1116             
1117         m_verifier->startGC();
1118         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1119     }
1120         
1121     prepareForMarking();
1122         
1123     if (m_collectionScope == CollectionScope::Full) {
1124         m_opaqueRoots.clear();
1125         m_collectorSlotVisitor->clearMarkStacks();
1126         m_mutatorMarkStack->clear();
1127     }
1128
1129     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1130
1131     beginMarking();
1132
1133     forEachSlotVisitor(
1134         [&] (SlotVisitor& visitor) {
1135             visitor.didStartMarking();
1136         });
1137
1138     m_parallelMarkersShouldExit = false;
1139
1140     m_helperClient.setFunction(
1141         [this] () {
1142             SlotVisitor* slotVisitor;
1143             {
1144                 LockHolder locker(m_parallelSlotVisitorLock);
1145                 if (m_availableParallelSlotVisitors.isEmpty()) {
1146                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1147                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1148                     
1149                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1150                         newVisitor->optimizeForStoppedMutator();
1151                     
1152                     newVisitor->didStartMarking();
1153                     
1154                     slotVisitor = newVisitor.get();
1155                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1156                 } else
1157                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1158             }
1159
1160             WTF::registerGCThread(GCThreadType::Helper);
1161
1162             {
1163                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1164                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1165             }
1166
1167             {
1168                 LockHolder locker(m_parallelSlotVisitorLock);
1169                 m_availableParallelSlotVisitors.append(slotVisitor);
1170             }
1171         });
1172
1173     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1174
1175     m_constraintSet->didStartMarking();
1176     
1177     m_scheduler->beginCollection();
1178     if (Options::logGC())
1179         m_scheduler->log();
1180     
1181     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1182     // checks because bootstrap would have put things into the visitor. So, we should fall
1183     // through to draining.
1184     
1185     if (!slotVisitor.didReachTermination()) {
1186         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1187         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1188         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1189         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1190         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1191         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1192         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1193         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1194         RELEASE_ASSERT_NOT_REACHED();
1195     }
1196         
1197     return changePhase(conn, CollectorPhase::Fixpoint);
1198 }
1199
1200 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1201 {
1202     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1203     
1204     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1205     
1206     if (Options::logGC()) {
1207         HashMap<const char*, size_t> visitMap;
1208         forEachSlotVisitor(
1209             [&] (SlotVisitor& slotVisitor) {
1210                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1211             });
1212         
1213         auto perVisitorDump = sortedMapDump(
1214             visitMap,
1215             [] (const char* a, const char* b) -> bool {
1216                 return strcmp(a, b) < 0;
1217             },
1218             ":", " ");
1219         
1220         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1221     }
1222         
1223     if (slotVisitor.didReachTermination()) {
1224         m_scheduler->didReachTermination();
1225             
1226         assertSharedMarkStacksEmpty();
1227             
1228         slotVisitor.mergeIfNecessary();
1229         for (auto& parallelVisitor : m_parallelSlotVisitors)
1230             parallelVisitor->mergeIfNecessary();
1231             
1232         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1233         // we don't have to execute root constraints again unless the mutator did run. At a
1234         // minimum, we could use this for work estimates - but it's probably more than just an
1235         // estimate.
1236         // https://bugs.webkit.org/show_bug.cgi?id=166828
1237             
1238         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1239         // into play if we're executing constraints for the first time. But that will matter
1240         // when we have deep stacks or a lot of DOM stuff.
1241         // https://bugs.webkit.org/show_bug.cgi?id=166831
1242             
1243         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1244         // add their own using Heap::addMarkingConstraint().
1245         bool converged =
1246             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1247         if (converged && slotVisitor.isEmpty()) {
1248             assertSharedMarkStacksEmpty();
1249             return changePhase(conn, CollectorPhase::End);
1250         }
1251             
1252         m_scheduler->didExecuteConstraints();
1253     }
1254         
1255     if (Options::logGC())
1256         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1257         
1258     {
1259         ParallelModeEnabler enabler(slotVisitor);
1260         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1261     }
1262         
1263     m_scheduler->synchronousDrainingDidStall();
1264
1265     if (slotVisitor.didReachTermination())
1266         return true; // This is like relooping to the top of runFixpointPhase().
1267         
1268     if (!m_scheduler->shouldResume())
1269         return true;
1270
1271     m_scheduler->willResume();
1272         
1273     if (Options::logGC()) {
1274         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1275         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1276     }
1277
1278     // Forgive the mutator for its past failures to keep up.
1279     // FIXME: Figure out if moving this to different places results in perf changes.
1280     m_incrementBalance = 0;
1281         
1282     return changePhase(conn, CollectorPhase::Concurrent);
1283 }
1284
1285 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1286 {
1287     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1288
1289     switch (conn) {
1290     case GCConductor::Mutator: {
1291         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1292         // stopIfNecessary(), i.e. on every allocation slow path. When that happens we check whether
1293         // it's time to stop and do some work.
1294         if (slotVisitor.didReachTermination()
1295             || m_scheduler->shouldStop())
1296             return changePhase(conn, CollectorPhase::Reloop);
1297         
1298         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1299         // everything. This is super cheap if the SlotVisitor is already empty.
1300         slotVisitor.donateAll();
1301         return false;
1302     }
1303     case GCConductor::Collector: {
1304         {
1305             ParallelModeEnabler enabler(slotVisitor);
1306             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1307         }
1308         return changePhase(conn, CollectorPhase::Reloop);
1309     } }
1310     
1311     RELEASE_ASSERT_NOT_REACHED();
1312     return false;
1313 }
1314
1315 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1316 {
1317     if (Options::logGC())
1318         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1319     
1320     m_scheduler->didStop();
1321     
1322     if (Options::logGC())
1323         m_scheduler->log();
1324     
1325     return changePhase(conn, CollectorPhase::Fixpoint);
1326 }
1327
1328 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1329 {
1330     m_scheduler->endCollection();
1331         
1332     {
1333         auto locker = holdLock(m_markingMutex);
1334         m_parallelMarkersShouldExit = true;
1335         m_markingConditionVariable.notifyAll();
1336     }
1337     m_helperClient.finish();
1338     
1339     iterateExecutingAndCompilingCodeBlocks(
1340         [&] (CodeBlock* codeBlock) {
1341             writeBarrier(codeBlock);
1342         });
1343         
1344     updateObjectCounts();
1345     endMarking();
1346         
1347     if (UNLIKELY(m_verifier)) {
1348         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1349         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1350     }
1351         
1352     if (vm()->typeProfiler())
1353         vm()->typeProfiler()->invalidateTypeSetCache();
1354         
1355     reapWeakHandles();
1356     pruneStaleEntriesFromWeakGCMaps();
1357     sweepArrayBuffers();
1358     snapshotUnswept();
1359     finalizeUnconditionalFinalizers();
1360     removeDeadCompilerWorklistEntries();
1361     notifyIncrementalSweeper();
1362     
1363     m_codeBlocks->iterateCurrentlyExecuting(
1364         [&] (CodeBlock* codeBlock) {
1365             writeBarrier(codeBlock);
1366         });
1367     m_codeBlocks->clearCurrentlyExecuting();
1368         
1369     m_objectSpace.prepareForAllocation();
1370     updateAllocationLimits();
1371
1372     if (UNLIKELY(m_verifier)) {
1373         m_verifier->trimDeadCells();
1374         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1375     }
1376
1377     didFinishCollection();
1378
1379     if (false) {
1380         dataLog("Heap state after GC:\n");
1381         m_objectSpace.dumpBits();
1382     }
1383     
1384     if (Options::logGC()) {
1385         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1386         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1387     }
1388     
1389     {
1390         auto locker = holdLock(*m_threadLock);
1391         m_requests.removeFirst();
1392         m_lastServedTicket++;
1393         clearMutatorWaiting();
1394     }
1395     ParkingLot::unparkAll(&m_worldState);
1396
1397     if (false)
1398         dataLog("GC END!\n");
1399
1400     setNeedFinalize();
1401
1402     m_lastGCStartTime = m_currentGCStartTime;
1403     m_lastGCEndTime = MonotonicTime::now();
1404         
1405     return changePhase(conn, CollectorPhase::NotRunning);
1406 }
1407
1408 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1409 {
1410     checkConn(conn);
1411
1412     m_nextPhase = nextPhase;
1413
1414     return finishChangingPhase(conn);
1415 }
1416
1417 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1418 {
1419     checkConn(conn);
1420     
1421     if (m_nextPhase == m_currentPhase)
1422         return true;
1423
1424     if (false)
1425         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1426     
1427     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1428     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1429     
1430     if (suspendedBefore != suspendedAfter) {
1431         if (suspendedBefore) {
1432             RELEASE_ASSERT(!suspendedAfter);
1433             
1434             resumeThePeriphery();
1435             if (conn == GCConductor::Collector)
1436                 resumeTheMutator();
1437             else
1438                 handleNeedFinalize();
1439         } else {
1440             RELEASE_ASSERT(!suspendedBefore);
1441             RELEASE_ASSERT(suspendedAfter);
1442             
1443             if (conn == GCConductor::Collector) {
1444                 waitWhileNeedFinalize();
1445                 if (!stopTheMutator()) {
1446                     if (false)
1447                         dataLog("Returning false.\n");
1448                     return false;
1449                 }
1450             } else {
1451                 sanitizeStackForVM(m_vm);
1452                 handleNeedFinalize();
1453             }
1454             stopThePeriphery(conn);
1455         }
1456     }
1457     
1458     m_currentPhase = m_nextPhase;
1459     return true;
1460 }
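// The phases driven by runCurrentPhase() above form a cycle:
// NotRunning -> Begin -> Fixpoint -> Concurrent -> Reloop -> Fixpoint -> ... -> End -> NotRunning.
// Fixpoint moves to End once constraint solving converges and the visitor is empty;
// otherwise it resumes the world and goes Concurrent, and Reloop stops the world again
// before re-entering Fixpoint. finishChangingPhase() is where the world actually gets
// suspended or resumed, whenever the old and new phases disagree about
// worldShouldBeSuspended().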
1461
1462 void Heap::stopThePeriphery(GCConductor conn)
1463 {
1464     if (m_collectorBelievesThatTheWorldIsStopped) {
1465         dataLog("FATAL: world already stopped.\n");
1466         RELEASE_ASSERT_NOT_REACHED();
1467     }
1468     
1469     if (m_mutatorDidRun)
1470         m_mutatorExecutionVersion++;
1471     
1472     m_mutatorDidRun = false;
1473
1474     suspendCompilerThreads();
1475     m_collectorBelievesThatTheWorldIsStopped = true;
1476
1477     forEachSlotVisitor(
1478         [&] (SlotVisitor& slotVisitor) {
1479             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1480         });
1481
1482 #if ENABLE(JIT)
1483     {
1484         DeferGCForAWhile awhile(*this);
1485         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1486             && conn == GCConductor::Collector)
1487             setGCDidJIT();
1488     }
1489 #else
1490     UNUSED_PARAM(conn);
1491 #endif // ENABLE(JIT)
1492     
1493     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1494     
1495     m_structureIDTable.flushOldTables();
1496     m_objectSpace.stopAllocating();
1497     
1498     m_stopTime = MonotonicTime::now();
1499 }
1500
1501 NEVER_INLINE void Heap::resumeThePeriphery()
1502 {
1503     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1504     // collection cycle or this is just a concurrent phase within a collection cycle:
1505     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1506     //   last active block.
1507     // - During collection cycle: it reinstates the last active block.
1508     m_objectSpace.resumeAllocating();
1509     
1510     m_barriersExecuted = 0;
1511     
1512     if (!m_collectorBelievesThatTheWorldIsStopped) {
1513         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1514 #if OS(DARWIN)
1515         // FIXME: Remove this when no longer needed.
1516         // https://bugs.webkit.org/show_bug.cgi?id=170094
1517 #if CPU(X86_64)
1518         unsigned worldState = m_worldState.load();
1519         asm volatile(
1520             "int3"
1521             :
1522             : "a"(m_currentPhase), "b"(m_nextPhase), "c"(worldState), "S"(m_lastServedTicket), "D"(m_lastGrantedTicket)
1523             : "memory");
1524 #elif CPU(ARM64)
1525         unsigned worldState = m_worldState.load();
1526         asm volatile(
1527             "ldrb w0, %0\n"
1528             "ldrb w1, %1\n"
1529             "ldr w2, %2\n"
1530             "ldr x3, %3\n"
1531             "ldr x4, %4\n"
1532             "brk #0"
1533             :
1534             : "m"(m_currentPhase), "m"(m_nextPhase), "m"(worldState), "m"(m_lastServedTicket), "m"(m_lastGrantedTicket)
1535             : "memory");
1536 #endif
1537 #endif // OS(DARWIN)
1538         RELEASE_ASSERT_NOT_REACHED();
1539     }
1540     m_collectorBelievesThatTheWorldIsStopped = false;
1541     
1542     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1543     // become available. We basically want a lockAny() method that will lock whatever lock is available
1544     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1545     // queues at once, which is totally achievable - it would just require memory allocation, which is
1546     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1547     // with a DLG-style handshake mechanism, but that seems not as general.
1548     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1549
1550     forEachSlotVisitor(
1551         [&] (SlotVisitor& slotVisitor) {
1552             slotVisitorsToUpdate.append(&slotVisitor);
1553         });
1554     
1555     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1556         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1557             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1558             bool remove = false;
1559             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1560                 remove = true;
1561             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1562                 slotVisitor.updateMutatorIsStopped(locker);
1563                 remove = true;
1564             }
1565             if (remove) {
1566                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1567                 slotVisitorsToUpdate.takeLast();
1568             }
1569         }
1570         std::this_thread::yield();
1571     }
1572     
1573     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1574         slotVisitor->updateMutatorIsStopped();
1575     
1576     resumeCompilerThreads();
1577 }
1578
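// stopTheMutator() returns true if the world is now stopped and false if the conn was handed to the
// mutator instead. Roughly: if the stoppedBit is already set, the mutator is stopped; if the mutator
// already holds the conn, the collector backs off; if the mutator has no heap access, we can set the
// stoppedBit directly; otherwise we hand the mutator the conn so that it drives the collection from
// its next stopIfNecessary() poll.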
1579 bool Heap::stopTheMutator()
1580 {
1581     for (;;) {
1582         unsigned oldState = m_worldState.load();
1583         if (oldState & stoppedBit) {
1584             RELEASE_ASSERT(!(oldState & hasAccessBit));
1585             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1586             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1587             return true;
1588         }
1589         
1590         if (oldState & mutatorHasConnBit) {
1591             RELEASE_ASSERT(!(oldState & hasAccessBit));
1592             RELEASE_ASSERT(!(oldState & stoppedBit));
1593             return false;
1594         }
1595
1596         if (!(oldState & hasAccessBit)) {
1597             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1598             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1599             // We can stop the world instantly.
1600             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1601                 return true;
1602             continue;
1603         }
1604         
1605         // Transfer the conn to the mutator and bail.
1606         RELEASE_ASSERT(oldState & hasAccessBit);
1607         RELEASE_ASSERT(!(oldState & stoppedBit));
1608         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1609         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1610             if (false)
1611                 dataLog("Handed off the conn.\n");
1612             m_stopIfNecessaryTimer->scheduleSoon();
1613             ParkingLot::unparkAll(&m_worldState);
1614             return false;
1615         }
1616     }
1617 }
1618
1619 NEVER_INLINE void Heap::resumeTheMutator()
1620 {
1621     if (false)
1622         dataLog("Resuming the mutator.\n");
1623     for (;;) {
1624         unsigned oldState = m_worldState.load();
1625         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1626             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1627             RELEASE_ASSERT_NOT_REACHED();
1628         }
1629         if (oldState & mutatorHasConnBit) {
1630             dataLog("Fatal: mutator has the conn.\n");
1631             RELEASE_ASSERT_NOT_REACHED();
1632         }
1633         
1634         if (!(oldState & stoppedBit)) {
1635             if (false)
1636                 dataLog("Returning because not stopped.\n");
1637             return;
1638         }
1639         
1640         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1641             if (false)
1642                 dataLog("CASing and returning.\n");
1643             ParkingLot::unparkAll(&m_worldState);
1644             return;
1645         }
1646     }
1647 }
1648
1649 void Heap::stopIfNecessarySlow()
1650 {
1651     while (stopIfNecessarySlow(m_worldState.load())) { }
1652     
1653     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1654     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1655     
1656     handleGCDidJIT();
1657     handleNeedFinalize();
1658     m_mutatorDidRun = true;
1659 }
1660
1661 bool Heap::stopIfNecessarySlow(unsigned oldState)
1662 {
1663     RELEASE_ASSERT(oldState & hasAccessBit);
1664     RELEASE_ASSERT(!(oldState & stoppedBit));
1665     
1666     // It's possible for us to wake up with finalization already requested but the world not yet
1667     // resumed. If that happens, we can't run finalization yet.
1668     if (handleNeedFinalize(oldState))
1669         return true;
1670
1671     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1672     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1673     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1674     // and there would be some other bit indicating whether we were in some GC phase other than the
1675     // NotRunning or Concurrent ones.
1676     if (oldState & mutatorHasConnBit)
1677         collectInMutatorThread();
1678     
1679     return false;
1680 }
1681
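// collectInMutatorThread() runs when the mutator holds the conn: the mutator itself steps the
// collector state machine by calling runCurrentPhase(GCConductor::Mutator, ...) until the current
// increment of work finishes. If a phase reports NeedCurrentThreadState, we sanitize the stack and
// re-enter the loop through callWithCurrentThreadState(), presumably so that conservative scanning
// can see this thread's stack and registers.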
1682 NEVER_INLINE void Heap::collectInMutatorThread()
1683 {
1684     CollectingScope collectingScope(*this);
1685     for (;;) {
1686         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1687         switch (result) {
1688         case RunCurrentPhaseResult::Finished:
1689             return;
1690         case RunCurrentPhaseResult::Continue:
1691             break;
1692         case RunCurrentPhaseResult::NeedCurrentThreadState:
1693             sanitizeStackForVM(m_vm);
1694             auto lambda = [&] (CurrentThreadState& state) {
1695                 for (;;) {
1696                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1697                     switch (result) {
1698                     case RunCurrentPhaseResult::Finished:
1699                         return;
1700                     case RunCurrentPhaseResult::Continue:
1701                         break;
1702                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1703                         RELEASE_ASSERT_NOT_REACHED();
1704                         break;
1705                     }
1706                 }
1707             };
1708             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1709             return;
1710         }
1711     }
1712 }
1713
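// waitForCollector() repeatedly evaluates func under m_threadLock. If func reports that we are not
// done, we set mutatorWaitingBit and later park on m_worldState with compareAndPark(), which fails
// (and so retries) if the collector has already cleared the bit and unparked us. Along the way we
// service any stop-the-world request and relinquish the conn, since the collector thread cannot make
// progress while the mutator is holding it.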
1714 template<typename Func>
1715 void Heap::waitForCollector(const Func& func)
1716 {
1717     for (;;) {
1718         bool done;
1719         {
1720             LockHolder locker(*m_threadLock);
1721             done = func(locker);
1722             if (!done) {
1723                 setMutatorWaiting();
1724                 
1725                 // At this point, the collector knows that we intend to wait, and it will clear the
1726                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1727                 // prevents us from parking except if there is also a stop-the-world request. Unparking
1728                 // after clearing means that if the clearing happens after we park, then we will unpark.
1729             }
1730         }
1731         
1732         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1733         unsigned oldState = m_worldState.load();
1734         if (stopIfNecessarySlow(oldState))
1735             continue;
1736         
1737         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1738         // do the collection.
1739         relinquishConn();
1740         
1741         if (done) {
1742             clearMutatorWaiting(); // Clean up just in case.
1743             return;
1744         }
1745         
1746         // If mutatorWaitingBit is still set then we want to wait.
1747         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1748     }
1749 }
1750
1751 void Heap::acquireAccessSlow()
1752 {
1753     for (;;) {
1754         unsigned oldState = m_worldState.load();
1755         RELEASE_ASSERT(!(oldState & hasAccessBit));
1756         
1757         if (oldState & stoppedBit) {
1758             if (verboseStop) {
1759                 dataLog("Stopping in acquireAccess!\n");
1760                 WTFReportBacktrace();
1761             }
1762             // Wait until we're not stopped anymore.
1763             ParkingLot::compareAndPark(&m_worldState, oldState);
1764             continue;
1765         }
1766         
1767         RELEASE_ASSERT(!(oldState & stoppedBit));
1768         unsigned newState = oldState | hasAccessBit;
1769         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1770             handleGCDidJIT();
1771             handleNeedFinalize();
1772             m_mutatorDidRun = true;
1773             stopIfNecessary();
1774             return;
1775         }
1776     }
1777 }
1778
1779 void Heap::releaseAccessSlow()
1780 {
1781     for (;;) {
1782         unsigned oldState = m_worldState.load();
1783         if (!(oldState & hasAccessBit)) {
1784             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1785             RELEASE_ASSERT_NOT_REACHED();
1786         }
1787         if (oldState & stoppedBit) {
1788             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1789             RELEASE_ASSERT_NOT_REACHED();
1790         }
1791         
1792         if (handleNeedFinalize(oldState))
1793             continue;
1794         
1795         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1796         
1797         if ((oldState & mutatorHasConnBit)
1798             && m_nextPhase != m_currentPhase) {
1799             // This means that the collector thread had given us the conn so that we would do something
1800             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1801             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1802             // sure to have work to do.
1803             newState |= stoppedBit;
1804         }
1805
1806         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1807             if (oldState & mutatorHasConnBit)
1808                 finishRelinquishingConn();
1809             return;
1810         }
1811     }
1812 }
1813
1814 bool Heap::relinquishConn(unsigned oldState)
1815 {
1816     RELEASE_ASSERT(oldState & hasAccessBit);
1817     RELEASE_ASSERT(!(oldState & stoppedBit));
1818     
1819     if (!(oldState & mutatorHasConnBit))
1820         return false; // Done.
1821     
1822     if (m_threadShouldStop)
1823         return false;
1824     
1825     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1826         return true; // Loop around.
1827     
1828     finishRelinquishingConn();
1829     return true;
1830 }
1831
1832 void Heap::finishRelinquishingConn()
1833 {
1834     if (false)
1835         dataLog("Relinquished the conn.\n");
1836     
1837     sanitizeStackForVM(m_vm);
1838     
1839     auto locker = holdLock(*m_threadLock);
1840     if (!m_requests.isEmpty())
1841         m_threadCondition->notifyOne(locker);
1842     ParkingLot::unparkAll(&m_worldState);
1843 }
1844
1845 void Heap::relinquishConn()
1846 {
1847     while (relinquishConn(m_worldState.load())) { }
1848 }
1849
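// handleGCDidJIT() returns true both when the CAS fails and when it succeeds; the caller simply
// loops until it observes gcDidJITBit clear and gets false back. The crossModifyingCodeFence() runs
// before the mutator resumes executing code, because the collector sets gcDidJITBit after completing
// JIT work while the world was stopped (see setGCDidJIT() and the JITWorklist completion above).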
1850 bool Heap::handleGCDidJIT(unsigned oldState)
1851 {
1852     RELEASE_ASSERT(oldState & hasAccessBit);
1853     if (!(oldState & gcDidJITBit))
1854         return false;
1855     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1856         WTF::crossModifyingCodeFence();
1857         return true;
1858     }
1859     return true;
1860 }
1861
1862 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1863 {
1864     RELEASE_ASSERT(oldState & hasAccessBit);
1865     RELEASE_ASSERT(!(oldState & stoppedBit));
1866     
1867     if (!(oldState & needFinalizeBit))
1868         return false;
1869     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1870         finalize();
1871         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1872         // which case they would be waiting for us to release heap access.
1873         ParkingLot::unparkAll(&m_worldState);
1874         return true;
1875     }
1876     return true;
1877 }
1878
1879 void Heap::handleGCDidJIT()
1880 {
1881     while (handleGCDidJIT(m_worldState.load())) { }
1882 }
1883
1884 void Heap::handleNeedFinalize()
1885 {
1886     while (handleNeedFinalize(m_worldState.load())) { }
1887 }
1888
1889 void Heap::setGCDidJIT()
1890 {
1891     m_worldState.transaction(
1892         [&] (unsigned& state) -> bool {
1893             RELEASE_ASSERT(state & stoppedBit);
1894             state |= gcDidJITBit;
1895             return true;
1896         });
1897 }
1898
1899 void Heap::setNeedFinalize()
1900 {
1901     m_worldState.exchangeOr(needFinalizeBit);
1902     ParkingLot::unparkAll(&m_worldState);
1903     m_stopIfNecessaryTimer->scheduleSoon();
1904 }
1905
1906 void Heap::waitWhileNeedFinalize()
1907 {
1908     for (;;) {
1909         unsigned oldState = m_worldState.load();
1910         if (!(oldState & needFinalizeBit)) {
1911             // This means that either there was no finalize request or the main thread will finalize
1912             // with heap access, so a subsequent call to stopTheWorld() will return only when
1913             // finalize finishes.
1914             return;
1915         }
1916         ParkingLot::compareAndPark(&m_worldState, oldState);
1917     }
1918 }
1919
1920 void Heap::setMutatorWaiting()
1921 {
1922     m_worldState.exchangeOr(mutatorWaitingBit);
1923 }
1924
1925 void Heap::clearMutatorWaiting()
1926 {
1927     m_worldState.exchangeAnd(~mutatorWaitingBit);
1928 }
1929
1930 void Heap::notifyThreadStopping(const AbstractLocker&)
1931 {
1932     m_threadIsStopping = true;
1933     clearMutatorWaiting();
1934     ParkingLot::unparkAll(&m_worldState);
1935 }
1936
1937 void Heap::finalize()
1938 {
1939     MonotonicTime before;
1940     if (Options::logGC()) {
1941         before = MonotonicTime::now();
1942         dataLog("[GC<", RawPointer(this), ">: finalize ");
1943     }
1944     
1945     {
1946         SweepingScope helpingGCScope(*this);
1947         deleteUnmarkedCompiledCode();
1948         deleteSourceProviderCaches();
1949         sweepLargeAllocations();
1950     }
1951     
1952     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1953         cache->clear();
1954     
1955     if (Options::sweepSynchronously())
1956         sweepSynchronously();
1957
1958     if (Options::logGC()) {
1959         MonotonicTime after = MonotonicTime::now();
1960         dataLog((after - before).milliseconds(), "ms]\n");
1961     }
1962 }
1963
1964 Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
1965 {
1966     stopIfNecessary();
1967     
1968     ASSERT(vm()->currentThreadIsHoldingAPILock());
1969     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1970     
1971     LockHolder locker(*m_threadLock);
1972     // We may be able to steal the conn. That only works if the collector is definitely not running
1973     // right now. This is an optimization that prevents the collector thread from ever starting in most
1974     // cases.
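    // Taking the conn means that this (mutator) thread will drive the collection itself from its
    // next stopIfNecessary() poll, via collectInMutatorThread(); the notifyOne() below is then
    // skipped so the collector thread stays parked.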
1975     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1976     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
1977         if (false)
1978             dataLog("Taking the conn.\n");
1979         m_worldState.exchangeOr(mutatorHasConnBit);
1980     }
1981     
1982     m_requests.append(scope);
1983     m_lastGrantedTicket++;
1984     if (!(m_worldState.load() & mutatorHasConnBit))
1985         m_threadCondition->notifyOne(locker);
1986     return m_lastGrantedTicket;
1987 }
1988
1989 void Heap::waitForCollection(Ticket ticket)
1990 {
1991     waitForCollector(
1992         [&] (const AbstractLocker&) -> bool {
1993             return m_lastServedTicket >= ticket;
1994         });
1995 }
1996
1997 void Heap::sweepLargeAllocations()
1998 {
1999     m_objectSpace.sweepLargeAllocations();
2000 }
2001
2002 void Heap::suspendCompilerThreads()
2003 {
2004 #if ENABLE(DFG_JIT)
2005     // We ensure that the worklists exist so that it's not possible for the mutator to start a new
2006     // worklist after we have suspended the ones that it had started before. That's not very expensive
2007     // since the worklists use AutomaticThreads anyway.
2008     for (unsigned i = DFG::numberOfWorklists(); i--;)
2009         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2010 #endif
2011 }
2012
2013 void Heap::willStartCollection(std::optional<CollectionScope> scope)
2014 {
2015     if (Options::logGC())
2016         dataLog("=> ");
2017     
2018     if (shouldDoFullCollection(scope)) {
2019         m_collectionScope = CollectionScope::Full;
2020         m_shouldDoFullCollection = false;
2021         if (Options::logGC())
2022             dataLog("FullCollection, ");
2023         if (false)
2024             dataLog("Full collection!\n");
2025     } else {
2026         m_collectionScope = CollectionScope::Eden;
2027         if (Options::logGC())
2028             dataLog("EdenCollection, ");
2029         if (false)
2030             dataLog("Eden collection!\n");
2031     }
2032     if (m_collectionScope == CollectionScope::Full) {
2033         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2034         m_extraMemorySize = 0;
2035         m_deprecatedExtraMemorySize = 0;
2036 #if ENABLE(RESOURCE_USAGE)
2037         m_externalMemorySize = 0;
2038 #endif
2039
2040         if (m_fullActivityCallback)
2041             m_fullActivityCallback->willCollect();
2042     } else {
2043         ASSERT(m_collectionScope == CollectionScope::Eden);
2044         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2045     }
2046
2047     if (m_edenActivityCallback)
2048         m_edenActivityCallback->willCollect();
2049
2050     for (auto* observer : m_observers)
2051         observer->willGarbageCollect();
2052 }
2053
2054 void Heap::prepareForMarking()
2055 {
2056     m_objectSpace.prepareForMarking();
2057 }
2058
2059 void Heap::reapWeakHandles()
2060 {
2061     m_objectSpace.reapWeakSets();
2062 }
2063
2064 void Heap::pruneStaleEntriesFromWeakGCMaps()
2065 {
2066     if (m_collectionScope != CollectionScope::Full)
2067         return;
2068     for (auto& pruneCallback : m_weakGCMaps.values())
2069         pruneCallback();
2070 }
2071
2072 void Heap::sweepArrayBuffers()
2073 {
2074     m_arrayBuffers.sweep();
2075 }
2076
2077 void Heap::snapshotUnswept()
2078 {
2079     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2080     m_objectSpace.snapshotUnswept();
2081 }
2082
2083 void Heap::deleteSourceProviderCaches()
2084 {
2085     if (*m_lastCollectionScope == CollectionScope::Full)
2086         m_vm->clearSourceProviderCaches();
2087 }
2088
2089 void Heap::notifyIncrementalSweeper()
2090 {
2091     if (m_collectionScope == CollectionScope::Full) {
2092         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2093             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2094     }
2095
2096     m_sweeper->startSweeping();
2097 }
2098
2099 void Heap::updateAllocationLimits()
2100 {
2101     static const bool verbose = false;
2102     
2103     if (verbose) {
2104         dataLog("\n");
2105         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2106     }
2107     
2108     // Calculate our current heap size threshold for the purpose of figuring out when we should
2109     // run another collection. This isn't the same as either size() or capacity(), though it should
2110     // be somewhere between the two. The key is to match the size calculations involved calls to
2111     // be somewhere between the two. The key is to match the size calculations involved in calls to
2112     // fragmentation, we may have size() much smaller than capacity().
2113     size_t currentHeapSize = 0;
2114
2115     // For marked space, we use the total number of bytes visited. This matches the logic for
2116     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2117     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2118     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2119     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2120     currentHeapSize += m_totalBytesVisited;
2121     if (verbose)
2122         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2123
2124     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2125     // extra memory reporting.
2126     currentHeapSize += extraMemorySize();
2127     if (!ASSERT_DISABLED) {
2128         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2129         checkedCurrentHeapSize += extraMemorySize();
2130         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2131     }
2132
2133     if (verbose)
2134         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2135     
2136     if (m_collectionScope == CollectionScope::Full) {
2137         // To avoid pathological GC churn in very small and very large heaps, we set
2138         // the new allocation limit based on the current size of the heap, with a
2139         // fixed minimum.
2140         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2141         if (verbose)
2142             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2143         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2144         if (verbose)
2145             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2146         m_sizeAfterLastFullCollect = currentHeapSize;
2147         if (verbose)
2148             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2149         m_bytesAbandonedSinceLastFullCollect = 0;
2150         if (verbose)
2151             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2152     } else {
2153         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2154         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2155         // But we are sloppy, so we have to defend against the subtraction underflowing.
2156         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2157         if (verbose)
2158             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2159         m_sizeAfterLastEdenCollect = currentHeapSize;
2160         if (verbose)
2161             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2162         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2163         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2164         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2165             m_shouldDoFullCollection = true;
2166         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
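        // Worked example with illustrative numbers (not taken from any particular configuration):
        // suppose a full collection ended with currentHeapSize = 32MB and set m_maxHeapSize = 64MB,
        // so m_maxEdenSize = 32MB and m_sizeAfterLastCollect = 32MB. If this eden collection ends
        // with currentHeapSize = 40MB, then m_maxHeapSize becomes 64MB + (40MB - 32MB) = 72MB and
        // m_maxEdenSize becomes 72MB - 40MB = 32MB again, so the nursery size stays constant.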
2167         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2168         if (verbose)
2169             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2170         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2171         if (verbose)
2172             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2173         if (m_fullActivityCallback) {
2174             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2175             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2176         }
2177     }
2178
2179     m_sizeAfterLastCollect = currentHeapSize;
2180     if (verbose)
2181         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2182     m_bytesAllocatedThisCycle = 0;
2183
2184     if (Options::logGC())
2185         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2186 }
2187
2188 void Heap::didFinishCollection()
2189 {
2190     m_afterGC = MonotonicTime::now();
2191     CollectionScope scope = *m_collectionScope;
2192     if (scope == CollectionScope::Full)
2193         m_lastFullGCLength = m_afterGC - m_beforeGC;
2194     else
2195         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2196
2197 #if ENABLE(RESOURCE_USAGE)
2198     ASSERT(externalMemorySize() <= extraMemorySize());
2199 #endif
2200
2201     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2202         gatherExtraHeapSnapshotData(*heapProfiler);
2203         removeDeadHeapSnapshotNodes(*heapProfiler);
2204     }
2205
2206     if (UNLIKELY(m_verifier))
2207         m_verifier->endGC();
2208
2209     RELEASE_ASSERT(m_collectionScope);
2210     m_lastCollectionScope = m_collectionScope;
2211     m_collectionScope = std::nullopt;
2212
2213     for (auto* observer : m_observers)
2214         observer->didGarbageCollect(scope);
2215 }
2216
2217 void Heap::resumeCompilerThreads()
2218 {
2219 #if ENABLE(DFG_JIT)
2220     for (unsigned i = DFG::numberOfWorklists(); i--;)
2221         DFG::existingWorklistForIndex(i).resumeAllThreads();
2222 #endif
2223 }
2224
2225 GCActivityCallback* Heap::fullActivityCallback()
2226 {
2227     return m_fullActivityCallback.get();
2228 }
2229
2230 GCActivityCallback* Heap::edenActivityCallback()
2231 {
2232     return m_edenActivityCallback.get();
2233 }
2234
2235 IncrementalSweeper* Heap::sweeper()
2236 {
2237     return m_sweeper.get();
2238 }
2239
2240 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2241 {
2242     if (m_fullActivityCallback)
2243         m_fullActivityCallback->setEnabled(enable);
2244     if (m_edenActivityCallback)
2245         m_edenActivityCallback->setEnabled(enable);
2246 }
2247
2248 void Heap::didAllocate(size_t bytes)
2249 {
2250     if (m_edenActivityCallback)
2251         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2252     m_bytesAllocatedThisCycle += bytes;
2253     performIncrement(bytes);
2254 }
2255
2256 bool Heap::isValidAllocation(size_t)
2257 {
2258     if (!isValidThreadState(m_vm))
2259         return false;
2260
2261     if (isCurrentThreadBusy())
2262         return false;
2263     
2264     return true;
2265 }
2266
2267 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2268 {
2269     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2270 }
2271
2272 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2273 {
2274     HandleSlot slot = handle.slot();
2275     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2276     finalizer(slot->asCell());
2277     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2278 }
2279
2280 void Heap::addExecutable(ExecutableBase* executable)
2281 {
2282     m_executables.append(executable);
2283 }
2284
2285 void Heap::collectAllGarbageIfNotDoneRecently()
2286 {
2287     if (!m_fullActivityCallback) {
2288         collectAllGarbage();
2289         return;
2290     }
2291
2292     if (m_fullActivityCallback->didSyncGCRecently()) {
2293         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2294         reportAbandonedObjectGraph();
2295         return;
2296     }
2297
2298     m_fullActivityCallback->setDidSyncGCRecently();
2299     collectAllGarbage();
2300 }
2301
2302 bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
2303 {
2304     if (!Options::useGenerationalGC())
2305         return true;
2306
2307     if (!scope)
2308         return m_shouldDoFullCollection;
2309     return *scope == CollectionScope::Full;
2310 }
2311
2312 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2313 {
2314     m_logicallyEmptyWeakBlocks.append(block);
2315 }
2316
2317 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2318 {
2319     if (m_logicallyEmptyWeakBlocks.isEmpty())
2320         return;
2321
2322     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2323     while (sweepNextLogicallyEmptyWeakBlock()) { }
2324 }
2325
2326 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2327 {
2328     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2329         return false;
2330
2331     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2332
2333     block->sweep();
2334     if (block->isEmpty()) {
2335         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2336         m_logicallyEmptyWeakBlocks.removeLast();
2337         WeakBlock::destroy(*this, block);
2338     } else
2339         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2340
2341     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2342         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2343         return false;
2344     }
2345
2346     return true;
2347 }
2348
2349 size_t Heap::visitCount()
2350 {
2351     size_t result = 0;
2352     forEachSlotVisitor(
2353         [&] (SlotVisitor& visitor) {
2354             result += visitor.visitCount();
2355         });
2356     return result;
2357 }
2358
2359 size_t Heap::bytesVisited()
2360 {
2361     size_t result = 0;
2362     forEachSlotVisitor(
2363         [&] (SlotVisitor& visitor) {
2364             result += visitor.bytesVisited();
2365         });
2366     return result;
2367 }
2368
2369 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2370 {
2371     // We don't know the full set of CodeBlocks until compilation has terminated.
2372     completeAllJITPlans();
2373
2374     return m_codeBlocks->iterate(func);
2375 }
2376
2377 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2378 {
2379     return m_codeBlocks->iterate(locker, func);
2380 }
2381
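// This is the slow path of the write barrier, taken when the fast path's threshold check could not
// rule out that "from" is black. When the mutator must be fenced (the barrier threshold is the
// tautological one during certain concurrent phases; see setMutatorShouldBeFenced()), we re-check
// the cell state after a storeLoadFence() before adding the cell to the remembered set.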
2382 void Heap::writeBarrierSlowPath(const JSCell* from)
2383 {
2384     if (UNLIKELY(mutatorShouldBeFenced())) {
2385         // In this case, the barrierThreshold is the tautological threshold, so from might not actually
2386         // be black. But we can't know for sure until we fire off a fence.
2387         WTF::storeLoadFence();
2388         if (from->cellState() != CellState::PossiblyBlack)
2389             return;
2390     }
2391     
2392     addToRememberedSet(from);
2393 }
2394
2395 bool Heap::isCurrentThreadBusy()
2396 {
2397     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2398 }
2399
2400 void Heap::reportExtraMemoryVisited(size_t size)
2401 {
2402     size_t* counter = &m_extraMemorySize;
2403     
2404     for (;;) {
2405         size_t oldSize = *counter;
2406         // FIXME: Change this to use SaturatedArithmetic when available.
2407         // https://bugs.webkit.org/show_bug.cgi?id=170411
2408         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2409         checkedNewSize += size;
2410         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2411         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2412             return;
2413     }
2414 }
2415
2416 #if ENABLE(RESOURCE_USAGE)
2417 void Heap::reportExternalMemoryVisited(size_t size)
2418 {
2419     size_t* counter = &m_externalMemorySize;
2420
2421     for (;;) {
2422         size_t oldSize = *counter;
2423         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2424             return;
2425     }
2426 }
2427 #endif
2428
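// collectIfNecessaryOrDefer() is the allocation-driven GC trigger. It requests an asynchronous
// collection once m_bytesAllocatedThisCycle exceeds m_maxEdenSize (or the gcMaxHeapSize option, if
// set). If GC is currently deferred, it only records m_didDeferGCWork so that
// decrementDeferralDepthAndGCIfNeededSlow() can re-run this check later. A hypothetical call-site
// sketch (not code from this file): an allocation slow path might do
//     GCDeferralContext deferralContext(vm.heap);
//     vm.heap.collectIfNecessaryOrDefer(&deferralContext);
// so that the "should GC" decision is recorded now but acted on only once the caller reaches a
// GC-safe point.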
2429 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2430 {
2431     ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
2432
2433     if (!m_isSafeToCollect)
2434         return;
2435     switch (mutatorState()) {
2436     case MutatorState::Running:
2437     case MutatorState::Allocating:
2438         break;
2439     case MutatorState::Sweeping:
2440     case MutatorState::Collecting:
2441         return;
2442     }
2443     if (!Options::useGC())
2444         return;
2445     
2446     if (mayNeedToStop()) {
2447         if (deferralContext)
2448             deferralContext->m_shouldGC = true;
2449         else if (isDeferred())
2450             m_didDeferGCWork = true;
2451         else
2452             stopIfNecessary();
2453     }
2454     
2455     if (UNLIKELY(Options::gcMaxHeapSize())) {
2456         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2457             return;
2458     } else {
2459         if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
2460             return;
2461     }
2462
2463     if (deferralContext)
2464         deferralContext->m_shouldGC = true;
2465     else if (isDeferred())
2466         m_didDeferGCWork = true;
2467     else {
2468         collectAsync();
2469         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2470     }
2471 }
2472
2473 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2474 {
2475     // Can't do anything if we're still deferred.
2476     if (m_deferralDepth)
2477         return;
2478     
2479     ASSERT(!isDeferred());
2480     
2481     m_didDeferGCWork = false;
2482     // FIXME: Bring back something like the DeferGCProbability mode.
2483     // https://bugs.webkit.org/show_bug.cgi?id=166627
2484     collectIfNecessaryOrDefer();
2485 }
2486
2487 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2488 {
2489     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2490 }
2491
2492 void Heap::unregisterWeakGCMap(void* weakGCMap)
2493 {
2494     m_weakGCMaps.remove(weakGCMap);
2495 }
2496
2497 void Heap::didAllocateBlock(size_t capacity)
2498 {
2499 #if ENABLE(RESOURCE_USAGE)
2500     m_blockBytesAllocated += capacity;
2501 #else
2502     UNUSED_PARAM(capacity);
2503 #endif
2504 }
2505
2506 void Heap::didFreeBlock(size_t capacity)
2507 {
2508 #if ENABLE(RESOURCE_USAGE)
2509     m_blockBytesAllocated -= capacity;
2510 #else
2511     UNUSED_PARAM(capacity);
2512 #endif
2513 }
2514
2515 #if USE(CF)
2516 void Heap::setRunLoop(CFRunLoopRef runLoop)
2517 {
2518     m_runLoop = runLoop;
2519     m_fullActivityCallback->setRunLoop(runLoop);
2520     m_edenActivityCallback->setRunLoop(runLoop);
2521     m_sweeper->setRunLoop(runLoop);
2522 }
2523 #endif // USE(CF)
2524
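// Each core constraint below is registered with an abbreviation, a human-readable name, a marking
// function, and a ConstraintVolatility. Roughly, the volatility tells the marking fixpoint how
// eagerly a constraint has to be revisited: GreyedByExecution constraints can be invalidated by the
// mutator running, GreyedByMarking constraints only produce new work as marking itself progresses,
// and SeldomGreyed constraints rarely produce new work at all.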
2525 void Heap::addCoreConstraints()
2526 {
2527     m_constraintSet->add(
2528         "Cs", "Conservative Scan",
2529         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2530             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2531             m_objectSpace.prepareForConservativeScan();
2532             ConservativeRoots conservativeRoots(*this);
2533             SuperSamplerScope superSamplerScope(false);
2534             gatherStackRoots(conservativeRoots);
2535             gatherJSStackRoots(conservativeRoots);
2536             gatherScratchBufferRoots(conservativeRoots);
2537             slotVisitor.append(conservativeRoots);
2538         },
2539         ConstraintVolatility::GreyedByExecution);
2540     
2541     m_constraintSet->add(
2542         "Msr", "Misc Small Roots",
2543         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2544 #if JSC_OBJC_API_ENABLED
2545             scanExternalRememberedSet(*m_vm, slotVisitor);
2546 #endif
2547
2548             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2549                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2550             
2551             for (auto& pair : m_protectedValues)
2552                 slotVisitor.appendUnbarriered(pair.key);
2553             
2554             if (m_markListSet && m_markListSet->size())
2555                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2556             
2557             slotVisitor.appendUnbarriered(m_vm->exception());
2558             slotVisitor.appendUnbarriered(m_vm->lastException());
2559         },
2560         ConstraintVolatility::GreyedByExecution);
2561     
2562     m_constraintSet->add(
2563         "Sh", "Strong Handles",
2564         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2565             m_handleSet.visitStrongHandles(slotVisitor);
2566             m_handleStack.visit(slotVisitor);
2567         },
2568         ConstraintVolatility::GreyedByExecution);
2569     
2570     m_constraintSet->add(
2571         "D", "Debugger",
2572         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2573 #if ENABLE(SAMPLING_PROFILER)
2574             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2575                 LockHolder locker(samplingProfiler->getLock());
2576                 samplingProfiler->processUnverifiedStackTraces();
2577                 samplingProfiler->visit(slotVisitor);
2578                 if (Options::logGC() == GCLogging::Verbose)
2579                     dataLog("Sampling Profiler data:\n", slotVisitor);
2580             }
2581 #endif // ENABLE(SAMPLING_PROFILER)
2582             
2583             if (m_vm->typeProfiler())
2584                 m_vm->typeProfilerLog()->visit(slotVisitor);
2585             
2586             m_vm->shadowChicken().visitChildren(slotVisitor);
2587         },
2588         ConstraintVolatility::GreyedByExecution);
2589     
2590     m_constraintSet->add(
2591         "Jsr", "JIT Stub Routines",
2592         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2593             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2594         },
2595         ConstraintVolatility::GreyedByExecution);
2596     
2597     m_constraintSet->add(
2598         "Ws", "Weak Sets",
2599         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2600             m_objectSpace.visitWeakSets(slotVisitor);
2601         },
2602         ConstraintVolatility::GreyedByMarking);
2603     
2604     m_constraintSet->add(
2605         "Wrh", "Weak Reference Harvesters",
2606         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2607             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2608                 current->visitWeakReferences(slotVisitor);
2609         },
2610         ConstraintVolatility::GreyedByMarking);
2611     
2612 #if ENABLE(DFG_JIT)
2613     m_constraintSet->add(
2614         "Dw", "DFG Worklists",
2615         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2616             for (unsigned i = DFG::numberOfWorklists(); i--;)
2617                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2618             
2619             // FIXME: This is almost certainly unnecessary.
2620             // https://bugs.webkit.org/show_bug.cgi?id=166829
2621             DFG::iterateCodeBlocksForGC(
2622                 *m_vm,
2623                 [&] (CodeBlock* codeBlock) {
2624                     slotVisitor.appendUnbarriered(codeBlock);
2625                 });
2626             
2627             if (Options::logGC() == GCLogging::Verbose)
2628                 dataLog("DFG Worklists:\n", slotVisitor);
2629         },
2630         ConstraintVolatility::GreyedByMarking);
2631 #endif
2632     
2633     m_constraintSet->add(
2634         "Cb", "CodeBlocks",
2635         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2636             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2637                 [&] (CodeBlock* codeBlock) {
2638                     // Visit the CodeBlock as a constraint only if it's black.
2639                     if (Heap::isMarked(codeBlock)
2640                         && codeBlock->cellState() == CellState::PossiblyBlack)
2641                         slotVisitor.visitAsConstraint(codeBlock);
2642                 });
2643         },
2644         ConstraintVolatility::SeldomGreyed);
2645     
2646     m_constraintSet->add(
2647         "Mrms", "Mutator+Race Mark Stack",
2648         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2649             // Indicate to the fixpoint that we introduced work!
2650             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2651             slotVisitor.addToVisitCount(size);
2652             
2653             if (Options::logGC())
2654                 dataLog("(", size, ")");
2655             
2656             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2657             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2658         },
2659         [this] (SlotVisitor&) -> double {
2660             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2661         },
2662         ConstraintVolatility::GreyedByExecution);
2663 }
2664
2665 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2666 {
2667     PreventCollectionScope preventCollectionScope(*this);
2668     m_constraintSet->add(WTFMove(constraint));
2669 }
2670
2671 void Heap::notifyIsSafeToCollect()
2672 {
2673     MonotonicTime before;
2674     if (Options::logGC()) {
2675         before = MonotonicTime::now();
2676         dataLog("[GC<", RawPointer(this), ">: starting ");
2677     }
2678     
2679     addCoreConstraints();
2680     
2681     m_isSafeToCollect = true;
2682     
2683     if (Options::collectContinuously()) {
2684         m_collectContinuouslyThread = createThread(
2685             "JSC DEBUG Continuous GC",
2686             [this] () {
2687                 MonotonicTime initialTime = MonotonicTime::now();
2688                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2689                 while (!m_shouldStopCollectingContinuously) {
2690                     {
2691                         LockHolder locker(*m_threadLock);
2692                         if (m_requests.isEmpty()) {
2693                             m_requests.append(std::nullopt);
2694                             m_lastGrantedTicket++;
2695                             m_threadCondition->notifyOne(locker);
2696                         }
2697                     }
2698                     
2699                     {
2700                         LockHolder locker(m_collectContinuouslyLock);
2701                         Seconds elapsed = MonotonicTime::now() - initialTime;
2702                         Seconds elapsedInPeriod = elapsed % period;
2703                         MonotonicTime timeToWakeUp =
2704                             initialTime + elapsed - elapsedInPeriod + period;
2705                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2706                             m_collectContinuouslyCondition.waitUntil(
2707                                 m_collectContinuouslyLock, timeToWakeUp);
2708                         }
2709                     }
2710                 }
2711             });
2712     }
2713     
2714     if (Options::logGC())
2715         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2716 }
2717
2718 void Heap::preventCollection()
2719 {
2720     if (!m_isSafeToCollect)
2721         return;
2722     
2723     // This prevents the collectContinuously thread from starting a collection.
2724     m_collectContinuouslyLock.lock();
2725     
2726     // Wait for all collections to finish.
2727     waitForCollector(
2728         [&] (const AbstractLocker&) -> bool {
2729             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2730             return m_lastServedTicket == m_lastGrantedTicket;
2731         });
2732     
2733     // Now a collection can only start if this thread starts it.
2734     RELEASE_ASSERT(!m_collectionScope);
2735 }
2736
2737 void Heap::allowCollection()
2738 {
2739     if (!m_isSafeToCollect)
2740         return;
2741     
2742     m_collectContinuouslyLock.unlock();
2743 }
2744
2745 template<typename Func>
2746 void Heap::forEachSlotVisitor(const Func& func)
2747 {
2748     auto locker = holdLock(m_parallelSlotVisitorLock);
2749     func(*m_collectorSlotVisitor);
2750     func(*m_mutatorSlotVisitor);
2751     for (auto& slotVisitor : m_parallelSlotVisitors)
2752         func(*slotVisitor);
2753 }
2754
2755 void Heap::setMutatorShouldBeFenced(bool value)
2756 {
2757     m_mutatorShouldBeFenced = value;
2758     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2759 }
2760
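// performIncrement() implements allocation-paced incremental marking. Each allocation of "bytes"
// adds bytes * Options::gcIncrementScale() to m_incrementBalance; once the balance reaches
// Options::gcIncrementBytes(), the mutator drains up to Options::gcIncrementMaxBytes() of marking
// work from its own SlotVisitor and subtracts what was actually visited, so the balance can go
// negative if we overshoot. For example, with a hypothetical gcIncrementScale of 2, every 1KB
// allocated banks 2KB of marking credit.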
2761 void Heap::performIncrement(size_t bytes)
2762 {
2763     if (!m_objectSpace.isMarking())
2764         return;
2765
2766     m_incrementBalance += bytes * Options::gcIncrementScale();
2767
2768     // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
2769     // state when the double goes wild.
2770     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2771         m_incrementBalance = 0;
2772     
2773     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2774         return;
2775
2776     double targetBytes = m_incrementBalance;
2777     if (targetBytes <= 0)
2778         return;
2779     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2780
2781     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2782     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2783     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2784     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2785     m_incrementBalance -= bytesVisited;
2786 }
2787
2788 } // namespace JSC