Source/JavaScriptCore/heap/Heap.cpp (WebKit, revision f0baf318709a7ddd4a7360bc65fe85378167e628)
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "InferredTypeInlines.h"
44 #include "InferredValueInlines.h"
45 #include "Interpreter.h"
46 #include "IsoCellSetInlines.h"
47 #include "JITStubRoutineSet.h"
48 #include "JITWorklist.h"
49 #include "JSCInlines.h"
50 #include "JSGlobalObject.h"
51 #include "JSLock.h"
52 #include "JSVirtualMachineInternal.h"
53 #include "JSWeakMap.h"
54 #include "JSWeakSet.h"
55 #include "JSWebAssemblyCodeBlock.h"
56 #include "MachineStackMarker.h"
57 #include "MarkStackMergingConstraint.h"
58 #include "MarkedAllocatorInlines.h"
59 #include "MarkedSpaceInlines.h"
60 #include "MarkingConstraintSet.h"
61 #include "PreventCollectionScope.h"
62 #include "SamplingProfiler.h"
63 #include "ShadowChicken.h"
64 #include "SpaceTimeMutatorScheduler.h"
65 #include "SubspaceInlines.h"
66 #include "SuperSampler.h"
67 #include "StochasticSpaceTimeMutatorScheduler.h"
68 #include "StopIfNecessaryTimer.h"
69 #include "SweepingScope.h"
70 #include "SynchronousStopTheWorldMutatorScheduler.h"
71 #include "TypeProfiler.h"
72 #include "TypeProfilerLog.h"
73 #include "UnlinkedCodeBlock.h"
74 #include "VM.h"
75 #include "VisitCounter.h"
76 #include "WasmMemory.h"
77 #include "WeakMapImplInlines.h"
78 #include "WeakSetInlines.h"
79 #include <algorithm>
80 #if PLATFORM(IOS)
81 #include <bmalloc/bmalloc.h>
82 #endif
83 #include <wtf/CurrentTime.h>
84 #include <wtf/ListDump.h>
85 #include <wtf/MainThread.h>
86 #include <wtf/ParallelVectorIterator.h>
87 #include <wtf/ProcessID.h>
88 #include <wtf/RAMSize.h>
89 #include <wtf/SimpleStats.h>
90 #include <wtf/Threading.h>
91
92 #if USE(FOUNDATION)
93 #if __has_include(<objc/objc-internal.h>)
94 #include <objc/objc-internal.h>
95 #else
96 extern "C" void* objc_autoreleasePoolPush(void);
97 extern "C" void objc_autoreleasePoolPop(void *context);
98 #endif
99 #endif // USE(FOUNDATION)
100
101 using namespace std;
102
103 namespace JSC {
104
105 namespace {
106
107 bool verboseStop = false;
108
109 double maxPauseMS(double thisPauseMS)
110 {
111     static double maxPauseMS;
112     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
113     return maxPauseMS;
114 }
115
116 size_t minHeapSize(HeapType heapType, size_t ramSize)
117 {
118     if (heapType == LargeHeap) {
119         double result = min(
120             static_cast<double>(Options::largeHeapSize()),
121             ramSize * Options::smallHeapRAMFraction());
122         return static_cast<size_t>(result);
123     }
124     return Options::smallHeapSize();
125 }
126
127 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
128 {
129 #if PLATFORM(IOS)
130     size_t memoryFootprint = bmalloc::api::memoryFootprint();
131     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
132         return Options::smallHeapGrowthFactor() * heapSize;
133     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
134         return Options::mediumHeapGrowthFactor() * heapSize;
135 #else
136     if (heapSize < ramSize * Options::smallHeapRAMFraction())
137         return Options::smallHeapGrowthFactor() * heapSize;
138     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
139         return Options::mediumHeapGrowthFactor() * heapSize;
140 #endif
141     return Options::largeHeapGrowthFactor() * heapSize;
142 }
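// Worked example of the growth policy above (illustrative numbers only; the real fractions and
// growth factors come from Options and may differ): suppose ramSize is 4GB with
// smallHeapRAMFraction = 0.25 and mediumHeapRAMFraction = 0.5. When heapSize (or, on iOS, the
// process footprint) is under 1GB, the budget is heapSize scaled by smallHeapGrowthFactor; under
// 2GB, by mediumHeapGrowthFactor; anything larger uses largeHeapGrowthFactor. With hypothetical
// factors of 2.0 / 1.5 / 1.25, a 512MB heap would get a ~1GB budget while a 3GB heap would get a
// ~3.75GB budget.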
143
144 bool isValidSharedInstanceThreadState(VM* vm)
145 {
146     return vm->currentThreadIsHoldingAPILock();
147 }
148
149 bool isValidThreadState(VM* vm)
150 {
151     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
152         return false;
153
154     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
155         return false;
156
157     return true;
158 }
159
160 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
161 {
162     const char* typeName = "[unknown]";
163     const ClassInfo* info = cell->classInfo(vm);
164     if (info && info->className)
165         typeName = info->className;
166     set.add(typeName);
167 }
168
169 bool measurePhaseTiming()
170 {
171     return false;
172 }
173
174 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
175 {
176     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
177     static std::once_flag once;
178     std::call_once(
179         once,
180         [] {
181             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
182         });
183     return *result;
184 }
185
186 SimpleStats& timingStats(const char* name, CollectionScope scope)
187 {
188     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
189 }
190
191 class TimingScope {
192 public:
193     TimingScope(std::optional<CollectionScope> scope, const char* name)
194         : m_scope(scope)
195         , m_name(name)
196     {
197         if (measurePhaseTiming())
198             m_before = monotonicallyIncreasingTimeMS();
199     }
200     
201     TimingScope(Heap& heap, const char* name)
202         : TimingScope(heap.collectionScope(), name)
203     {
204     }
205     
206     void setScope(std::optional<CollectionScope> scope)
207     {
208         m_scope = scope;
209     }
210     
211     void setScope(Heap& heap)
212     {
213         setScope(heap.collectionScope());
214     }
215     
216     ~TimingScope()
217     {
218         if (measurePhaseTiming()) {
219             double after = monotonicallyIncreasingTimeMS();
220             double timing = after - m_before;
221             SimpleStats& stats = timingStats(m_name, *m_scope);
222             stats.add(timing);
223             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
224         }
225     }
226 private:
227     std::optional<CollectionScope> m_scope;
228     double m_before;
229     const char* m_name;
230 };
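// TimingScope is an RAII phase timer: construct one at the top of a GC phase and, when
// measurePhaseTiming() is enabled, it logs the phase duration and running mean on destruction.
// A rough usage sketch ("Heap::somePhase" is a placeholder name; Heap::beginMarking below is a
// real caller):
//
//     void Heap::somePhase()
//     {
//         TimingScope timingScope(*this, "Heap::somePhase");
//         // ... the phase's work runs here; timing is recorded when timingScope is destroyed.
//     }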
231
232 } // anonymous namespace
233
234 class Heap::Thread : public AutomaticThread {
235 public:
236     Thread(const AbstractLocker& locker, Heap& heap)
237         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
238         , m_heap(heap)
239     {
240     }
241     
242 protected:
243     PollResult poll(const AbstractLocker& locker) override
244     {
245         if (m_heap.m_threadShouldStop) {
246             m_heap.notifyThreadStopping(locker);
247             return PollResult::Stop;
248         }
249         if (m_heap.shouldCollectInCollectorThread(locker))
250             return PollResult::Work;
251         return PollResult::Wait;
252     }
253     
254     WorkResult work() override
255     {
256         m_heap.collectInCollectorThread();
257         return WorkResult::Continue;
258     }
259     
260     void threadDidStart() override
261     {
262         WTF::registerGCThread(GCThreadType::Main);
263     }
264
265 private:
266     Heap& m_heap;
267 };
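// Heap::Thread plugs into the AutomaticThread protocol: the underlying thread repeatedly calls
// poll() under the lock to decide what to do next, and calls work() when poll() returns Work.
// A rough sketch of that contract (not the actual AutomaticThread implementation, which also
// handles idle timeouts and thread teardown):
//
//     for (;;) {
//         PollResult result = poll(locker);   // Stop, Work, or Wait
//         if (result == PollResult::Stop)
//             break;                          // notifyThreadStopping() already ran in poll()
//         if (result == PollResult::Wait)
//             continue;                       // block on m_threadCondition, then re-poll
//         work();                             // here: m_heap.collectInCollectorThread()
//     }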
268
269 Heap::Heap(VM* vm, HeapType heapType)
270     : m_heapType(heapType)
271     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
272     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
273     , m_sizeAfterLastCollect(0)
274     , m_sizeAfterLastFullCollect(0)
275     , m_sizeBeforeLastFullCollect(0)
276     , m_sizeAfterLastEdenCollect(0)
277     , m_sizeBeforeLastEdenCollect(0)
278     , m_bytesAllocatedThisCycle(0)
279     , m_bytesAbandonedSinceLastFullCollect(0)
280     , m_maxEdenSize(m_minBytesPerCycle)
281     , m_maxHeapSize(m_minBytesPerCycle)
282     , m_shouldDoFullCollection(false)
283     , m_totalBytesVisited(0)
284     , m_objectSpace(this)
285     , m_extraMemorySize(0)
286     , m_deprecatedExtraMemorySize(0)
287     , m_machineThreads(std::make_unique<MachineThreads>())
288     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
289     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
290     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
291     , m_raceMarkStack(std::make_unique<MarkStackArray>())
292     , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this))
293     , m_handleSet(vm)
294     , m_codeBlocks(std::make_unique<CodeBlockSet>())
295     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
296     , m_isSafeToCollect(false)
297     , m_vm(vm)
298     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
299     // schedule the timer if we've never done a collection.
300     , m_lastFullGCLength(0.01)
301     , m_lastEdenGCLength(0.01)
302     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
303     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
304     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
305     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
306     , m_deferralDepth(0)
307 #if USE(FOUNDATION)
308     , m_delayedReleaseRecursionCount(0)
309 #endif
310     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
311     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
312     , m_helperClient(&heapHelperPool())
313     , m_threadLock(Box<Lock>::create())
314     , m_threadCondition(AutomaticThreadCondition::create())
315 {
316     m_worldState.store(0);
317     
318     if (Options::useConcurrentGC()) {
319         if (Options::useStochasticMutatorScheduler())
320             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
321         else
322             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
323     } else {
324         // We simulate turning off concurrent GC by making the scheduler say that the world
325         // should always be stopped when the collector is running.
326         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
327     }
328     
329     if (Options::verifyHeap())
330         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
331     
332     m_collectorSlotVisitor->optimizeForStoppedMutator();
333
334     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
335     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
336     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
337
338     LockHolder locker(*m_threadLock);
339     m_thread = adoptRef(new Thread(locker, *this));
340 }
341
342 Heap::~Heap()
343 {
344     forEachSlotVisitor(
345         [&] (SlotVisitor& visitor) {
346             visitor.clearMarkStacks();
347         });
348     m_mutatorMarkStack->clear();
349     m_raceMarkStack->clear();
350     
351     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
352         WeakBlock::destroy(*this, block);
353 }
354
355 bool Heap::isPagedOut(double deadline)
356 {
357     return m_objectSpace.isPagedOut(deadline);
358 }
359
360 // The VM is being destroyed and the collector will never run again.
361 // Run all pending finalizers now because we won't get another chance.
362 void Heap::lastChanceToFinalize()
363 {
364     MonotonicTime before;
365     if (Options::logGC()) {
366         before = MonotonicTime::now();
367         dataLog("[GC<", RawPointer(this), ">: shutdown ");
368     }
369     
370     RELEASE_ASSERT(!m_vm->entryScope);
371     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
372     
373     if (m_collectContinuouslyThread) {
374         {
375             LockHolder locker(m_collectContinuouslyLock);
376             m_shouldStopCollectingContinuously = true;
377             m_collectContinuouslyCondition.notifyOne();
378         }
379         m_collectContinuouslyThread->waitForCompletion();
380     }
381     
382     if (Options::logGC())
383         dataLog("1");
384     
385     // Prevent new collections from being started. This is probably not even necessary, since we're not
386     // going to call into anything that starts collections. Still, this makes the algorithm more
387     // obviously sound.
388     m_isSafeToCollect = false;
389     
390     if (Options::logGC())
391         dataLog("2");
392
393     bool isCollecting;
394     {
395         auto locker = holdLock(*m_threadLock);
396         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
397         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
398     }
399     if (isCollecting) {
400         if (Options::logGC())
401             dataLog("...]\n");
402         
403         // Wait for the current collection to finish.
404         waitForCollector(
405             [&] (const AbstractLocker&) -> bool {
406                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
407                 return m_lastServedTicket == m_lastGrantedTicket;
408             });
409         
410         if (Options::logGC())
411             dataLog("[GC<", RawPointer(this), ">: shutdown ");
412     }
413     if (Options::logGC())
414         dataLog("3");
415
416     RELEASE_ASSERT(m_requests.isEmpty());
417     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
418     
419     // Carefully bring the thread down.
420     bool stopped = false;
421     {
422         LockHolder locker(*m_threadLock);
423         stopped = m_thread->tryStop(locker);
424         m_threadShouldStop = true;
425         if (!stopped)
426             m_threadCondition->notifyOne(locker);
427     }
428
429     if (Options::logGC())
430         dataLog("4");
431     
432     if (!stopped)
433         m_thread->join();
434     
435     if (Options::logGC())
436         dataLog("5 ");
437     
438     m_arrayBuffers.lastChanceToFinalize();
439     m_codeBlocks->lastChanceToFinalize(*m_vm);
440     m_objectSpace.stopAllocating();
441     m_objectSpace.lastChanceToFinalize();
442     releaseDelayedReleasedObjects();
443
444     sweepAllLogicallyEmptyWeakBlocks();
445     
446     m_objectSpace.freeMemory();
447     
448     if (Options::logGC())
449         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
450 }
451
452 void Heap::releaseDelayedReleasedObjects()
453 {
454 #if USE(FOUNDATION)
455     // We need to guard against the case that releasing an object can create more objects due to the
456     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
457     // back here and could try to recursively release objects. We guard that with a recursive entry
459     // count. Only the initial call will release objects; recursive calls simply return and let the
460     // initial call to the function take care of any objects created during release time.
460     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
461     // and use a temp Vector for the actual releasing.
462     if (!m_delayedReleaseRecursionCount++) {
463         while (!m_delayedReleaseObjects.isEmpty()) {
464             ASSERT(m_vm->currentThreadIsHoldingAPILock());
465
466             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
467
468             {
469                 // We need to drop locks before calling out to arbitrary code.
470                 JSLock::DropAllLocks dropAllLocks(m_vm);
471
472                 void* context = objc_autoreleasePoolPush();
473                 objectsToRelease.clear();
474                 objc_autoreleasePoolPop(context);
475             }
476         }
477     }
478     m_delayedReleaseRecursionCount--;
479 #endif
480 }
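// The shape of the guard above, distilled (illustrative pseudocode only; releaseBatch() is a
// hypothetical helper standing in for the clear() performed under DropAllLocks above): a
// recursion counter ensures that only the outermost call drains the queue, and the drain loops
// because releasing can enqueue more objects:
//
//     if (!m_delayedReleaseRecursionCount++) {
//         while (!m_delayedReleaseObjects.isEmpty()) {
//             auto batch = WTFMove(m_delayedReleaseObjects);  // take the current batch
//             releaseBatch(batch);                            // may re-enter and append more
//         }
//     }
//     m_delayedReleaseRecursionCount--;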
481
482 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
483 {
484     didAllocate(size);
485     collectIfNecessaryOrDefer();
486 }
487
488 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
489 {
490     // FIXME: Change this to use SaturatedArithmetic when available.
491     // https://bugs.webkit.org/show_bug.cgi?id=170411
492     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
493     checkedNewSize += size;
494     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
495     reportExtraMemoryAllocatedSlowCase(size);
496 }
497
498 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
499 {
500 #if PLATFORM(IOS)
501     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
502         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
503         m_precentAvailableMemoryCachedCallCount = 0;
504     }
505
506     return m_overCriticalMemoryThreshold;
507 #else
508     UNUSED_PARAM(memoryThresholdCallType);
509     return false;
510 #endif
511 }
512
513 void Heap::reportAbandonedObjectGraph()
514 {
515     // Our clients don't know exactly how much memory they
516     // are abandoning so we just guess for them.
517     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
518
519     // We want to accelerate the next collection. Because memory has just 
520     // been abandoned, the next collection has the potential to 
521     // be more profitable. Since allocation is the trigger for collection, 
522     // we hasten the next collection by pretending that we've allocated more memory. 
523     if (m_fullActivityCallback) {
524         m_fullActivityCallback->didAllocate(
525             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
526     }
527     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
528 }
529
530 void Heap::protect(JSValue k)
531 {
532     ASSERT(k);
533     ASSERT(m_vm->currentThreadIsHoldingAPILock());
534
535     if (!k.isCell())
536         return;
537
538     m_protectedValues.add(k.asCell());
539 }
540
541 bool Heap::unprotect(JSValue k)
542 {
543     ASSERT(k);
544     ASSERT(m_vm->currentThreadIsHoldingAPILock());
545
546     if (!k.isCell())
547         return false;
548
549     return m_protectedValues.remove(k.asCell());
550 }
551
552 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
553 {
554     if (m_arrayBuffers.addReference(cell, buffer)) {
555         collectIfNecessaryOrDefer();
556         didAllocate(buffer->gcSizeEstimateInBytes());
557     }
558 }
559
560 template<typename CellType, typename CellSet>
561 void Heap::finalizeUnconditionalFinalizers(CellSet& cellSet)
562 {
563     cellSet.forEachMarkedCell(
564         [&] (HeapCell* cell, HeapCell::Kind) {
565             static_cast<CellType*>(cell)->finalizeUnconditionally(*vm());
566         });
567 }
568
569 template<typename CellType>
570 void Heap::finalizeUnconditionalFinalizersInIsoSubspace()
571 {
572     JSC::subspaceFor<CellType>(*vm())->forEachMarkedCell(
573         [&] (HeapCell* cell, HeapCell::Kind) {
574             static_cast<CellType*>(cell)->finalizeUnconditionally(*vm());
575         });
576 }
577
578 void Heap::finalizeUnconditionalFinalizers()
579 {
580     finalizeUnconditionalFinalizers<InferredType>(vm()->inferredTypesWithFinalizers);
581     finalizeUnconditionalFinalizers<InferredValue>(vm()->inferredValuesWithFinalizers);
582     finalizeUnconditionalFinalizersInIsoSubspace<JSWeakSet>();
583     finalizeUnconditionalFinalizersInIsoSubspace<JSWeakMap>();
584     
585     while (m_unconditionalFinalizers.hasNext()) {
586         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
587         finalizer->finalizeUnconditionally();
588     }
589 }
590
591 void Heap::willStartIterating()
592 {
593     m_objectSpace.willStartIterating();
594 }
595
596 void Heap::didFinishIterating()
597 {
598     m_objectSpace.didFinishIterating();
599 }
600
601 void Heap::completeAllJITPlans()
602 {
603 #if ENABLE(JIT)
604     JITWorklist::instance()->completeAllForVM(*m_vm);
605 #endif // ENABLE(JIT)
606     DFG::completeAllPlansForVM(*m_vm);
607 }
608
609 template<typename Func>
610 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
611 {
612     m_codeBlocks->iterateCurrentlyExecuting(func);
613     DFG::iterateCodeBlocksForGC(*m_vm, func);
614 }
615
616 template<typename Func>
617 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
618 {
619     Vector<CodeBlock*, 256> codeBlocks;
620     iterateExecutingAndCompilingCodeBlocks(
621         [&] (CodeBlock* codeBlock) {
622             codeBlocks.append(codeBlock);
623         });
624     for (CodeBlock* codeBlock : codeBlocks)
625         func(codeBlock);
626 }
627
628 void Heap::assertMarkStacksEmpty()
629 {
630     bool ok = true;
631     
632     if (!m_sharedCollectorMarkStack->isEmpty()) {
633         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
634         ok = false;
635     }
636     
637     if (!m_sharedMutatorMarkStack->isEmpty()) {
638         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
639         ok = false;
640     }
641     
642     forEachSlotVisitor(
643         [&] (SlotVisitor& visitor) {
644             if (visitor.isEmpty())
645                 return;
646             
647             dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
648             ok = false;
649         });
650     
651     RELEASE_ASSERT(ok);
652 }
653
654 void Heap::gatherStackRoots(ConservativeRoots& roots)
655 {
656     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread);
657 }
658
659 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
660 {
661 #if !ENABLE(JIT)
662     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
663 #else
664     UNUSED_PARAM(roots);
665 #endif
666 }
667
668 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
669 {
670 #if ENABLE(DFG_JIT)
671     m_vm->gatherConservativeRoots(roots);
672 #else
673     UNUSED_PARAM(roots);
674 #endif
675 }
676
677 void Heap::beginMarking()
678 {
679     TimingScope timingScope(*this, "Heap::beginMarking");
680     if (m_collectionScope == CollectionScope::Full)
681         m_codeBlocks->clearMarksForFullCollection();
682     m_jitStubRoutines->clearMarks();
683     m_objectSpace.beginMarking();
684     setMutatorShouldBeFenced(true);
685 }
686
687 void Heap::removeDeadCompilerWorklistEntries()
688 {
689 #if ENABLE(DFG_JIT)
690     for (unsigned i = DFG::numberOfWorklists(); i--;)
691         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
692 #endif
693 }
694
695 bool Heap::isHeapSnapshotting() const
696 {
697     HeapProfiler* heapProfiler = m_vm->heapProfiler();
698     if (UNLIKELY(heapProfiler))
699         return heapProfiler->activeSnapshotBuilder();
700     return false;
701 }
702
703 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
704     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
705         : m_builder(builder)
706     {
707     }
708
709     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
710     {
711         if (kind == HeapCell::JSCell) {
712             JSCell* cell = static_cast<JSCell*>(heapCell);
713             cell->methodTable()->heapSnapshot(cell, m_builder);
714         }
715         return IterationStatus::Continue;
716     }
717
718     HeapSnapshotBuilder& m_builder;
719 };
720
721 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
722 {
723     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
724         HeapIterationScope heapIterationScope(*this);
725         GatherHeapSnapshotData functor(*builder);
726         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
727     }
728 }
729
730 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
731     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
732         : m_snapshot(snapshot)
733     {
734     }
735
736     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
737     {
738         if (kind == HeapCell::JSCell)
739             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
740         return IterationStatus::Continue;
741     }
742
743     HeapSnapshot& m_snapshot;
744 };
745
746 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
747 {
748     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
749         HeapIterationScope heapIterationScope(*this);
750         RemoveDeadHeapSnapshotNodes functor(*snapshot);
751         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
752         snapshot->shrinkToFit();
753     }
754 }
755
756 void Heap::updateObjectCounts()
757 {
758     if (m_collectionScope == CollectionScope::Full)
759         m_totalBytesVisited = 0;
760
761     m_totalBytesVisitedThisCycle = bytesVisited();
762     
763     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
764 }
765
766 void Heap::endMarking()
767 {
768     forEachSlotVisitor(
769         [&] (SlotVisitor& visitor) {
770             visitor.reset();
771         });
772
773     assertMarkStacksEmpty();
774     m_weakReferenceHarvesters.removeAll();
775
776     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
777     
778     m_objectSpace.endMarking();
779     setMutatorShouldBeFenced(Options::forceFencedBarrier());
780 }
781
782 size_t Heap::objectCount()
783 {
784     return m_objectSpace.objectCount();
785 }
786
787 size_t Heap::extraMemorySize()
788 {
789     // FIXME: Change this to use SaturatedArithmetic when available.
790     // https://bugs.webkit.org/show_bug.cgi?id=170411
791     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
792     checkedTotal += m_deprecatedExtraMemorySize;
793     checkedTotal += m_arrayBuffers.size();
794     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
795
796     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
797     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
798 }
799
800 size_t Heap::size()
801 {
802     return m_objectSpace.size() + extraMemorySize();
803 }
804
805 size_t Heap::capacity()
806 {
807     return m_objectSpace.capacity() + extraMemorySize();
808 }
809
810 size_t Heap::protectedGlobalObjectCount()
811 {
812     size_t result = 0;
813     forEachProtectedCell(
814         [&] (JSCell* cell) {
815             if (cell->isObject() && asObject(cell)->isGlobalObject())
816                 result++;
817         });
818     return result;
819 }
820
821 size_t Heap::globalObjectCount()
822 {
823     HeapIterationScope iterationScope(*this);
824     size_t result = 0;
825     m_objectSpace.forEachLiveCell(
826         iterationScope,
827         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
828             if (kind != HeapCell::JSCell)
829                 return IterationStatus::Continue;
830             JSCell* cell = static_cast<JSCell*>(heapCell);
831             if (cell->isObject() && asObject(cell)->isGlobalObject())
832                 result++;
833             return IterationStatus::Continue;
834         });
835     return result;
836 }
837
838 size_t Heap::protectedObjectCount()
839 {
840     size_t result = 0;
841     forEachProtectedCell(
842         [&] (JSCell*) {
843             result++;
844         });
845     return result;
846 }
847
848 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
849 {
850     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
851     forEachProtectedCell(
852         [&] (JSCell* cell) {
853             recordType(*vm(), *result, cell);
854         });
855     return result;
856 }
857
858 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
859 {
860     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
861     HeapIterationScope iterationScope(*this);
862     m_objectSpace.forEachLiveCell(
863         iterationScope,
864         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
865             if (kind == HeapCell::JSCell)
866                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
867             return IterationStatus::Continue;
868         });
869     return result;
870 }
871
872 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
873 {
874     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
875         return;
876     
877     PreventCollectionScope preventCollectionScope(*this);
878     
879     // If JavaScript is running, it's not safe to delete all JavaScript code, since
880     // we'll end up returning to deleted code.
881     RELEASE_ASSERT(!m_vm->entryScope);
882     RELEASE_ASSERT(!m_collectionScope);
883
884     completeAllJITPlans();
885
886     for (ExecutableBase* executable : m_executables)
887         executable->clearCode();
888
889 #if ENABLE(WEBASSEMBLY)
890     {
891         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
892         // have no idea that we cleared the code from all of the Executables in the
893         // VM. This could leave Wasm in an inconsistent state where it has an IC that
894         // points into a CodeBlock that could be dead. The IC will still succeed because
895         // it uses a callee check, but then it will call into dead code.
896         HeapIterationScope heapIterationScope(*this);
897         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
898             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
899             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
900             codeBlock->clearJSCallICs(*m_vm);
901         });
902     }
903 #endif
904 }
905
906 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
907 {
908     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
909         return;
910     
911     PreventCollectionScope preventCollectionScope(*this);
912
913     RELEASE_ASSERT(!m_collectionScope);
914     
915     for (ExecutableBase* current : m_executables) {
916         if (!current->isFunctionExecutable())
917             continue;
918         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
919     }
920 }
921
922 void Heap::clearUnmarkedExecutables()
923 {
924     for (unsigned i = m_executables.size(); i--;) {
925         ExecutableBase* current = m_executables[i];
926         if (isMarked(current))
927             continue;
928
929         // Eagerly dereference the Executable's JITCode in order to run watchpoint
930         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
931         current->clearCode();
932         std::swap(m_executables[i], m_executables.last());
933         m_executables.removeLast();
934     }
935
936     m_executables.shrinkToFit();
937 }
938
939 void Heap::deleteUnmarkedCompiledCode()
940 {
941     clearUnmarkedExecutables();
942     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
943     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
944 }
945
946 void Heap::addToRememberedSet(const JSCell* constCell)
947 {
948     JSCell* cell = const_cast<JSCell*>(constCell);
949     ASSERT(cell);
950     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
951     m_barriersExecuted++;
952     if (m_mutatorShouldBeFenced) {
953         WTF::loadLoadFence();
954         if (!isMarked(cell)) {
955             // During a full collection a store into an unmarked object that had survived past
956             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
957             // object gets marked at some time after this then it will go down the normal marking
958             // path. So, we don't have to remember this object. We could return here. But we go
959             // further and attempt to re-white the object.
960             
961             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
962             
963             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
964                 // Now we protect against this race:
965                 //
966                 //     1) Object starts out black + unmarked.
967                 //     --> We do isMarked here.
968                 //     2) Object is marked and greyed.
969                 //     3) Object is scanned and blacked.
970                 //     --> We do atomicCompareExchangeCellStateStrong here.
971                 //
972                 // In this case we would have made the object white again, even though it should
973                 // be black. This check lets us correct our mistake. This relies on the fact that
974                 // isMarked converges monotonically to true.
975                 if (isMarked(cell)) {
976                     // It's difficult to work out whether the object should be grey or black at
977                     // this point. We say black conservatively.
978                     cell->setCellState(CellState::PossiblyBlack);
979                 }
980                 
981                 // Either way, we can return. Most likely, the object was not marked, and so the
982                 // object is now labeled white. This means that future barrier executions will not
983                 // fire. In the unlikely event that the object had become marked, we can still
984                 // return anyway, since we proved that the object was not marked at the time that
985                 // we executed this slow path.
986             }
987             
988             return;
989         }
990     } else
991         ASSERT(Heap::isMarked(cell));
992     // It could be that the object was *just* marked. This means that the collector may set the
993     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
994     // race with the collector here. If we win then this is accurate because the object _will_
995     // get scanned again. If we lose then someone else will barrier the object again. That would
996     // be unfortunate but not the end of the world.
997     cell->setCellState(CellState::PossiblyGrey);
998     m_mutatorMarkStack->append(cell);
999 }
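// addToRememberedSet() is the slow path of the mutator's write barrier. An ordinary store such as
//
//     object->setFoo(vm, value);   // hypothetical setter that writes through a WriteBarrier<>
//
// runs the inline barrier first (see HeapInlines.h); only when that barrier cannot prove the
// store is safe do we land here, appending the cell to m_mutatorMarkStack so the collector will
// re-visit it.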
1000
1001 void Heap::sweepSynchronously()
1002 {
1003     double before = 0;
1004     if (Options::logGC()) {
1005         dataLog("Full sweep: ", capacity() / 1024, "kb ");
1006         before = currentTimeMS();
1007     }
1008     m_objectSpace.sweep();
1009     m_objectSpace.shrink();
1010     if (Options::logGC()) {
1011         double after = currentTimeMS();
1012         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
1013     }
1014 }
1015
1016 void Heap::collect(Synchronousness synchronousness, GCRequest request)
1017 {
1018     switch (synchronousness) {
1019     case Async:
1020         collectAsync(request);
1021         return;
1022     case Sync:
1023         collectSync(request);
1024         return;
1025     }
1026     RELEASE_ASSERT_NOT_REACHED();
1027 }
1028
1029 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1030 {
1031     switch (synchronousness) {
1032     case Async: {
1033         collectAsync(request);
1034         stopIfNecessary();
1035         return;
1036     }
1037         
1038     case Sync: {
1039         collectSync(request);
1040         
1041         DeferGCForAWhile deferGC(*this);
1042         if (UNLIKELY(Options::useImmortalObjects()))
1043             sweeper().stopSweeping();
1044         
1045         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1046         if (!alreadySweptInCollectSync) {
1047             if (Options::logGC())
1048                 dataLog("[GC<", RawPointer(this), ">: ");
1049             sweepSynchronously();
1050             if (Options::logGC())
1051                 dataLog("]\n");
1052         }
1053         m_objectSpace.assertNoUnswept();
1054         
1055         sweepAllLogicallyEmptyWeakBlocks();
1056         return;
1057     } }
1058     RELEASE_ASSERT_NOT_REACHED();
1059 }
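// Rough usage sketch of the entry points above. GCRequest can be built from a CollectionScope,
// so callers typically look something like this (illustrative of the API shape, not copied from
// a specific call site):
//
//     heap.collectAsync();                           // request a GC and return immediately
//     heap.collectSync(CollectionScope::Full);       // request a full GC and wait for it
//     heap.collectNow(Sync, CollectionScope::Full);  // full GC plus the synchronous sweep work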
1060
1061 void Heap::collectAsync(GCRequest request)
1062 {
1063     if (!m_isSafeToCollect)
1064         return;
1065
1066     bool alreadyRequested = false;
1067     {
1068         LockHolder locker(*m_threadLock);
1069         for (const GCRequest& previousRequest : m_requests) {
1070             if (request.subsumedBy(previousRequest)) {
1071                 alreadyRequested = true;
1072                 break;
1073             }
1074         }
1075     }
1076     if (alreadyRequested)
1077         return;
1078
1079     requestCollection(request);
1080 }
1081
1082 void Heap::collectSync(GCRequest request)
1083 {
1084     if (!m_isSafeToCollect)
1085         return;
1086     
1087     waitForCollection(requestCollection(request));
1088 }
1089
1090 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1091 {
1092     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1093     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1094     
1095     if (false)
1096         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1097     
1098     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1099 }
1100
1101 void Heap::collectInCollectorThread()
1102 {
1103     for (;;) {
1104         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1105         switch (result) {
1106         case RunCurrentPhaseResult::Finished:
1107             return;
1108         case RunCurrentPhaseResult::Continue:
1109             break;
1110         case RunCurrentPhaseResult::NeedCurrentThreadState:
1111             RELEASE_ASSERT_NOT_REACHED();
1112             break;
1113         }
1114     }
1115 }
1116
1117 void Heap::checkConn(GCConductor conn)
1118 {
1119     switch (conn) {
1120     case GCConductor::Mutator:
1121         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1122         return;
1123     case GCConductor::Collector:
1124         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1125         return;
1126     }
1127     RELEASE_ASSERT_NOT_REACHED();
1128 }
1129
1130 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1131 {
1132     checkConn(conn);
1133     m_currentThreadState = currentThreadState;
1134     m_currentThread = &WTF::Thread::current();
1135     
1136     if (conn == GCConductor::Mutator)
1137         sanitizeStackForVM(vm());
1138     
1139     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1140     if (!finishChangingPhase(conn)) {
1141         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1142         // this, but it's probably not the end of the world if it did happen.
1143         if (false)
1144             dataLog("Conn bounce-back.\n");
1145         return RunCurrentPhaseResult::Finished;
1146     }
1147     
1148     bool result = false;
1149     switch (m_currentPhase) {
1150     case CollectorPhase::NotRunning:
1151         result = runNotRunningPhase(conn);
1152         break;
1153         
1154     case CollectorPhase::Begin:
1155         result = runBeginPhase(conn);
1156         break;
1157         
1158     case CollectorPhase::Fixpoint:
1159         if (!currentThreadState && conn == GCConductor::Mutator)
1160             return RunCurrentPhaseResult::NeedCurrentThreadState;
1161         
1162         result = runFixpointPhase(conn);
1163         break;
1164         
1165     case CollectorPhase::Concurrent:
1166         result = runConcurrentPhase(conn);
1167         break;
1168         
1169     case CollectorPhase::Reloop:
1170         result = runReloopPhase(conn);
1171         break;
1172         
1173     case CollectorPhase::End:
1174         result = runEndPhase(conn);
1175         break;
1176     }
1177
1178     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1179 }
1180
1181 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1182 {
1183     // Check m_requests since the mutator calls this to poll what's going on.
1184     {
1185         auto locker = holdLock(*m_threadLock);
1186         if (m_requests.isEmpty())
1187             return false;
1188     }
1189     
1190     return changePhase(conn, CollectorPhase::Begin);
1191 }
1192
1193 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1194 {
1195     m_currentGCStartTime = MonotonicTime::now();
1196     
1197     {
1198         LockHolder locker(*m_threadLock);
1199         RELEASE_ASSERT(!m_requests.isEmpty());
1200         m_currentRequest = m_requests.first();
1201     }
1202         
1203     if (Options::logGC())
1204         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1205
1206     m_beforeGC = MonotonicTime::now();
1207
1208     if (m_collectionScope) {
1209         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1210         RELEASE_ASSERT_NOT_REACHED();
1211     }
1212     
1213     willStartCollection();
1214         
1215     if (UNLIKELY(m_verifier)) {
1216         // Verify that live objects from the last GC cycle haven't been corrupted by
1217         // mutators before we begin this new GC cycle.
1218         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1219             
1220         m_verifier->startGC();
1221         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1222     }
1223         
1224     prepareForMarking();
1225         
1226     if (m_collectionScope == CollectionScope::Full) {
1227         m_opaqueRoots.clear();
1228         m_collectorSlotVisitor->clearMarkStacks();
1229         m_mutatorMarkStack->clear();
1230     }
1231
1232     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1233
1234     beginMarking();
1235
1236     forEachSlotVisitor(
1237         [&] (SlotVisitor& visitor) {
1238             visitor.didStartMarking();
1239         });
1240
1241     m_parallelMarkersShouldExit = false;
1242
1243     m_helperClient.setFunction(
1244         [this] () {
1245             SlotVisitor* slotVisitor;
1246             {
1247                 LockHolder locker(m_parallelSlotVisitorLock);
1248                 if (m_availableParallelSlotVisitors.isEmpty()) {
1249                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1250                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1251                     
1252                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1253                         newVisitor->optimizeForStoppedMutator();
1254                     
1255                     newVisitor->didStartMarking();
1256                     
1257                     slotVisitor = newVisitor.get();
1258                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1259                 } else
1260                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1261             }
1262
1263             WTF::registerGCThread(GCThreadType::Helper);
1264
1265             {
1266                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1267                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1268             }
1269
1270             {
1271                 LockHolder locker(m_parallelSlotVisitorLock);
1272                 m_availableParallelSlotVisitors.append(slotVisitor);
1273             }
1274         });
1275
1276     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1277
1278     m_constraintSet->didStartMarking();
1279     
1280     m_scheduler->beginCollection();
1281     if (Options::logGC())
1282         m_scheduler->log();
1283     
1284     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1285     // checks because bootstrap would have put things into the visitor. So, we should fall
1286     // through to draining.
1287     
1288     if (!slotVisitor.didReachTermination()) {
1289         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1290         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1291         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1292         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1293         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1294         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1295         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1296         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1297         RELEASE_ASSERT_NOT_REACHED();
1298     }
1299         
1300     return changePhase(conn, CollectorPhase::Fixpoint);
1301 }
1302
1303 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1304 {
1305     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1306     
1307     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1308     
1309     if (Options::logGC()) {
1310         HashMap<const char*, size_t> visitMap;
1311         forEachSlotVisitor(
1312             [&] (SlotVisitor& slotVisitor) {
1313                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1314             });
1315         
1316         auto perVisitorDump = sortedMapDump(
1317             visitMap,
1318             [] (const char* a, const char* b) -> bool {
1319                 return strcmp(a, b) < 0;
1320             },
1321             ":", " ");
1322         
1323         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1324     }
1325         
1326     if (slotVisitor.didReachTermination()) {
1327         m_opaqueRoots.deleteOldTables();
1328         
1329         m_scheduler->didReachTermination();
1330         
1331         assertMarkStacksEmpty();
1332             
1333         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1334         // we don't have to execute root constraints again unless the mutator did run. At a
1335         // minimum, we could use this for work estimates - but it's probably more than just an
1336         // estimate.
1337         // https://bugs.webkit.org/show_bug.cgi?id=166828
1338             
1339         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1340         // add their own using Heap::addMarkingConstraint().
1341         bool converged = m_constraintSet->executeConvergence(slotVisitor);
1342         
1343         // FIXME: The slotVisitor.isEmpty() check is most likely not needed.
1344         // https://bugs.webkit.org/show_bug.cgi?id=180310
1345         if (converged && slotVisitor.isEmpty()) {
1346             assertMarkStacksEmpty();
1347             return changePhase(conn, CollectorPhase::End);
1348         }
1349             
1350         m_scheduler->didExecuteConstraints();
1351     }
1352         
1353     if (Options::logGC())
1354         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1355         
1356     {
1357         ParallelModeEnabler enabler(slotVisitor);
1358         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1359     }
1360         
1361     m_scheduler->synchronousDrainingDidStall();
1362
1363     // This is kinda tricky. The termination check looks at:
1364     //
1365     // - Whether the marking threads are active. If they are not, this means that the marking threads'
1366     //   SlotVisitors are empty.
1367     // - Whether the collector's slot visitor is empty.
1368     // - Whether the shared mark stacks are empty.
1369     //
1370     // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC
1371     // work increment, so it must be empty now.
1372     if (slotVisitor.didReachTermination())
1373         return true; // This is like relooping to the top of runFixpointPhase().
1374         
1375     if (!m_scheduler->shouldResume())
1376         return true;
1377
1378     m_scheduler->willResume();
1379         
1380     if (Options::logGC()) {
1381         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1382         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1383     }
1384
1385     // Forgive the mutator for its past failures to keep up.
1386     // FIXME: Figure out if moving this to different places results in perf changes.
1387     m_incrementBalance = 0;
1388         
1389     return changePhase(conn, CollectorPhase::Concurrent);
1390 }
1391
1392 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1393 {
1394     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1395
1396     switch (conn) {
1397     case GCConductor::Mutator: {
1398         // When the mutator has the conn, we poll runConcurrentPhase() every time someone says
1399         // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time
1400         // to stop and do some work.
1401         if (slotVisitor.didReachTermination()
1402             || m_scheduler->shouldStop())
1403             return changePhase(conn, CollectorPhase::Reloop);
1404         
1405         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1406         // everything. This is super cheap if the SlotVisitor is already empty.
1407         slotVisitor.donateAll();
1408         return false;
1409     }
1410     case GCConductor::Collector: {
1411         {
1412             ParallelModeEnabler enabler(slotVisitor);
1413             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1414         }
1415         return changePhase(conn, CollectorPhase::Reloop);
1416     } }
1417     
1418     RELEASE_ASSERT_NOT_REACHED();
1419     return false;
1420 }
1421
1422 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1423 {
1424     if (Options::logGC())
1425         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1426     
1427     m_scheduler->didStop();
1428     
1429     if (Options::logGC())
1430         m_scheduler->log();
1431     
1432     return changePhase(conn, CollectorPhase::Fixpoint);
1433 }
1434
1435 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1436 {
1437     m_scheduler->endCollection();
1438         
1439     {
1440         auto locker = holdLock(m_markingMutex);
1441         m_parallelMarkersShouldExit = true;
1442         m_markingConditionVariable.notifyAll();
1443     }
1444     m_helperClient.finish();
1445     
1446     iterateExecutingAndCompilingCodeBlocks(
1447         [&] (CodeBlock* codeBlock) {
1448             writeBarrier(codeBlock);
1449         });
1450         
1451     updateObjectCounts();
1452     endMarking();
1453         
1454     if (UNLIKELY(m_verifier)) {
1455         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1456         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1457     }
1458         
1459     if (vm()->typeProfiler())
1460         vm()->typeProfiler()->invalidateTypeSetCache();
1461         
1462     reapWeakHandles();
1463     pruneStaleEntriesFromWeakGCMaps();
1464     sweepArrayBuffers();
1465     snapshotUnswept();
1466     finalizeUnconditionalFinalizers();
1467     removeDeadCompilerWorklistEntries();
1468     notifyIncrementalSweeper();
1469     
1470     m_codeBlocks->iterateCurrentlyExecuting(
1471         [&] (CodeBlock* codeBlock) {
1472             writeBarrier(codeBlock);
1473         });
1474     m_codeBlocks->clearCurrentlyExecuting();
1475         
1476     m_objectSpace.prepareForAllocation();
1477     updateAllocationLimits();
1478
1479     if (UNLIKELY(m_verifier)) {
1480         m_verifier->trimDeadCells();
1481         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1482     }
1483
1484     didFinishCollection();
1485     
1486     if (m_currentRequest.didFinishEndPhase)
1487         m_currentRequest.didFinishEndPhase->run();
1488     
1489     if (false) {
1490         dataLog("Heap state after GC:\n");
1491         m_objectSpace.dumpBits();
1492     }
1493     
1494     if (Options::logGC()) {
1495         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1496         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1497     }
1498     
1499     {
1500         auto locker = holdLock(*m_threadLock);
1501         m_requests.removeFirst();
1502         m_lastServedTicket++;
1503         clearMutatorWaiting();
1504     }
1505     ParkingLot::unparkAll(&m_worldState);
1506
1507     if (false)
1508         dataLog("GC END!\n");
1509
1510     setNeedFinalize();
1511
1512     m_lastGCStartTime = m_currentGCStartTime;
1513     m_lastGCEndTime = MonotonicTime::now();
1514         
1515     return changePhase(conn, CollectorPhase::NotRunning);
1516 }
1517
1518 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1519 {
1520     checkConn(conn);
1521
1522     m_nextPhase = nextPhase;
1523
1524     return finishChangingPhase(conn);
1525 }
1526
1527 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1528 {
1529     checkConn(conn);
1530     
1531     if (m_nextPhase == m_currentPhase)
1532         return true;
1533
1534     if (false)
1535         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1536     
1537     m_phaseVersion++;
1538     
1539     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1540     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1541     
1542     if (suspendedBefore != suspendedAfter) {
1543         if (suspendedBefore) {
1544             RELEASE_ASSERT(!suspendedAfter);
1545             
1546             resumeThePeriphery();
1547             if (conn == GCConductor::Collector)
1548                 resumeTheMutator();
1549             else
1550                 handleNeedFinalize();
1551         } else {
1552             RELEASE_ASSERT(!suspendedBefore);
1553             RELEASE_ASSERT(suspendedAfter);
1554             
1555             if (conn == GCConductor::Collector) {
1556                 waitWhileNeedFinalize();
1557                 if (!stopTheMutator()) {
1558                     if (false)
1559                         dataLog("Returning false.\n");
1560                     return false;
1561                 }
1562             } else {
1563                 sanitizeStackForVM(m_vm);
1564                 handleNeedFinalize();
1565             }
1566             stopThePeriphery(conn);
1567         }
1568     }
1569     
1570     m_currentPhase = m_nextPhase;
1571     return true;
1572 }
1573
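// stopThePeriphery() suspends everything other than the mutator thread itself: compiler
// threads are suspended, in-flight JIT work is completed, the ShadowChicken shadow stack is
// updated, old StructureID tables are flushed, and allocation is stopped. Stopping the
// mutator itself is handled separately via stopTheMutator() and the m_worldState bits.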
1574 void Heap::stopThePeriphery(GCConductor conn)
1575 {
1576     if (m_worldIsStopped) {
1577         dataLog("FATAL: world already stopped.\n");
1578         RELEASE_ASSERT_NOT_REACHED();
1579     }
1580     
1581     if (m_mutatorDidRun)
1582         m_mutatorExecutionVersion++;
1583     
1584     m_mutatorDidRun = false;
1585
1586     suspendCompilerThreads();
1587     m_worldIsStopped = true;
1588
1589     forEachSlotVisitor(
1590         [&] (SlotVisitor& slotVisitor) {
1591             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1592         });
1593
1594 #if ENABLE(JIT)
1595     {
1596         DeferGCForAWhile awhile(*this);
1597         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1598             && conn == GCConductor::Collector)
1599             setGCDidJIT();
1600     }
1601 #else
1602     UNUSED_PARAM(conn);
1603 #endif // ENABLE(JIT)
1604     
1605     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1606     
1607     m_structureIDTable.flushOldTables();
1608     m_objectSpace.stopAllocating();
1609     
1610     m_stopTime = MonotonicTime::now();
1611 }
1612
1613 NEVER_INLINE void Heap::resumeThePeriphery()
1614 {
1615     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1616     // collection cycle or this is just a concurrent phase within a collection cycle:
1617     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1618     //   last active block.
1619     // - During collection cycle: it reinstates the last active block.
1620     m_objectSpace.resumeAllocating();
1621     
1622     m_barriersExecuted = 0;
1623     
1624     if (!m_worldIsStopped) {
1625         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1626         RELEASE_ASSERT_NOT_REACHED();
1627     }
1628     m_worldIsStopped = false;
1629     
1630     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1631     // become available. We basically want a lockAny() method that will lock whatever lock is available
1632     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1633     // queues at once, which is totally achievable - it would just require memory allocation, which is
1634     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1635     // with a DLG-style handshake mechanism, but that seems not as general.
1636     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1637
1638     forEachSlotVisitor(
1639         [&] (SlotVisitor& slotVisitor) {
1640             slotVisitorsToUpdate.append(&slotVisitor);
1641         });
1642     
1643     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1644         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1645             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1646             bool remove = false;
1647             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1648                 remove = true;
1649             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1650                 slotVisitor.updateMutatorIsStopped(locker);
1651                 remove = true;
1652             }
1653             if (remove) {
1654                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1655                 slotVisitorsToUpdate.takeLast();
1656             }
1657         }
1658         WTF::Thread::yield();
1659     }
1660     
1661     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1662         slotVisitor->updateMutatorIsStopped();
1663     
1664     resumeCompilerThreads();
1665 }
1666
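// The mutator/collector handshake is encoded in m_worldState as a small bit set (names as
// used below): hasAccessBit (the mutator currently has heap access), stoppedBit (the mutator
// is stopped), mutatorHasConnBit (the mutator, not the collector thread, is driving the
// collection), mutatorWaitingBit (the mutator is parked waiting on the collector), plus
// gcDidJITBit (the mutator must run a cross-modifying-code fence) and needFinalizeBit
// (finalization is pending). stopTheMutator() returns true if the mutator is, or is now,
// effectively stopped, and false if the conn is held by or handed to the mutator instead.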
1667 bool Heap::stopTheMutator()
1668 {
1669     for (;;) {
1670         unsigned oldState = m_worldState.load();
1671         if (oldState & stoppedBit) {
1672             RELEASE_ASSERT(!(oldState & hasAccessBit));
1673             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1674             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1675             return true;
1676         }
1677         
1678         if (oldState & mutatorHasConnBit) {
1679             RELEASE_ASSERT(!(oldState & hasAccessBit));
1680             RELEASE_ASSERT(!(oldState & stoppedBit));
1681             return false;
1682         }
1683
1684         if (!(oldState & hasAccessBit)) {
1685             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1686             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1687             // We can stop the world instantly.
1688             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1689                 return true;
1690             continue;
1691         }
1692         
1693         // Transfer the conn to the mutator and bail.
1694         RELEASE_ASSERT(oldState & hasAccessBit);
1695         RELEASE_ASSERT(!(oldState & stoppedBit));
1696         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1697         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1698             if (false)
1699                 dataLog("Handed off the conn.\n");
1700             m_stopIfNecessaryTimer->scheduleSoon();
1701             ParkingLot::unparkAll(&m_worldState);
1702             return false;
1703         }
1704     }
1705 }
1706
1707 NEVER_INLINE void Heap::resumeTheMutator()
1708 {
1709     if (false)
1710         dataLog("Resuming the mutator.\n");
1711     for (;;) {
1712         unsigned oldState = m_worldState.load();
1713         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1714             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1715             RELEASE_ASSERT_NOT_REACHED();
1716         }
1717         if (oldState & mutatorHasConnBit) {
1718             dataLog("Fatal: mutator has the conn.\n");
1719             RELEASE_ASSERT_NOT_REACHED();
1720         }
1721         
1722         if (!(oldState & stoppedBit)) {
1723             if (false)
1724                 dataLog("Returning because not stopped.\n");
1725             return;
1726         }
1727         
1728         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1729             if (false)
1730                 dataLog("CASing and returning.\n");
1731             ParkingLot::unparkAll(&m_worldState);
1732             return;
1733         }
1734     }
1735 }
1736
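// Several of the slow paths below come in pairs: a no-argument wrapper that loops, and an
// overload taking a snapshot of m_worldState that returns true when the caller should reload
// the state and try again (because it did work, or lost a CAS race), and false when there is
// nothing left to do. stopIfNecessarySlow(), relinquishConn(), handleGCDidJIT(), and
// handleNeedFinalize() all follow this pattern.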
1737 void Heap::stopIfNecessarySlow()
1738 {
1739     while (stopIfNecessarySlow(m_worldState.load())) { }
1740     
1741     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1742     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1743     
1744     handleGCDidJIT();
1745     handleNeedFinalize();
1746     m_mutatorDidRun = true;
1747 }
1748
1749 bool Heap::stopIfNecessarySlow(unsigned oldState)
1750 {
1751     RELEASE_ASSERT(oldState & hasAccessBit);
1752     RELEASE_ASSERT(!(oldState & stoppedBit));
1753     
1754     // It's possible for us to wake up with finalization already requested but the world not yet
1755     // resumed. If that happens, we can't run finalization yet.
1756     if (handleNeedFinalize(oldState))
1757         return true;
1758
1759     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1760     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1761     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1762     // and there would be some other bit indicating whether we were in some GC phase other than the
1763     // NotRunning or Concurrent ones.
1764     if (oldState & mutatorHasConnBit)
1765         collectInMutatorThread();
1766     
1767     return false;
1768 }
1769
1770 NEVER_INLINE void Heap::collectInMutatorThread()
1771 {
1772     CollectingScope collectingScope(*this);
1773     for (;;) {
1774         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1775         switch (result) {
1776         case RunCurrentPhaseResult::Finished:
1777             return;
1778         case RunCurrentPhaseResult::Continue:
1779             break;
1780         case RunCurrentPhaseResult::NeedCurrentThreadState:
1781             sanitizeStackForVM(m_vm);
1782             auto lambda = [&] (CurrentThreadState& state) {
1783                 for (;;) {
1784                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1785                     switch (result) {
1786                     case RunCurrentPhaseResult::Finished:
1787                         return;
1788                     case RunCurrentPhaseResult::Continue:
1789                         break;
1790                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1791                         RELEASE_ASSERT_NOT_REACHED();
1792                         break;
1793                     }
1794                 }
1795             };
1796             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1797             return;
1798         }
1799     }
1800 }
1801
1802 template<typename Func>
1803 void Heap::waitForCollector(const Func& func)
1804 {
1805     for (;;) {
1806         bool done;
1807         {
1808             LockHolder locker(*m_threadLock);
1809             done = func(locker);
1810             if (!done) {
1811                 setMutatorWaiting();
1812                 
1813                 // At this point, the collector knows that we intend to wait, and it will clear the
1814                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1815                 // prevents us from parking unless a stop-the-world is also in effect. Unparking after
1816                 // clearing means that if the clearing happens after we park, then we will unpark.
1817             }
1818         }
1819         
1820         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1821         unsigned oldState = m_worldState.load();
1822         if (stopIfNecessarySlow(oldState))
1823             continue;
1824         
1825         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1826         // do the collection.
1827         relinquishConn();
1828         
1829         if (done) {
1830             clearMutatorWaiting(); // Clean up just in case.
1831             return;
1832         }
1833         
1834         // If mutatorWaitingBit is still set then we want to wait.
1835         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1836     }
1837 }
1838
1839 void Heap::acquireAccessSlow()
1840 {
1841     for (;;) {
1842         unsigned oldState = m_worldState.load();
1843         RELEASE_ASSERT(!(oldState & hasAccessBit));
1844         
1845         if (oldState & stoppedBit) {
1846             if (verboseStop) {
1847                 dataLog("Stopping in acquireAccess!\n");
1848                 WTFReportBacktrace();
1849             }
1850             // Wait until we're not stopped anymore.
1851             ParkingLot::compareAndPark(&m_worldState, oldState);
1852             continue;
1853         }
1854         
1855         RELEASE_ASSERT(!(oldState & stoppedBit));
1856         unsigned newState = oldState | hasAccessBit;
1857         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1858             handleGCDidJIT();
1859             handleNeedFinalize();
1860             m_mutatorDidRun = true;
1861             stopIfNecessary();
1862             return;
1863         }
1864     }
1865 }
1866
1867 void Heap::releaseAccessSlow()
1868 {
1869     for (;;) {
1870         unsigned oldState = m_worldState.load();
1871         if (!(oldState & hasAccessBit)) {
1872             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1873             RELEASE_ASSERT_NOT_REACHED();
1874         }
1875         if (oldState & stoppedBit) {
1876             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1877             RELEASE_ASSERT_NOT_REACHED();
1878         }
1879         
1880         if (handleNeedFinalize(oldState))
1881             continue;
1882         
1883         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1884         
1885         if ((oldState & mutatorHasConnBit)
1886             && m_nextPhase != m_currentPhase) {
1887             // This means that the collector thread had given us the conn so that we would do something
1888             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1889             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1890             // sure to have work to do.
1891             newState |= stoppedBit;
1892         }
1893
1894         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1895             if (oldState & mutatorHasConnBit)
1896                 finishRelinquishingConn();
1897             return;
1898         }
1899     }
1900 }
1901
1902 bool Heap::relinquishConn(unsigned oldState)
1903 {
1904     RELEASE_ASSERT(oldState & hasAccessBit);
1905     RELEASE_ASSERT(!(oldState & stoppedBit));
1906     
1907     if (!(oldState & mutatorHasConnBit))
1908         return false; // Done.
1909     
1910     if (m_threadShouldStop)
1911         return false;
1912     
1913     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1914         return true; // Loop around.
1915     
1916     finishRelinquishingConn();
1917     return true;
1918 }
1919
1920 void Heap::finishRelinquishingConn()
1921 {
1922     if (false)
1923         dataLog("Relinquished the conn.\n");
1924     
1925     sanitizeStackForVM(m_vm);
1926     
1927     auto locker = holdLock(*m_threadLock);
1928     if (!m_requests.isEmpty())
1929         m_threadCondition->notifyOne(locker);
1930     ParkingLot::unparkAll(&m_worldState);
1931 }
1932
1933 void Heap::relinquishConn()
1934 {
1935     while (relinquishConn(m_worldState.load())) { }
1936 }
1937
1938 bool Heap::handleGCDidJIT(unsigned oldState)
1939 {
1940     RELEASE_ASSERT(oldState & hasAccessBit);
1941     if (!(oldState & gcDidJITBit))
1942         return false;
1943     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1944         WTF::crossModifyingCodeFence();
1945         return true;
1946     }
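    // The CAS above failed, meaning someone else changed the state. Return true so the caller
    // reloads m_worldState and retries; once the bit is observed clear we return false above.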
1947     return true;
1948 }
1949
1950 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1951 {
1952     RELEASE_ASSERT(oldState & hasAccessBit);
1953     RELEASE_ASSERT(!(oldState & stoppedBit));
1954     
1955     if (!(oldState & needFinalizeBit))
1956         return false;
1957     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1958         finalize();
1959         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1960         // which case they would be waiting for us to release heap access.
1961         ParkingLot::unparkAll(&m_worldState);
1962         return true;
1963     }
1964     return true;
1965 }
1966
1967 void Heap::handleGCDidJIT()
1968 {
1969     while (handleGCDidJIT(m_worldState.load())) { }
1970 }
1971
1972 void Heap::handleNeedFinalize()
1973 {
1974     while (handleNeedFinalize(m_worldState.load())) { }
1975 }
1976
1977 void Heap::setGCDidJIT()
1978 {
1979     m_worldState.transaction(
1980         [&] (unsigned& state) -> bool {
1981             RELEASE_ASSERT(state & stoppedBit);
1982             state |= gcDidJITBit;
1983             return true;
1984         });
1985 }
1986
1987 void Heap::setNeedFinalize()
1988 {
1989     m_worldState.exchangeOr(needFinalizeBit);
1990     ParkingLot::unparkAll(&m_worldState);
1991     m_stopIfNecessaryTimer->scheduleSoon();
1992 }
1993
1994 void Heap::waitWhileNeedFinalize()
1995 {
1996     for (;;) {
1997         unsigned oldState = m_worldState.load();
1998         if (!(oldState & needFinalizeBit)) {
1999             // This means that either there was no finalize request or the main thread will finalize
2000             // with heap access, so a subsequent call to stopTheWorld() will return only when
2001             // finalize finishes.
2002             return;
2003         }
2004         ParkingLot::compareAndPark(&m_worldState, oldState);
2005     }
2006 }
2007
2008 void Heap::setMutatorWaiting()
2009 {
2010     m_worldState.exchangeOr(mutatorWaitingBit);
2011 }
2012
2013 void Heap::clearMutatorWaiting()
2014 {
2015     m_worldState.exchangeAnd(~mutatorWaitingBit);
2016 }
2017
2018 void Heap::notifyThreadStopping(const AbstractLocker&)
2019 {
2020     m_threadIsStopping = true;
2021     clearMutatorWaiting();
2022     ParkingLot::unparkAll(&m_worldState);
2023 }
2024
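// finalize() performs the work requested via setNeedFinalize() and triggered from
// handleNeedFinalize(): it deletes unmarked compiled code and source provider caches, sweeps
// the eagerly-swept spaces, clears the HasOwnPropertyCache, runs registered heap finalizer
// callbacks, and, if Options::sweepSynchronously() is set, sweeps the whole heap right away.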
2025 void Heap::finalize()
2026 {
2027     MonotonicTime before;
2028     if (Options::logGC()) {
2029         before = MonotonicTime::now();
2030         dataLog("[GC<", RawPointer(this), ">: finalize ");
2031     }
2032     
2033     {
2034         SweepingScope sweepingScope(*this);
2035         deleteUnmarkedCompiledCode();
2036         deleteSourceProviderCaches();
2037         sweepInFinalize();
2038     }
2039     
2040     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2041         cache->clear();
2042     
2043     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2044         callback.run(*vm());
2045     
2046     if (Options::sweepSynchronously())
2047         sweepSynchronously();
2048
2049     if (Options::logGC()) {
2050         MonotonicTime after = MonotonicTime::now();
2051         dataLog((after - before).milliseconds(), "ms]\n");
2052     }
2053 }
2054
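// Collections are requested as tickets. A caller that wants a synchronous collection would do
// roughly the following (a sketch only, not a verbatim copy of any caller in this file):
//
//     Heap::Ticket ticket = requestCollection(request);
//     waitForCollection(ticket);
//
// requestCollection() may also "take the conn", meaning the mutator thread itself will run the
// collector phases from stopIfNecessary() instead of waking the collector thread.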
2055 Heap::Ticket Heap::requestCollection(GCRequest request)
2056 {
2057     stopIfNecessary();
2058     
2059     ASSERT(vm()->currentThreadIsHoldingAPILock());
2060     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2061     
2062     LockHolder locker(*m_threadLock);
2063     // We may be able to steal the conn. That only works if the collector is definitely not running
2064     // right now. This is an optimization that prevents the collector thread from ever starting in most
2065     // cases.
2066     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2067     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2068         if (false)
2069             dataLog("Taking the conn.\n");
2070         m_worldState.exchangeOr(mutatorHasConnBit);
2071     }
2072     
2073     m_requests.append(request);
2074     m_lastGrantedTicket++;
2075     if (!(m_worldState.load() & mutatorHasConnBit))
2076         m_threadCondition->notifyOne(locker);
2077     return m_lastGrantedTicket;
2078 }
2079
2080 void Heap::waitForCollection(Ticket ticket)
2081 {
2082     waitForCollector(
2083         [&] (const AbstractLocker&) -> bool {
2084             return m_lastServedTicket >= ticket;
2085         });
2086 }
2087
2088 void Heap::sweepInFinalize()
2089 {
2090     m_objectSpace.sweepLargeAllocations();
2091     
2092     auto sweepBlock = [&] (MarkedBlock::Handle* handle) {
2093         handle->sweep(nullptr);
2094     };
2095     
2096     vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock);
2097 }
2098
2099 void Heap::suspendCompilerThreads()
2100 {
2101 #if ENABLE(DFG_JIT)
2102     // We ensure (i.e. create if necessary) all of the worklists so that the mutator cannot start a
2103     // new worklist after we have suspended the ones it had already started. That's not very expensive
2104     // since the worklists use AutomaticThreads anyway.
2105     for (unsigned i = DFG::numberOfWorklists(); i--;)
2106         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2107 #endif
2108 }
2109
2110 void Heap::willStartCollection()
2111 {
2112     if (Options::logGC())
2113         dataLog("=> ");
2114     
2115     if (shouldDoFullCollection()) {
2116         m_collectionScope = CollectionScope::Full;
2117         m_shouldDoFullCollection = false;
2118         if (Options::logGC())
2119             dataLog("FullCollection, ");
2120         if (false)
2121             dataLog("Full collection!\n");
2122     } else {
2123         m_collectionScope = CollectionScope::Eden;
2124         if (Options::logGC())
2125             dataLog("EdenCollection, ");
2126         if (false)
2127             dataLog("Eden collection!\n");
2128     }
2129     if (m_collectionScope == CollectionScope::Full) {
2130         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2131         m_extraMemorySize = 0;
2132         m_deprecatedExtraMemorySize = 0;
2133 #if ENABLE(RESOURCE_USAGE)
2134         m_externalMemorySize = 0;
2135 #endif
2136
2137         if (m_fullActivityCallback)
2138             m_fullActivityCallback->willCollect();
2139     } else {
2140         ASSERT(m_collectionScope == CollectionScope::Eden);
2141         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2142     }
2143
2144     if (m_edenActivityCallback)
2145         m_edenActivityCallback->willCollect();
2146
2147     for (auto* observer : m_observers)
2148         observer->willGarbageCollect();
2149 }
2150
2151 void Heap::prepareForMarking()
2152 {
2153     m_objectSpace.prepareForMarking();
2154 }
2155
2156 void Heap::reapWeakHandles()
2157 {
2158     m_objectSpace.reapWeakSets();
2159 }
2160
2161 void Heap::pruneStaleEntriesFromWeakGCMaps()
2162 {
2163     if (m_collectionScope != CollectionScope::Full)
2164         return;
2165     for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2166         weakGCMap->pruneStaleEntries();
2167 }
2168
2169 void Heap::sweepArrayBuffers()
2170 {
2171     m_arrayBuffers.sweep();
2172 }
2173
2174 void Heap::snapshotUnswept()
2175 {
2176     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2177     m_objectSpace.snapshotUnswept();
2178 }
2179
2180 void Heap::deleteSourceProviderCaches()
2181 {
2182     if (*m_lastCollectionScope == CollectionScope::Full)
2183         m_vm->clearSourceProviderCaches();
2184 }
2185
2186 void Heap::notifyIncrementalSweeper()
2187 {
2188     if (m_collectionScope == CollectionScope::Full) {
2189         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2190             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2191     }
2192
2193     m_sweeper->startSweeping();
2194 }
2195
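// Illustrative example (made-up numbers; the real policy comes from minHeapSize() and
// proportionalHeapSize(), which are not shown here): suppose a full collection ends with
// currentHeapSize = 32MB and proportionalHeapSize() yields 64MB. Then m_maxHeapSize = 64MB and
// m_maxEdenSize = 32MB. If the next Eden collection ends with currentHeapSize = 40MB (8MB of
// newly surviving objects), m_maxHeapSize grows to 64 + (40 - 32) = 72MB and m_maxEdenSize is
// recomputed as 72 - 40 = 32MB, so the nursery budget stays fixed, as the comment below notes.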
2196 void Heap::updateAllocationLimits()
2197 {
2198     static const bool verbose = false;
2199     
2200     if (verbose) {
2201         dataLog("\n");
2202         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2203     }
2204     
2205     // Calculate our current heap size threshold for the purpose of figuring out when we should
2206     // run another collection. This isn't the same as either size() or capacity(), though it should
2207     // be somewhere between the two. The key is to match the size calculations involved in calls to
2208     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2209     // fragmentation, we may have size() much smaller than capacity().
2210     size_t currentHeapSize = 0;
2211
2212     // For marked space, we use the total number of bytes visited. This matches the logic for
2213     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2214     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2215     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2216     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2217     currentHeapSize += m_totalBytesVisited;
2218     if (verbose)
2219         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2220
2221     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2222     // extra memory reporting.
2223     currentHeapSize += extraMemorySize();
2224     if (!ASSERT_DISABLED) {
2225         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2226         checkedCurrentHeapSize += extraMemorySize();
2227         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2228     }
2229
2230     if (verbose)
2231         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2232     
2233     if (m_collectionScope == CollectionScope::Full) {
2234         // To avoid pathological GC churn in very small and very large heaps, we set
2235         // the new allocation limit based on the current size of the heap, with a
2236         // fixed minimum.
2237         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2238         if (verbose)
2239             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2240         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2241         if (verbose)
2242             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2243         m_sizeAfterLastFullCollect = currentHeapSize;
2244         if (verbose)
2245             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2246         m_bytesAbandonedSinceLastFullCollect = 0;
2247         if (verbose)
2248             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2249     } else {
2250         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2251         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2252         // But we are sloppy, so we have to defend against the overflow.
2253         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2254         if (verbose)
2255             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2256         m_sizeAfterLastEdenCollect = currentHeapSize;
2257         if (verbose)
2258             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2259         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2260         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2261         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2262             m_shouldDoFullCollection = true;
2263         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2264         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2265         if (verbose)
2266             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2267         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2268         if (verbose)
2269             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2270         if (m_fullActivityCallback) {
2271             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2272             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2273         }
2274     }
2275
2276 #if PLATFORM(IOS)
2277     // Get critical memory threshold for next cycle.
2278     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2279 #endif
2280
2281     m_sizeAfterLastCollect = currentHeapSize;
2282     if (verbose)
2283         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2284     m_bytesAllocatedThisCycle = 0;
2285
2286     if (Options::logGC())
2287         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2288 }
2289
2290 void Heap::didFinishCollection()
2291 {
2292     m_afterGC = MonotonicTime::now();
2293     CollectionScope scope = *m_collectionScope;
2294     if (scope == CollectionScope::Full)
2295         m_lastFullGCLength = m_afterGC - m_beforeGC;
2296     else
2297         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2298
2299 #if ENABLE(RESOURCE_USAGE)
2300     ASSERT(externalMemorySize() <= extraMemorySize());
2301 #endif
2302
2303     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2304         gatherExtraHeapSnapshotData(*heapProfiler);
2305         removeDeadHeapSnapshotNodes(*heapProfiler);
2306     }
2307
2308     if (UNLIKELY(m_verifier))
2309         m_verifier->endGC();
2310
2311     RELEASE_ASSERT(m_collectionScope);
2312     m_lastCollectionScope = m_collectionScope;
2313     m_collectionScope = std::nullopt;
2314
2315     for (auto* observer : m_observers)
2316         observer->didGarbageCollect(scope);
2317 }
2318
2319 void Heap::resumeCompilerThreads()
2320 {
2321 #if ENABLE(DFG_JIT)
2322     for (unsigned i = DFG::numberOfWorklists(); i--;)
2323         DFG::existingWorklistForIndex(i).resumeAllThreads();
2324 #endif
2325 }
2326
2327 GCActivityCallback* Heap::fullActivityCallback()
2328 {
2329     return m_fullActivityCallback.get();
2330 }
2331
2332 GCActivityCallback* Heap::edenActivityCallback()
2333 {
2334     return m_edenActivityCallback.get();
2335 }
2336
2337 IncrementalSweeper& Heap::sweeper()
2338 {
2339     return *m_sweeper;
2340 }
2341
2342 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2343 {
2344     if (m_fullActivityCallback)
2345         m_fullActivityCallback->setEnabled(enable);
2346     if (m_edenActivityCallback)
2347         m_edenActivityCallback->setEnabled(enable);
2348 }
2349
2350 void Heap::didAllocate(size_t bytes)
2351 {
2352     if (m_edenActivityCallback)
2353         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2354     m_bytesAllocatedThisCycle += bytes;
2355     performIncrement(bytes);
2356 }
2357
2358 bool Heap::isValidAllocation(size_t)
2359 {
2360     if (!isValidThreadState(m_vm))
2361         return false;
2362
2363     if (isCurrentThreadBusy())
2364         return false;
2365     
2366     return true;
2367 }
2368
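// addFinalizer() attaches a plain function-pointer finalizer to a cell by allocating a Weak
// handle owned by m_finalizerOwner; when the cell dies, FinalizerOwner::finalize() below calls
// the function with the cell and frees the handle. A hypothetical use (myFinalizer is not a
// real function in this file):
//
//     static void myFinalizer(JSCell* cell) { /* release external resources */ }
//     ...
//     vm.heap.addFinalizer(cell, myFinalizer);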
2369 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2370 {
2371     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2372 }
2373
2374 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2375 {
2376     HandleSlot slot = handle.slot();
2377     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2378     finalizer(slot->asCell());
2379     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2380 }
2381
2382 void Heap::addExecutable(ExecutableBase* executable)
2383 {
2384     m_executables.append(executable);
2385 }
2386
2387 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2388 {
2389     if (!m_fullActivityCallback) {
2390         collectNow(synchronousness, CollectionScope::Full);
2391         return;
2392     }
2393
2394     if (m_fullActivityCallback->didGCRecently()) {
2395         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2396         reportAbandonedObjectGraph();
2397         return;
2398     }
2399
2400     m_fullActivityCallback->setDidGCRecently();
2401     collectNow(synchronousness, CollectionScope::Full);
2402 }
2403
2404 bool Heap::shouldDoFullCollection()
2405 {
2406     if (!Options::useGenerationalGC())
2407         return true;
2408
2409     if (!m_currentRequest.scope)
2410         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2411     return *m_currentRequest.scope == CollectionScope::Full;
2412 }
2413
2414 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2415 {
2416     m_logicallyEmptyWeakBlocks.append(block);
2417 }
2418
2419 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2420 {
2421     if (m_logicallyEmptyWeakBlocks.isEmpty())
2422         return;
2423
2424     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2425     while (sweepNextLogicallyEmptyWeakBlock()) { }
2426 }
2427
2428 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2429 {
2430     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2431         return false;
2432
2433     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2434
2435     block->sweep();
2436     if (block->isEmpty()) {
2437         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2438         m_logicallyEmptyWeakBlocks.removeLast();
2439         WeakBlock::destroy(*this, block);
2440     } else
2441         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2442
2443     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2444         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2445         return false;
2446     }
2447
2448     return true;
2449 }
2450
2451 size_t Heap::visitCount()
2452 {
2453     size_t result = 0;
2454     forEachSlotVisitor(
2455         [&] (SlotVisitor& visitor) {
2456             result += visitor.visitCount();
2457         });
2458     return result;
2459 }
2460
2461 size_t Heap::bytesVisited()
2462 {
2463     size_t result = 0;
2464     forEachSlotVisitor(
2465         [&] (SlotVisitor& visitor) {
2466             result += visitor.bytesVisited();
2467         });
2468     return result;
2469 }
2470
2471 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2472 {
2473     // We don't know the full set of CodeBlocks until compilation has terminated.
2474     completeAllJITPlans();
2475
2476     return m_codeBlocks->iterate(func);
2477 }
2478
2479 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2480 {
2481     return m_codeBlocks->iterate(locker, func);
2482 }
2483
2484 void Heap::writeBarrierSlowPath(const JSCell* from)
2485 {
2486     if (UNLIKELY(mutatorShouldBeFenced())) {
2487         // In this case, the barrierThreshold is the tautological threshold, so "from" may not actually
2488         // be black. But we can't know for sure until we fire off a fence.
2489         WTF::storeLoadFence();
2490         if (from->cellState() != CellState::PossiblyBlack)
2491             return;
2492     }
2493     
2494     addToRememberedSet(from);
2495 }
2496
2497 bool Heap::isCurrentThreadBusy()
2498 {
2499     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2500 }
2501
2502 void Heap::reportExtraMemoryVisited(size_t size)
2503 {
2504     size_t* counter = &m_extraMemorySize;
2505     
2506     for (;;) {
2507         size_t oldSize = *counter;
2508         // FIXME: Change this to use SaturatedArithmetic when available.
2509         // https://bugs.webkit.org/show_bug.cgi?id=170411
2510         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2511         checkedNewSize += size;
2512         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2513         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2514             return;
2515     }
2516 }
2517
2518 #if ENABLE(RESOURCE_USAGE)
2519 void Heap::reportExternalMemoryVisited(size_t size)
2520 {
2521     size_t* counter = &m_externalMemorySize;
2522
2523     for (;;) {
2524         size_t oldSize = *counter;
2525         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2526             return;
2527     }
2528 }
2529 #endif
2530
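// collectIfNecessaryOrDefer() is the allocation-time GC trigger: if collection is currently
// deferred it just records that work was deferred; otherwise it compares the bytes allocated
// this cycle against the Eden budget (or Options::gcMaxHeapSize() when that override is set)
// and requests an asynchronous collection once the budget is exceeded.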
2531 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2532 {
2533     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2534
2535     if (!m_isSafeToCollect)
2536         return;
2537     switch (mutatorState()) {
2538     case MutatorState::Running:
2539     case MutatorState::Allocating:
2540         break;
2541     case MutatorState::Sweeping:
2542     case MutatorState::Collecting:
2543         return;
2544     }
2545     if (!Options::useGC())
2546         return;
2547     
2548     if (mayNeedToStop()) {
2549         if (deferralContext)
2550             deferralContext->m_shouldGC = true;
2551         else if (isDeferred())
2552             m_didDeferGCWork = true;
2553         else
2554             stopIfNecessary();
2555     }
2556     
2557     if (UNLIKELY(Options::gcMaxHeapSize())) {
2558         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2559             return;
2560     } else {
2561         size_t bytesAllowedThisCycle = m_maxEdenSize;
2562
2563 #if PLATFORM(IOS)
2564         if (overCriticalMemoryThreshold())
2565             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2566 #endif
2567
2568         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2569             return;
2570     }
2571
2572     if (deferralContext)
2573         deferralContext->m_shouldGC = true;
2574     else if (isDeferred())
2575         m_didDeferGCWork = true;
2576     else {
2577         collectAsync();
2578         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2579     }
2580 }
2581
2582 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2583 {
2584     // Can't do anything if we're still deferred.
2585     if (m_deferralDepth)
2586         return;
2587     
2588     ASSERT(!isDeferred());
2589     
2590     m_didDeferGCWork = false;
2591     // FIXME: Bring back something like the DeferGCProbability mode.
2592     // https://bugs.webkit.org/show_bug.cgi?id=166627
2593     collectIfNecessaryOrDefer();
2594 }
2595
2596 void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2597 {
2598     m_weakGCMaps.add(weakGCMap);
2599 }
2600
2601 void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2602 {
2603     m_weakGCMaps.remove(weakGCMap);
2604 }
2605
2606 void Heap::didAllocateBlock(size_t capacity)
2607 {
2608 #if ENABLE(RESOURCE_USAGE)
2609     m_blockBytesAllocated += capacity;
2610 #else
2611     UNUSED_PARAM(capacity);
2612 #endif
2613 }
2614
2615 void Heap::didFreeBlock(size_t capacity)
2616 {
2617 #if ENABLE(RESOURCE_USAGE)
2618     m_blockBytesAllocated -= capacity;
2619 #else
2620     UNUSED_PARAM(capacity);
2621 #endif
2622 }
2623
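// Each core constraint is registered with a short tag, a human-readable name, a lambda that
// appends roots to the given SlotVisitor, and a volatility hint describing when the constraint
// may produce new work (for example, as the mutator executes versus as marking progresses),
// which the constraint solver uses to decide how eagerly to re-run it.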
2624 void Heap::addCoreConstraints()
2625 {
2626     m_constraintSet->add(
2627         "Cs", "Conservative Scan",
2628         [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable {
2629             bool shouldNotProduceWork = lastVersion == m_phaseVersion;
2630             if (shouldNotProduceWork)
2631                 return;
2632             
2633             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2634             m_objectSpace.prepareForConservativeScan();
2635             ConservativeRoots conservativeRoots(*this);
2636             SuperSamplerScope superSamplerScope(false);
2637             gatherStackRoots(conservativeRoots);
2638             gatherJSStackRoots(conservativeRoots);
2639             gatherScratchBufferRoots(conservativeRoots);
2640             slotVisitor.append(conservativeRoots);
2641             
2642             lastVersion = m_phaseVersion;
2643         },
2644         ConstraintVolatility::GreyedByExecution);
2645     
2646     m_constraintSet->add(
2647         "Msr", "Misc Small Roots",
2648         [this] (SlotVisitor& slotVisitor) {
2649 #if JSC_OBJC_API_ENABLED
2650             scanExternalRememberedSet(*m_vm, slotVisitor);
2651 #endif
2652
2653             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2654                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2655             
2656             for (auto& pair : m_protectedValues)
2657                 slotVisitor.appendUnbarriered(pair.key);
2658             
2659             if (m_markListSet && m_markListSet->size())
2660                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2661             
2662             slotVisitor.appendUnbarriered(m_vm->exception());
2663             slotVisitor.appendUnbarriered(m_vm->lastException());
2664         },
2665         ConstraintVolatility::GreyedByExecution);
2666     
2667     m_constraintSet->add(
2668         "Sh", "Strong Handles",
2669         [this] (SlotVisitor& slotVisitor) {
2670             m_handleSet.visitStrongHandles(slotVisitor);
2671         },
2672         ConstraintVolatility::GreyedByExecution);
2673     
2674     m_constraintSet->add(
2675         "D", "Debugger",
2676         [this] (SlotVisitor& slotVisitor) {
2677 #if ENABLE(SAMPLING_PROFILER)
2678             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2679                 LockHolder locker(samplingProfiler->getLock());
2680                 samplingProfiler->processUnverifiedStackTraces();
2681                 samplingProfiler->visit(slotVisitor);
2682                 if (Options::logGC() == GCLogging::Verbose)
2683                     dataLog("Sampling Profiler data:\n", slotVisitor);
2684             }
2685 #endif // ENABLE(SAMPLING_PROFILER)
2686             
2687             if (m_vm->typeProfiler())
2688                 m_vm->typeProfilerLog()->visit(slotVisitor);
2689             
2690             m_vm->shadowChicken().visitChildren(slotVisitor);
2691         },
2692         ConstraintVolatility::GreyedByExecution);
2693     
2694     m_constraintSet->add(
2695         "Jsr", "JIT Stub Routines",
2696         [this] (SlotVisitor& slotVisitor) {
2697             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2698         },
2699         ConstraintVolatility::GreyedByExecution);
2700     
2701     m_constraintSet->add(
2702         "Ws", "Weak Sets",
2703         [this] (SlotVisitor& slotVisitor) {
2704             m_objectSpace.visitWeakSets(slotVisitor);
2705         },
2706         ConstraintVolatility::GreyedByMarking);
2707     
2708     m_constraintSet->add(
2709         "Wrh", "Weak Reference Harvesters",
2710         [this] (SlotVisitor& slotVisitor) {
2711             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2712                 current->visitWeakReferences(slotVisitor);
2713         },
2714         ConstraintVolatility::GreyedByMarking);
2715     
2716 #if ENABLE(DFG_JIT)
2717     m_constraintSet->add(
2718         "Dw", "DFG Worklists",
2719         [this] (SlotVisitor& slotVisitor) {
2720             for (unsigned i = DFG::numberOfWorklists(); i--;)
2721                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2722             
2723             // FIXME: This is almost certainly unnecessary.
2724             // https://bugs.webkit.org/show_bug.cgi?id=166829
2725             DFG::iterateCodeBlocksForGC(
2726                 *m_vm,
2727                 [&] (CodeBlock* codeBlock) {
2728                     slotVisitor.appendUnbarriered(codeBlock);
2729                 });
2730             
2731             if (Options::logGC() == GCLogging::Verbose)
2732                 dataLog("DFG Worklists:\n", slotVisitor);
2733         },
2734         ConstraintVolatility::GreyedByMarking);
2735 #endif
2736     
2737     m_constraintSet->add(
2738         "Cb", "CodeBlocks",
2739         [this] (SlotVisitor& slotVisitor) {
2740             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2741                 [&] (CodeBlock* codeBlock) {
2742                     // Visit the CodeBlock as a constraint only if it's black.
2743                     if (Heap::isMarked(codeBlock)
2744                         && codeBlock->cellState() == CellState::PossiblyBlack)
2745                         slotVisitor.visitAsConstraint(codeBlock);
2746                 });
2747         },
2748         ConstraintVolatility::SeldomGreyed);
2749     
2750     m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this));
2751 }
2752
2753 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2754 {
2755     PreventCollectionScope preventCollectionScope(*this);
2756     m_constraintSet->add(WTFMove(constraint));
2757 }
2758
2759 void Heap::notifyIsSafeToCollect()
2760 {
2761     MonotonicTime before;
2762     if (Options::logGC()) {
2763         before = MonotonicTime::now();
2764         dataLog("[GC<", RawPointer(this), ">: starting ");
2765     }
2766     
2767     addCoreConstraints();
2768     
2769     m_isSafeToCollect = true;
2770     
2771     if (Options::collectContinuously()) {
2772         m_collectContinuouslyThread = WTF::Thread::create(
2773             "JSC DEBUG Continuous GC",
2774             [this] () {
2775                 MonotonicTime initialTime = MonotonicTime::now();
2776                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2777                 while (!m_shouldStopCollectingContinuously) {
2778                     {
2779                         LockHolder locker(*m_threadLock);
2780                         if (m_requests.isEmpty()) {
2781                             m_requests.append(std::nullopt);
2782                             m_lastGrantedTicket++;
2783                             m_threadCondition->notifyOne(locker);
2784                         }
2785                     }
2786                     
2787                     {
2788                         LockHolder locker(m_collectContinuouslyLock);
2789                         Seconds elapsed = MonotonicTime::now() - initialTime;
2790                         Seconds elapsedInPeriod = elapsed % period;
2791                         MonotonicTime timeToWakeUp =
2792                             initialTime + elapsed - elapsedInPeriod + period;
2793                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2794                             m_collectContinuouslyCondition.waitUntil(
2795                                 m_collectContinuouslyLock, timeToWakeUp);
2796                         }
2797                     }
2798                 }
2799             });
2800     }
2801     
2802     if (Options::logGC())
2803         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2804 }
2805
2806 void Heap::preventCollection()
2807 {
2808     if (!m_isSafeToCollect)
2809         return;
2810     
2811     // This prevents the collectContinuously thread from starting a collection.
2812     m_collectContinuouslyLock.lock();
2813     
2814     // Wait for all collections to finish.
2815     waitForCollector(
2816         [&] (const AbstractLocker&) -> bool {
2817             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2818             return m_lastServedTicket == m_lastGrantedTicket;
2819         });
2820     
2821     // Now a collection can only start if this thread starts it.
2822     RELEASE_ASSERT(!m_collectionScope);
2823 }
2824
2825 void Heap::allowCollection()
2826 {
2827     if (!m_isSafeToCollect)
2828         return;
2829     
2830     m_collectContinuouslyLock.unlock();
2831 }
2832
2833 void Heap::setMutatorShouldBeFenced(bool value)
2834 {
2835     m_mutatorShouldBeFenced = value;
2836     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2837 }
2838
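// performIncrement() implements allocation-paced incremental marking on the mutator: each
// didAllocate(bytes) call above credits bytes * Options::gcIncrementScale() to
// m_incrementBalance, and once the balance reaches Options::gcIncrementBytes() the mutator
// drains up to Options::gcIncrementMaxBytes() worth of marking work and debits the bytes it
// actually visited (so the balance can briefly go negative).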
2839 void Heap::performIncrement(size_t bytes)
2840 {
2841     if (!m_objectSpace.isMarking())
2842         return;
2843
2844     m_incrementBalance += bytes * Options::gcIncrementScale();
2845
2846     // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
2847     // state when the double goes wild.
2848     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2849         m_incrementBalance = 0;
2850     
2851     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2852         return;
2853
2854     double targetBytes = m_incrementBalance;
2855     if (targetBytes <= 0)
2856         return;
2857     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2858
2859     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2860     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2861     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2862     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2863     m_incrementBalance -= bytesVisited;
2864 }
2865
2866 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2867 {
2868     m_heapFinalizerCallbacks.append(callback);
2869 }
2870
2871 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2872 {
2873     m_heapFinalizerCallbacks.removeFirst(callback);
2874 }
2875
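// runTaskInParallel() publishes a task as the "bonus visitor task" so that otherwise idle
// parallel marking threads can pick it up, runs the same task on the collector's own
// SlotVisitor, and then waits for the task's ref count to drop back to its initial value,
// which guarantees that every thread has finished with it before this function returns.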
2876 void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2877 {
2878     auto locker = holdLock(m_markingMutex);
2879     m_bonusVisitorTask = task;
2880     m_markingConditionVariable.notifyAll();
2881 }
2882
2883 void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2884 {
2885     unsigned initialRefCount = task->refCount();
2886     setBonusVisitorTask(task);
2887     task->run(*m_collectorSlotVisitor);
2888     setBonusVisitorTask(nullptr);
2889     // The constraint solver expects that, by the time this function returns, the task has terminated
2890     // in all threads. The wait below ensures that property.
2891     {
2892         auto locker = holdLock(m_markingMutex);
2893         while (task->refCount() > initialRefCount)
2894             m_markingConditionVariable.wait(m_markingMutex);
2895     }
2896 }
2897
2898 } // namespace JSC