CodeBlocks should be in IsoSubspaces
Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "InferredTypeInlines.h"
44 #include "InferredValueInlines.h"
45 #include "Interpreter.h"
46 #include "IsoCellSetInlines.h"
47 #include "JITStubRoutineSet.h"
48 #include "JITWorklist.h"
49 #include "JSCInlines.h"
50 #include "JSGlobalObject.h"
51 #include "JSLock.h"
52 #include "JSVirtualMachineInternal.h"
53 #include "JSWeakMap.h"
54 #include "JSWeakSet.h"
55 #include "JSWebAssemblyCodeBlock.h"
56 #include "MachineStackMarker.h"
57 #include "MarkStackMergingConstraint.h"
58 #include "MarkedAllocatorInlines.h"
59 #include "MarkedSpaceInlines.h"
60 #include "MarkingConstraintSet.h"
61 #include "PreventCollectionScope.h"
62 #include "SamplingProfiler.h"
63 #include "ShadowChicken.h"
64 #include "SpaceTimeMutatorScheduler.h"
65 #include "SubspaceInlines.h"
66 #include "SuperSampler.h"
67 #include "StochasticSpaceTimeMutatorScheduler.h"
68 #include "StopIfNecessaryTimer.h"
69 #include "SweepingScope.h"
70 #include "SynchronousStopTheWorldMutatorScheduler.h"
71 #include "TypeProfiler.h"
72 #include "TypeProfilerLog.h"
73 #include "UnlinkedCodeBlock.h"
74 #include "VM.h"
75 #include "VisitCounter.h"
76 #include "WasmMemory.h"
77 #include "WeakMapImplInlines.h"
78 #include "WeakSetInlines.h"
79 #include <algorithm>
80 #if PLATFORM(IOS)
81 #include <bmalloc/bmalloc.h>
82 #endif
83 #include <wtf/CurrentTime.h>
84 #include <wtf/ListDump.h>
85 #include <wtf/MainThread.h>
86 #include <wtf/ParallelVectorIterator.h>
87 #include <wtf/ProcessID.h>
88 #include <wtf/RAMSize.h>
89 #include <wtf/SimpleStats.h>
90 #include <wtf/Threading.h>
91
92 #if USE(FOUNDATION)
93 #if __has_include(<objc/objc-internal.h>)
94 #include <objc/objc-internal.h>
95 #else
96 extern "C" void* objc_autoreleasePoolPush(void);
97 extern "C" void objc_autoreleasePoolPop(void *context);
98 #endif
99 #endif // USE(FOUNDATION)
100
101 using namespace std;
102
103 namespace JSC {
104
105 namespace {
106
107 bool verboseStop = false;
108
109 double maxPauseMS(double thisPauseMS)
110 {
111     static double maxPauseMS;
112     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
113     return maxPauseMS;
114 }
115
116 size_t minHeapSize(HeapType heapType, size_t ramSize)
117 {
118     if (heapType == LargeHeap) {
119         double result = min(
120             static_cast<double>(Options::largeHeapSize()),
121             ramSize * Options::smallHeapRAMFraction());
122         return static_cast<size_t>(result);
123     }
124     return Options::smallHeapSize();
125 }
126
127 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
128 {
129 #if PLATFORM(IOS)
130     size_t memoryFootprint = bmalloc::api::memoryFootprint();
131     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
132         return Options::smallHeapGrowthFactor() * heapSize;
133     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
134         return Options::mediumHeapGrowthFactor() * heapSize;
135 #else
136     if (heapSize < ramSize * Options::smallHeapRAMFraction())
137         return Options::smallHeapGrowthFactor() * heapSize;
138     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
139         return Options::mediumHeapGrowthFactor() * heapSize;
140 #endif
141     return Options::largeHeapGrowthFactor() * heapSize;
142 }
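// Illustrative sketch (not part of Heap): how the tiered policy in proportionalHeapSize()
// plays out under hypothetical option values. The fractions and growth factors below are
// assumptions chosen for illustration; the real values come from Options at runtime.
static size_t exampleProportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Assume smallHeapRAMFraction = 0.25, mediumHeapRAMFraction = 0.5, and growth
    // factors of 2x / 1.5x / 1.27x, mirroring the non-iOS branch above.
    if (heapSize < ramSize * 0.25)
        return static_cast<size_t>(2.0 * heapSize);
    if (heapSize < ramSize * 0.5)
        return static_cast<size_t>(1.5 * heapSize);
    return static_cast<size_t>(1.27 * heapSize);
}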
143
144 bool isValidSharedInstanceThreadState(VM* vm)
145 {
146     return vm->currentThreadIsHoldingAPILock();
147 }
148
149 bool isValidThreadState(VM* vm)
150 {
151     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
152         return false;
153
154     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
155         return false;
156
157     return true;
158 }
159
160 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
161 {
162     const char* typeName = "[unknown]";
163     const ClassInfo* info = cell->classInfo(vm);
164     if (info && info->className)
165         typeName = info->className;
166     set.add(typeName);
167 }
168
169 bool measurePhaseTiming()
170 {
171     return false;
172 }
173
174 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
175 {
176     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
177     static std::once_flag once;
178     std::call_once(
179         once,
180         [] {
181             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
182         });
183     return *result;
184 }
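// A minimal sketch of the pattern timingStats() uses above: a deliberately leaked,
// lazily constructed singleton guarded by std::call_once, so initialization is
// thread-safe and the object is never torn down at exit. Names are illustrative.
template<typename T>
T& exampleLeakySingleton()
{
    static T* instance;
    static std::once_flag onceFlag;
    std::call_once(
        onceFlag,
        [] {
            instance = new T();
        });
    return *instance;
}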
185
186 SimpleStats& timingStats(const char* name, CollectionScope scope)
187 {
188     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
189 }
190
191 class TimingScope {
192 public:
193     TimingScope(std::optional<CollectionScope> scope, const char* name)
194         : m_scope(scope)
195         , m_name(name)
196     {
197         if (measurePhaseTiming())
198             m_before = monotonicallyIncreasingTimeMS();
199     }
200     
201     TimingScope(Heap& heap, const char* name)
202         : TimingScope(heap.collectionScope(), name)
203     {
204     }
205     
206     void setScope(std::optional<CollectionScope> scope)
207     {
208         m_scope = scope;
209     }
210     
211     void setScope(Heap& heap)
212     {
213         setScope(heap.collectionScope());
214     }
215     
216     ~TimingScope()
217     {
218         if (measurePhaseTiming()) {
219             double after = monotonicallyIncreasingTimeMS();
220             double timing = after - m_before;
221             SimpleStats& stats = timingStats(m_name, *m_scope);
222             stats.add(timing);
223             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
224         }
225     }
226 private:
227     std::optional<CollectionScope> m_scope;
228     double m_before;
229     const char* m_name;
230 };
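// Usage sketch for TimingScope (illustrative only; exampleTimedPhase is hypothetical).
// The collection scope can be supplied at construction, or refreshed via setScope()
// if it is not known until partway through the phase being timed.
inline void exampleTimedPhase(Heap& heap)
{
    TimingScope timingScope(heap, "Heap::exampleTimedPhase");
    // ... phase work that might establish or change heap.collectionScope() goes here ...
    timingScope.setScope(heap);
}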
231
232 } // anonymous namespace
233
234 class Heap::Thread : public AutomaticThread {
235 public:
236     Thread(const AbstractLocker& locker, Heap& heap)
237         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
238         , m_heap(heap)
239     {
240     }
241     
242 protected:
243     PollResult poll(const AbstractLocker& locker) override
244     {
245         if (m_heap.m_threadShouldStop) {
246             m_heap.notifyThreadStopping(locker);
247             return PollResult::Stop;
248         }
249         if (m_heap.shouldCollectInCollectorThread(locker))
250             return PollResult::Work;
251         return PollResult::Wait;
252     }
253     
254     WorkResult work() override
255     {
256         m_heap.collectInCollectorThread();
257         return WorkResult::Continue;
258     }
259     
260     void threadDidStart() override
261     {
262         WTF::registerGCThread(GCThreadType::Main);
263     }
264
265 private:
266     Heap& m_heap;
267 };
268
269 Heap::Heap(VM* vm, HeapType heapType)
270     : m_heapType(heapType)
271     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
272     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
273     , m_sizeAfterLastCollect(0)
274     , m_sizeAfterLastFullCollect(0)
275     , m_sizeBeforeLastFullCollect(0)
276     , m_sizeAfterLastEdenCollect(0)
277     , m_sizeBeforeLastEdenCollect(0)
278     , m_bytesAllocatedThisCycle(0)
279     , m_bytesAbandonedSinceLastFullCollect(0)
280     , m_maxEdenSize(m_minBytesPerCycle)
281     , m_maxHeapSize(m_minBytesPerCycle)
282     , m_shouldDoFullCollection(false)
283     , m_totalBytesVisited(0)
284     , m_objectSpace(this)
285     , m_extraMemorySize(0)
286     , m_deprecatedExtraMemorySize(0)
287     , m_machineThreads(std::make_unique<MachineThreads>())
288     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
289     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
290     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
291     , m_raceMarkStack(std::make_unique<MarkStackArray>())
292     , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this))
293     , m_handleSet(vm)
294     , m_codeBlocks(std::make_unique<CodeBlockSet>())
295     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
296     , m_isSafeToCollect(false)
297     , m_vm(vm)
298     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
299     // schedule the timer if we've never done a collection.
300     , m_lastFullGCLength(0.01)
301     , m_lastEdenGCLength(0.01)
302     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
303     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
304     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
305     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
306     , m_deferralDepth(0)
307 #if USE(FOUNDATION)
308     , m_delayedReleaseRecursionCount(0)
309 #endif
310     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
311     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
312     , m_helperClient(&heapHelperPool())
313     , m_threadLock(Box<Lock>::create())
314     , m_threadCondition(AutomaticThreadCondition::create())
315 {
316     m_worldState.store(0);
317     
318     if (Options::useConcurrentGC()) {
319         if (Options::useStochasticMutatorScheduler())
320             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
321         else
322             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
323     } else {
324         // We simulate turning off concurrent GC by making the scheduler say that the world
325         // should always be stopped when the collector is running.
326         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
327     }
328     
329     if (Options::verifyHeap())
330         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
331     
332     m_collectorSlotVisitor->optimizeForStoppedMutator();
333
334     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
335     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
336     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
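    // Worked example with hypothetical numbers: on a device with 4GB of RAM and
    // criticalGCMemoryThreshold = 0.80, memoryAboveCriticalThreshold is
    // 4GB * (1 - 0.80) = ~819MB, so m_maxEdenSizeWhenCritical comes out to ~205MB.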
337
338     LockHolder locker(*m_threadLock);
339     m_thread = adoptRef(new Thread(locker, *this));
340 }
341
342 Heap::~Heap()
343 {
344     forEachSlotVisitor(
345         [&] (SlotVisitor& visitor) {
346             visitor.clearMarkStacks();
347         });
348     m_mutatorMarkStack->clear();
349     m_raceMarkStack->clear();
350     
351     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
352         WeakBlock::destroy(*this, block);
353 }
354
355 bool Heap::isPagedOut(double deadline)
356 {
357     return m_objectSpace.isPagedOut(deadline);
358 }
359
360 // The VM is being destroyed and the collector will never run again.
361 // Run all pending finalizers now because we won't get another chance.
362 void Heap::lastChanceToFinalize()
363 {
364     MonotonicTime before;
365     if (Options::logGC()) {
366         before = MonotonicTime::now();
367         dataLog("[GC<", RawPointer(this), ">: shutdown ");
368     }
369     
370     m_isShuttingDown = true;
371     
372     RELEASE_ASSERT(!m_vm->entryScope);
373     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
374     
375     if (m_collectContinuouslyThread) {
376         {
377             LockHolder locker(m_collectContinuouslyLock);
378             m_shouldStopCollectingContinuously = true;
379             m_collectContinuouslyCondition.notifyOne();
380         }
381         m_collectContinuouslyThread->waitForCompletion();
382     }
383     
384     if (Options::logGC())
385         dataLog("1");
386     
387     // Prevent new collections from being started. This is probably not even necessary, since we're not
388     // going to call into anything that starts collections. Still, this makes the algorithm more
389     // obviously sound.
390     m_isSafeToCollect = false;
391     
392     if (Options::logGC())
393         dataLog("2");
394
395     bool isCollecting;
396     {
397         auto locker = holdLock(*m_threadLock);
398         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
399         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
400     }
401     if (isCollecting) {
402         if (Options::logGC())
403             dataLog("...]\n");
404         
405         // Wait for the current collection to finish.
406         waitForCollector(
407             [&] (const AbstractLocker&) -> bool {
408                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
409                 return m_lastServedTicket == m_lastGrantedTicket;
410             });
411         
412         if (Options::logGC())
413             dataLog("[GC<", RawPointer(this), ">: shutdown ");
414     }
415     if (Options::logGC())
416         dataLog("3");
417
418     RELEASE_ASSERT(m_requests.isEmpty());
419     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
420     
421     // Carefully bring the thread down.
422     bool stopped = false;
423     {
424         LockHolder locker(*m_threadLock);
425         stopped = m_thread->tryStop(locker);
426         m_threadShouldStop = true;
427         if (!stopped)
428             m_threadCondition->notifyOne(locker);
429     }
430
431     if (Options::logGC())
432         dataLog("4");
433     
434     if (!stopped)
435         m_thread->join();
436     
437     if (Options::logGC())
438         dataLog("5 ");
439     
440     m_arrayBuffers.lastChanceToFinalize();
441     m_objectSpace.stopAllocating();
442     m_objectSpace.lastChanceToFinalize();
443     releaseDelayedReleasedObjects();
444
445     sweepAllLogicallyEmptyWeakBlocks();
446     
447     m_objectSpace.freeMemory();
448     
449     if (Options::logGC())
450         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
451 }
452
453 void Heap::releaseDelayedReleasedObjects()
454 {
455 #if USE(FOUNDATION)
456     // We need to guard against the case that releasing an object can create more objects due to the
457     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
458     // back here and could try to recursively release objects. We guard that with a recursive entry
459     // count. Only the initial call will release objects; recursive calls simply return and let
460     // the initial call to the function take care of any objects created during release time.
461     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
462     // and use a temp Vector for the actual releasing.
463     if (!m_delayedReleaseRecursionCount++) {
464         while (!m_delayedReleaseObjects.isEmpty()) {
465             ASSERT(m_vm->currentThreadIsHoldingAPILock());
466
467             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
468
469             {
470                 // We need to drop locks before calling out to arbitrary code.
471                 JSLock::DropAllLocks dropAllLocks(m_vm);
472
473                 void* context = objc_autoreleasePoolPush();
474                 objectsToRelease.clear();
475                 objc_autoreleasePoolPop(context);
476             }
477         }
478     }
479     m_delayedReleaseRecursionCount--;
480 #endif
481 }
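// A minimal sketch of the recursion-guard-plus-move pattern used above, with hypothetical
// names. Only the outermost call drains the queue; moving the pending work into a local
// vector keeps the member container in a sane state if running a task re-enters this code
// and appends more work, which the outer while loop then picks up.
template<typename TaskVector>
void exampleDrainPendingTasks(unsigned& recursionCount, TaskVector& pendingTasks)
{
    if (!recursionCount++) {
        while (!pendingTasks.isEmpty()) {
            TaskVector tasks = WTFMove(pendingTasks);
            for (auto& task : tasks)
                task(); // May re-enter and append to pendingTasks.
        }
    }
    recursionCount--;
}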
482
483 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
484 {
485     didAllocate(size);
486     collectIfNecessaryOrDefer();
487 }
488
489 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
490 {
491     // FIXME: Change this to use SaturatedArithmetic when available.
492     // https://bugs.webkit.org/show_bug.cgi?id=170411
493     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
494     checkedNewSize += size;
495     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
496     reportExtraMemoryAllocatedSlowCase(size);
497 }
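// A small sketch of the clamp-on-overflow arithmetic the FIXME above alludes to, written
// with Checked<> the same way deprecatedReportExtraMemorySlowCase() and extraMemorySize()
// do today. The helper name is illustrative.
static size_t exampleSaturatedAdd(size_t a, size_t b)
{
    Checked<size_t, RecordOverflow> sum = a;
    sum += b;
    if (UNLIKELY(sum.hasOverflowed()))
        return std::numeric_limits<size_t>::max();
    return sum.unsafeGet();
}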
498
499 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
500 {
501 #if PLATFORM(IOS)
502     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
503         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
504         m_precentAvailableMemoryCachedCallCount = 0;
505     }
506
507     return m_overCriticalMemoryThreshold;
508 #else
509     UNUSED_PARAM(memoryThresholdCallType);
510     return false;
511 #endif
512 }
513
514 void Heap::reportAbandonedObjectGraph()
515 {
516     // Our clients don't know exactly how much memory they
517     // are abandoning so we just guess for them.
518     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
519
520     // We want to accelerate the next collection. Because memory has just 
521     // been abandoned, the next collection has the potential to 
522     // be more profitable. Since allocation is the trigger for collection, 
523     // we hasten the next collection by pretending that we've allocated more memory. 
524     if (m_fullActivityCallback) {
525         m_fullActivityCallback->didAllocate(
526             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
527     }
528     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
529 }
530
531 void Heap::protect(JSValue k)
532 {
533     ASSERT(k);
534     ASSERT(m_vm->currentThreadIsHoldingAPILock());
535
536     if (!k.isCell())
537         return;
538
539     m_protectedValues.add(k.asCell());
540 }
541
542 bool Heap::unprotect(JSValue k)
543 {
544     ASSERT(k);
545     ASSERT(m_vm->currentThreadIsHoldingAPILock());
546
547     if (!k.isCell())
548         return false;
549
550     return m_protectedValues.remove(k.asCell());
551 }
552
553 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
554 {
555     if (m_arrayBuffers.addReference(cell, buffer)) {
556         collectIfNecessaryOrDefer();
557         didAllocate(buffer->gcSizeEstimateInBytes());
558     }
559 }
560
561 template<typename CellType, typename CellSet>
562 void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet)
563 {
564     cellSet.forEachMarkedCell(
565         [&] (HeapCell* cell, HeapCell::Kind) {
566             static_cast<CellType*>(cell)->finalizeUnconditionally(*vm());
567         });
568 }
569
570 void Heap::finalizeUnconditionalFinalizers()
571 {
572     finalizeMarkedUnconditionalFinalizers<InferredType>(vm()->inferredTypesWithFinalizers);
573     finalizeMarkedUnconditionalFinalizers<InferredValue>(vm()->inferredValuesWithFinalizers);
574     vm()->forEachCodeBlockSpace(
575         [&] (auto& space) {
576             this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.finalizerSet);
577         });
578     finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm()->executableToCodeBlockEdgesWithFinalizers);
579     finalizeMarkedUnconditionalFinalizers<JSWeakSet>(vm()->weakSetSpace);
580     finalizeMarkedUnconditionalFinalizers<JSWeakMap>(vm()->weakMapSpace);
581     
582     while (m_unconditionalFinalizers.hasNext()) {
583         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
584         finalizer->finalizeUnconditionally();
585     }
586 }
587
588 void Heap::willStartIterating()
589 {
590     m_objectSpace.willStartIterating();
591 }
592
593 void Heap::didFinishIterating()
594 {
595     m_objectSpace.didFinishIterating();
596 }
597
598 void Heap::completeAllJITPlans()
599 {
600 #if ENABLE(JIT)
601     JITWorklist::instance()->completeAllForVM(*m_vm);
602 #endif // ENABLE(JIT)
603     DFG::completeAllPlansForVM(*m_vm);
604 }
605
606 template<typename Func>
607 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
608 {
609     m_codeBlocks->iterateCurrentlyExecuting(func);
610     DFG::iterateCodeBlocksForGC(*m_vm, func);
611 }
612
613 template<typename Func>
614 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
615 {
616     Vector<CodeBlock*, 256> codeBlocks;
617     iterateExecutingAndCompilingCodeBlocks(
618         [&] (CodeBlock* codeBlock) {
619             codeBlocks.append(codeBlock);
620         });
621     for (CodeBlock* codeBlock : codeBlocks)
622         func(codeBlock);
623 }
624
625 void Heap::assertMarkStacksEmpty()
626 {
627     bool ok = true;
628     
629     if (!m_sharedCollectorMarkStack->isEmpty()) {
630         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
631         ok = false;
632     }
633     
634     if (!m_sharedMutatorMarkStack->isEmpty()) {
635         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
636         ok = false;
637     }
638     
639     forEachSlotVisitor(
640         [&] (SlotVisitor& visitor) {
641             if (visitor.isEmpty())
642                 return;
643             
644             dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
645             ok = false;
646         });
647     
648     RELEASE_ASSERT(ok);
649 }
650
651 void Heap::gatherStackRoots(ConservativeRoots& roots)
652 {
653     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread);
654 }
655
656 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
657 {
658 #if !ENABLE(JIT)
659     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
660 #else
661     UNUSED_PARAM(roots);
662 #endif
663 }
664
665 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
666 {
667 #if ENABLE(DFG_JIT)
668     m_vm->gatherConservativeRoots(roots);
669 #else
670     UNUSED_PARAM(roots);
671 #endif
672 }
673
674 void Heap::beginMarking()
675 {
676     TimingScope timingScope(*this, "Heap::beginMarking");
677     m_jitStubRoutines->clearMarks();
678     m_objectSpace.beginMarking();
679     setMutatorShouldBeFenced(true);
680 }
681
682 void Heap::removeDeadCompilerWorklistEntries()
683 {
684 #if ENABLE(DFG_JIT)
685     for (unsigned i = DFG::numberOfWorklists(); i--;)
686         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
687 #endif
688 }
689
690 bool Heap::isHeapSnapshotting() const
691 {
692     HeapProfiler* heapProfiler = m_vm->heapProfiler();
693     if (UNLIKELY(heapProfiler))
694         return heapProfiler->activeSnapshotBuilder();
695     return false;
696 }
697
698 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
699     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
700         : m_builder(builder)
701     {
702     }
703
704     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
705     {
706         if (kind == HeapCell::JSCell) {
707             JSCell* cell = static_cast<JSCell*>(heapCell);
708             cell->methodTable()->heapSnapshot(cell, m_builder);
709         }
710         return IterationStatus::Continue;
711     }
712
713     HeapSnapshotBuilder& m_builder;
714 };
715
716 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
717 {
718     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
719         HeapIterationScope heapIterationScope(*this);
720         GatherHeapSnapshotData functor(*builder);
721         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
722     }
723 }
724
725 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
726     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
727         : m_snapshot(snapshot)
728     {
729     }
730
731     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
732     {
733         if (kind == HeapCell::JSCell)
734             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
735         return IterationStatus::Continue;
736     }
737
738     HeapSnapshot& m_snapshot;
739 };
740
741 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
742 {
743     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
744         HeapIterationScope heapIterationScope(*this);
745         RemoveDeadHeapSnapshotNodes functor(*snapshot);
746         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
747         snapshot->shrinkToFit();
748     }
749 }
750
751 void Heap::updateObjectCounts()
752 {
753     if (m_collectionScope == CollectionScope::Full)
754         m_totalBytesVisited = 0;
755
756     m_totalBytesVisitedThisCycle = bytesVisited();
757     
758     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
759 }
760
761 void Heap::endMarking()
762 {
763     forEachSlotVisitor(
764         [&] (SlotVisitor& visitor) {
765             visitor.reset();
766         });
767
768     assertMarkStacksEmpty();
769     m_weakReferenceHarvesters.removeAll();
770
771     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
772     
773     m_objectSpace.endMarking();
774     setMutatorShouldBeFenced(Options::forceFencedBarrier());
775 }
776
777 size_t Heap::objectCount()
778 {
779     return m_objectSpace.objectCount();
780 }
781
782 size_t Heap::extraMemorySize()
783 {
784     // FIXME: Change this to use SaturatedArithmetic when available.
785     // https://bugs.webkit.org/show_bug.cgi?id=170411
786     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
787     checkedTotal += m_deprecatedExtraMemorySize;
788     checkedTotal += m_arrayBuffers.size();
789     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
790
791     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
792     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
793 }
794
795 size_t Heap::size()
796 {
797     return m_objectSpace.size() + extraMemorySize();
798 }
799
800 size_t Heap::capacity()
801 {
802     return m_objectSpace.capacity() + extraMemorySize();
803 }
804
805 size_t Heap::protectedGlobalObjectCount()
806 {
807     size_t result = 0;
808     forEachProtectedCell(
809         [&] (JSCell* cell) {
810             if (cell->isObject() && asObject(cell)->isGlobalObject())
811                 result++;
812         });
813     return result;
814 }
815
816 size_t Heap::globalObjectCount()
817 {
818     HeapIterationScope iterationScope(*this);
819     size_t result = 0;
820     m_objectSpace.forEachLiveCell(
821         iterationScope,
822         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
823             if (kind != HeapCell::JSCell)
824                 return IterationStatus::Continue;
825             JSCell* cell = static_cast<JSCell*>(heapCell);
826             if (cell->isObject() && asObject(cell)->isGlobalObject())
827                 result++;
828             return IterationStatus::Continue;
829         });
830     return result;
831 }
832
833 size_t Heap::protectedObjectCount()
834 {
835     size_t result = 0;
836     forEachProtectedCell(
837         [&] (JSCell*) {
838             result++;
839         });
840     return result;
841 }
842
843 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
844 {
845     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
846     forEachProtectedCell(
847         [&] (JSCell* cell) {
848             recordType(*vm(), *result, cell);
849         });
850     return result;
851 }
852
853 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
854 {
855     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
856     HeapIterationScope iterationScope(*this);
857     m_objectSpace.forEachLiveCell(
858         iterationScope,
859         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
860             if (kind == HeapCell::JSCell)
861                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
862             return IterationStatus::Continue;
863         });
864     return result;
865 }
866
867 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
868 {
869     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
870         return;
871     
872     PreventCollectionScope preventCollectionScope(*this);
873     
874     // If JavaScript is running, it's not safe to delete all JavaScript code, since
875     // we'll end up returning to deleted code.
876     RELEASE_ASSERT(!m_vm->entryScope);
877     RELEASE_ASSERT(!m_collectionScope);
878
879     completeAllJITPlans();
880
881     for (ExecutableBase* executable : m_executables)
882         executable->clearCode();
883
884 #if ENABLE(WEBASSEMBLY)
885     {
886         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
887         // have no idea that we cleared the code from all of the Executables in the
888         // VM. This could leave Wasm in an inconsistent state where it has an IC that
889         // points into a CodeBlock that could be dead. The IC will still succeed because
890         // it uses a callee check, but then it will call into dead code.
891         HeapIterationScope heapIterationScope(*this);
892         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
893             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
894             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
895             codeBlock->clearJSCallICs(*m_vm);
896         });
897     }
898 #endif
899 }
900
901 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
902 {
903     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
904         return;
905     
906     PreventCollectionScope preventCollectionScope(*this);
907
908     RELEASE_ASSERT(!m_collectionScope);
909     
910     for (ExecutableBase* current : m_executables) {
911         if (!current->isFunctionExecutable())
912             continue;
913         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
914     }
915 }
916
917 void Heap::clearUnmarkedExecutables()
918 {
919     for (unsigned i = m_executables.size(); i--;) {
920         ExecutableBase* current = m_executables[i];
921         if (isMarked(current))
922             continue;
923
924         // Eagerly dereference the Executable's JITCode in order to run watchpoint
925         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
926         current->clearCode();
927         std::swap(m_executables[i], m_executables.last());
928         m_executables.removeLast();
929     }
930
931     m_executables.shrinkToFit();
932 }
933
934 void Heap::deleteUnmarkedCompiledCode()
935 {
936     clearUnmarkedExecutables();
937     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
938 }
939
940 void Heap::addToRememberedSet(const JSCell* constCell)
941 {
942     JSCell* cell = const_cast<JSCell*>(constCell);
943     ASSERT(cell);
944     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
945     m_barriersExecuted++;
946     if (m_mutatorShouldBeFenced) {
947         WTF::loadLoadFence();
948         if (!isMarked(cell)) {
949             // During a full collection a store into an unmarked object that had survived past
950             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
951             // object gets marked at some time after this then it will go down the normal marking
952             // path. So, we don't have to remember this object. We could return here. But we go
953             // further and attempt to re-white the object.
954             
955             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
956             
957             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
958                 // Now we protect against this race:
959                 //
960                 //     1) Object starts out black + unmarked.
961                 //     --> We do isMarked here.
962                 //     2) Object is marked and greyed.
963                 //     3) Object is scanned and blacked.
964                 //     --> We do atomicCompareExchangeCellStateStrong here.
965                 //
966                 // In this case we would have made the object white again, even though it should
967                 // be black. This check lets us correct our mistake. This relies on the fact that
968                 // isMarked converges monotonically to true.
969                 if (isMarked(cell)) {
970                     // It's difficult to work out whether the object should be grey or black at
971                     // this point. We say black conservatively.
972                     cell->setCellState(CellState::PossiblyBlack);
973                 }
974                 
975                 // Either way, we can return. Most likely, the object was not marked, and so the
976                 // object is now labeled white. This means that future barrier executions will not
977                 // fire. In the unlikely event that the object had become marked, we can still
978                 // return anyway, since we proved that the object was not marked at the time that
979                 // we executed this slow path.
980             }
981             
982             return;
983         }
984     } else
985         ASSERT(Heap::isMarked(cell));
986     // It could be that the object was *just* marked. This means that the collector may set the
987     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
988     // race with the collector here. If we win then this is accurate because the object _will_
989     // get scanned again. If we lose then someone else will barrier the object again. That would
990     // be unfortunate but not the end of the world.
991     cell->setCellState(CellState::PossiblyGrey);
992     m_mutatorMarkStack->append(cell);
993 }
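// A minimal sketch of the race-correction idiom in the slow path above, with a hypothetical
// ExampleCell type standing in for JSCell. The crucial property is that isMarked() only ever
// transitions false -> true, so re-checking it after the compare-exchange tells us whether
// the optimistic re-whitening was a mistake.
template<typename ExampleCell>
void exampleRewhiteUnlessMarked(ExampleCell& cell)
{
    // Optimistically flip a black-looking, unmarked cell back to white.
    if (cell.compareExchangeStateStrong(ExampleCell::Black, ExampleCell::White) != ExampleCell::Black)
        return; // Someone else changed the state first; nothing to correct.

    // The marker may have marked and blackened the cell between the earlier isMarked()
    // check and the exchange above. If so, conservatively re-blacken it.
    if (cell.isMarked())
        cell.setState(ExampleCell::Black);
}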
994
995 void Heap::sweepSynchronously()
996 {
997     double before = 0;
998     if (Options::logGC()) {
999         dataLog("Full sweep: ", capacity() / 1024, "kb ");
1000         before = currentTimeMS();
1001     }
1002     m_objectSpace.sweep();
1003     m_objectSpace.shrink();
1004     if (Options::logGC()) {
1005         double after = currentTimeMS();
1006         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
1007     }
1008 }
1009
1010 void Heap::collect(Synchronousness synchronousness, GCRequest request)
1011 {
1012     switch (synchronousness) {
1013     case Async:
1014         collectAsync(request);
1015         return;
1016     case Sync:
1017         collectSync(request);
1018         return;
1019     }
1020     RELEASE_ASSERT_NOT_REACHED();
1021 }
1022
1023 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1024 {
1025     switch (synchronousness) {
1026     case Async: {
1027         collectAsync(request);
1028         stopIfNecessary();
1029         return;
1030     }
1031         
1032     case Sync: {
1033         collectSync(request);
1034         
1035         DeferGCForAWhile deferGC(*this);
1036         if (UNLIKELY(Options::useImmortalObjects()))
1037             sweeper().stopSweeping();
1038         
1039         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1040         if (!alreadySweptInCollectSync) {
1041             if (Options::logGC())
1042                 dataLog("[GC<", RawPointer(this), ">: ");
1043             sweepSynchronously();
1044             if (Options::logGC())
1045                 dataLog("]\n");
1046         }
1047         m_objectSpace.assertNoUnswept();
1048         
1049         sweepAllLogicallyEmptyWeakBlocks();
1050         return;
1051     } }
1052     RELEASE_ASSERT_NOT_REACHED();
1053 }
1054
1055 void Heap::collectAsync(GCRequest request)
1056 {
1057     if (!m_isSafeToCollect)
1058         return;
1059
1060     bool alreadyRequested = false;
1061     {
1062         LockHolder locker(*m_threadLock);
1063         for (const GCRequest& previousRequest : m_requests) {
1064             if (request.subsumedBy(previousRequest)) {
1065                 alreadyRequested = true;
1066                 break;
1067             }
1068         }
1069     }
1070     if (alreadyRequested)
1071         return;
1072
1073     requestCollection(request);
1074 }
1075
1076 void Heap::collectSync(GCRequest request)
1077 {
1078     if (!m_isSafeToCollect)
1079         return;
1080     
1081     waitForCollection(requestCollection(request));
1082 }
1083
1084 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1085 {
1086     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1087     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1088     
1089     if (false)
1090         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1091     
1092     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1093 }
1094
1095 void Heap::collectInCollectorThread()
1096 {
1097     for (;;) {
1098         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1099         switch (result) {
1100         case RunCurrentPhaseResult::Finished:
1101             return;
1102         case RunCurrentPhaseResult::Continue:
1103             break;
1104         case RunCurrentPhaseResult::NeedCurrentThreadState:
1105             RELEASE_ASSERT_NOT_REACHED();
1106             break;
1107         }
1108     }
1109 }
1110
1111 void Heap::checkConn(GCConductor conn)
1112 {
1113     switch (conn) {
1114     case GCConductor::Mutator:
1115         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1116         return;
1117     case GCConductor::Collector:
1118         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1119         return;
1120     }
1121     RELEASE_ASSERT_NOT_REACHED();
1122 }
1123
1124 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1125 {
1126     checkConn(conn);
1127     m_currentThreadState = currentThreadState;
1128     m_currentThread = &WTF::Thread::current();
1129     
1130     if (conn == GCConductor::Mutator)
1131         sanitizeStackForVM(vm());
1132     
1133     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1134     if (!finishChangingPhase(conn)) {
1135         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1136         // this, but it's probably not the end of the world if it did happen.
1137         if (false)
1138             dataLog("Conn bounce-back.\n");
1139         return RunCurrentPhaseResult::Finished;
1140     }
1141     
1142     bool result = false;
1143     switch (m_currentPhase) {
1144     case CollectorPhase::NotRunning:
1145         result = runNotRunningPhase(conn);
1146         break;
1147         
1148     case CollectorPhase::Begin:
1149         result = runBeginPhase(conn);
1150         break;
1151         
1152     case CollectorPhase::Fixpoint:
1153         if (!currentThreadState && conn == GCConductor::Mutator)
1154             return RunCurrentPhaseResult::NeedCurrentThreadState;
1155         
1156         result = runFixpointPhase(conn);
1157         break;
1158         
1159     case CollectorPhase::Concurrent:
1160         result = runConcurrentPhase(conn);
1161         break;
1162         
1163     case CollectorPhase::Reloop:
1164         result = runReloopPhase(conn);
1165         break;
1166         
1167     case CollectorPhase::End:
1168         result = runEndPhase(conn);
1169         break;
1170     }
1171
1172     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1173 }
1174
1175 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1176 {
1177     // Check m_requests since the mutator calls this to poll what's going on.
1178     {
1179         auto locker = holdLock(*m_threadLock);
1180         if (m_requests.isEmpty())
1181             return false;
1182     }
1183     
1184     return changePhase(conn, CollectorPhase::Begin);
1185 }
1186
1187 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1188 {
1189     m_currentGCStartTime = MonotonicTime::now();
1190     
1191     {
1192         LockHolder locker(*m_threadLock);
1193         RELEASE_ASSERT(!m_requests.isEmpty());
1194         m_currentRequest = m_requests.first();
1195     }
1196         
1197     if (Options::logGC())
1198         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1199
1200     m_beforeGC = MonotonicTime::now();
1201
1202     if (m_collectionScope) {
1203         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1204         RELEASE_ASSERT_NOT_REACHED();
1205     }
1206     
1207     willStartCollection();
1208         
1209     if (UNLIKELY(m_verifier)) {
1210         // Verify that live objects from the last GC cycle haven't been corrupted by
1211         // mutators before we begin this new GC cycle.
1212         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1213             
1214         m_verifier->startGC();
1215         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1216     }
1217         
1218     prepareForMarking();
1219         
1220     if (m_collectionScope == CollectionScope::Full) {
1221         m_opaqueRoots.clear();
1222         m_collectorSlotVisitor->clearMarkStacks();
1223         m_mutatorMarkStack->clear();
1224     }
1225
1226     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1227
1228     beginMarking();
1229
1230     forEachSlotVisitor(
1231         [&] (SlotVisitor& visitor) {
1232             visitor.didStartMarking();
1233         });
1234
1235     m_parallelMarkersShouldExit = false;
1236
1237     m_helperClient.setFunction(
1238         [this] () {
1239             SlotVisitor* slotVisitor;
1240             {
1241                 LockHolder locker(m_parallelSlotVisitorLock);
1242                 if (m_availableParallelSlotVisitors.isEmpty()) {
1243                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1244                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1245                     
1246                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1247                         newVisitor->optimizeForStoppedMutator();
1248                     
1249                     newVisitor->didStartMarking();
1250                     
1251                     slotVisitor = newVisitor.get();
1252                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1253                 } else
1254                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1255             }
1256
1257             WTF::registerGCThread(GCThreadType::Helper);
1258
1259             {
1260                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1261                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1262             }
1263
1264             {
1265                 LockHolder locker(m_parallelSlotVisitorLock);
1266                 m_availableParallelSlotVisitors.append(slotVisitor);
1267             }
1268         });
1269
1270     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1271
1272     m_constraintSet->didStartMarking();
1273     
1274     m_scheduler->beginCollection();
1275     if (Options::logGC())
1276         m_scheduler->log();
1277     
1278     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1279     // checks because bootstrap would have put things into the visitor. So, we should fall
1280     // through to draining.
1281     
1282     if (!slotVisitor.didReachTermination()) {
1283         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1284         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1285         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1286         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1287         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1288         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1289         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1290         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1291         RELEASE_ASSERT_NOT_REACHED();
1292     }
1293         
1294     return changePhase(conn, CollectorPhase::Fixpoint);
1295 }
1296
1297 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1298 {
1299     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1300     
1301     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1302     
1303     if (Options::logGC()) {
1304         HashMap<const char*, size_t> visitMap;
1305         forEachSlotVisitor(
1306             [&] (SlotVisitor& slotVisitor) {
1307                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1308             });
1309         
1310         auto perVisitorDump = sortedMapDump(
1311             visitMap,
1312             [] (const char* a, const char* b) -> bool {
1313                 return strcmp(a, b) < 0;
1314             },
1315             ":", " ");
1316         
1317         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1318     }
1319         
1320     if (slotVisitor.didReachTermination()) {
1321         m_opaqueRoots.deleteOldTables();
1322         
1323         m_scheduler->didReachTermination();
1324         
1325         assertMarkStacksEmpty();
1326             
1327         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1328         // we don't have to execute root constraints again unless the mutator did run. At a
1329         // minimum, we could use this for work estimates - but it's probably more than just an
1330         // estimate.
1331         // https://bugs.webkit.org/show_bug.cgi?id=166828
1332             
1333         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1334         // add their own using Heap::addMarkingConstraint().
1335         bool converged = m_constraintSet->executeConvergence(slotVisitor);
1336         
1337         // FIXME: The slotVisitor.isEmpty() check is most likely not needed.
1338         // https://bugs.webkit.org/show_bug.cgi?id=180310
1339         if (converged && slotVisitor.isEmpty()) {
1340             assertMarkStacksEmpty();
1341             return changePhase(conn, CollectorPhase::End);
1342         }
1343             
1344         m_scheduler->didExecuteConstraints();
1345     }
1346         
1347     if (Options::logGC())
1348         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1349         
1350     {
1351         ParallelModeEnabler enabler(slotVisitor);
1352         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1353     }
1354         
1355     m_scheduler->synchronousDrainingDidStall();
1356
1357     // This is kinda tricky. The termination check looks at:
1358     //
1359     // - Whether the marking threads are active. If they are not, this means that the marking threads'
1360     //   SlotVisitors are empty.
1361     // - Whether the collector's slot visitor is empty.
1362     // - Whether the shared mark stacks are empty.
1363     //
1364     // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC
1365     // work increment, so it must be empty now.
1366     if (slotVisitor.didReachTermination())
1367         return true; // This is like relooping to the top of runFixpointPhase().
1368         
1369     if (!m_scheduler->shouldResume())
1370         return true;
1371
1372     m_scheduler->willResume();
1373         
1374     if (Options::logGC()) {
1375         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1376         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1377     }
1378
1379     // Forgive the mutator for its past failures to keep up.
1380     // FIXME: Figure out if moving this to different places results in perf changes.
1381     m_incrementBalance = 0;
1382         
1383     return changePhase(conn, CollectorPhase::Concurrent);
1384 }
1385
1386 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1387 {
1388     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1389
1390     switch (conn) {
1391     case GCConductor::Mutator: {
1392         // When the mutator has the conn, we poll runConcurrentPhase() every time someone says
1393         // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time
1394         // to stop and do some work.
1395         if (slotVisitor.didReachTermination()
1396             || m_scheduler->shouldStop())
1397             return changePhase(conn, CollectorPhase::Reloop);
1398         
1399         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1400         // everything. This is super cheap if the SlotVisitor is already empty.
1401         slotVisitor.donateAll();
1402         return false;
1403     }
1404     case GCConductor::Collector: {
1405         {
1406             ParallelModeEnabler enabler(slotVisitor);
1407             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1408         }
1409         return changePhase(conn, CollectorPhase::Reloop);
1410     } }
1411     
1412     RELEASE_ASSERT_NOT_REACHED();
1413     return false;
1414 }
1415
1416 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1417 {
1418     if (Options::logGC())
1419         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1420     
1421     m_scheduler->didStop();
1422     
1423     if (Options::logGC())
1424         m_scheduler->log();
1425     
1426     return changePhase(conn, CollectorPhase::Fixpoint);
1427 }
1428
1429 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1430 {
1431     m_scheduler->endCollection();
1432         
1433     {
1434         auto locker = holdLock(m_markingMutex);
1435         m_parallelMarkersShouldExit = true;
1436         m_markingConditionVariable.notifyAll();
1437     }
1438     m_helperClient.finish();
1439     
1440     iterateExecutingAndCompilingCodeBlocks(
1441         [&] (CodeBlock* codeBlock) {
1442             writeBarrier(codeBlock);
1443         });
1444         
1445     updateObjectCounts();
1446     endMarking();
1447         
1448     if (UNLIKELY(m_verifier)) {
1449         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1450         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1451     }
1452         
1453     if (vm()->typeProfiler())
1454         vm()->typeProfiler()->invalidateTypeSetCache();
1455         
1456     reapWeakHandles();
1457     pruneStaleEntriesFromWeakGCMaps();
1458     sweepArrayBuffers();
1459     snapshotUnswept();
1460     finalizeUnconditionalFinalizers();
1461     removeDeadCompilerWorklistEntries();
1462     notifyIncrementalSweeper();
1463     
1464     m_codeBlocks->iterateCurrentlyExecuting(
1465         [&] (CodeBlock* codeBlock) {
1466             writeBarrier(codeBlock);
1467         });
1468     m_codeBlocks->clearCurrentlyExecuting();
1469         
1470     m_objectSpace.prepareForAllocation();
1471     updateAllocationLimits();
1472
1473     if (UNLIKELY(m_verifier)) {
1474         m_verifier->trimDeadCells();
1475         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1476     }
1477
1478     didFinishCollection();
1479     
1480     if (m_currentRequest.didFinishEndPhase)
1481         m_currentRequest.didFinishEndPhase->run();
1482     
1483     if (false) {
1484         dataLog("Heap state after GC:\n");
1485         m_objectSpace.dumpBits();
1486     }
1487     
1488     if (Options::logGC()) {
1489         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1490         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1491     }
1492     
1493     {
1494         auto locker = holdLock(*m_threadLock);
1495         m_requests.removeFirst();
1496         m_lastServedTicket++;
1497         clearMutatorWaiting();
1498     }
1499     ParkingLot::unparkAll(&m_worldState);
1500
1501     if (false)
1502         dataLog("GC END!\n");
1503
1504     setNeedFinalize();
1505
1506     m_lastGCStartTime = m_currentGCStartTime;
1507     m_lastGCEndTime = MonotonicTime::now();
1508         
1509     return changePhase(conn, CollectorPhase::NotRunning);
1510 }
1511
1512 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1513 {
1514     checkConn(conn);
1515
1516     m_nextPhase = nextPhase;
1517
1518     return finishChangingPhase(conn);
1519 }
1520
1521 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1522 {
1523     checkConn(conn);
1524     
1525     if (m_nextPhase == m_currentPhase)
1526         return true;
1527
1528     if (false)
1529         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1530     
1531     m_phaseVersion++;
1532     
1533     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1534     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1535     
1536     if (suspendedBefore != suspendedAfter) {
1537         if (suspendedBefore) {
1538             RELEASE_ASSERT(!suspendedAfter);
1539             
1540             resumeThePeriphery();
1541             if (conn == GCConductor::Collector)
1542                 resumeTheMutator();
1543             else
1544                 handleNeedFinalize();
1545         } else {
1546             RELEASE_ASSERT(!suspendedBefore);
1547             RELEASE_ASSERT(suspendedAfter);
1548             
1549             if (conn == GCConductor::Collector) {
1550                 waitWhileNeedFinalize();
1551                 if (!stopTheMutator()) {
1552                     if (false)
1553                         dataLog("Returning false.\n");
1554                     return false;
1555                 }
1556             } else {
1557                 sanitizeStackForVM(m_vm);
1558                 handleNeedFinalize();
1559             }
1560             stopThePeriphery(conn);
1561         }
1562     }
1563     
1564     m_currentPhase = m_nextPhase;
1565     return true;
1566 }
1567
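// stopThePeriphery stops everything around the mutator: it suspends the compiler threads, marks the
// world as stopped, lets every SlotVisitor observe that, completes outstanding JITWorklist work when
// the JIT is enabled, updates the ShadowChicken log, flushes old StructureID tables, and stops
// allocation. m_stopTime records when the pause began.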
1568 void Heap::stopThePeriphery(GCConductor conn)
1569 {
1570     if (m_worldIsStopped) {
1571         dataLog("FATAL: world already stopped.\n");
1572         RELEASE_ASSERT_NOT_REACHED();
1573     }
1574     
1575     if (m_mutatorDidRun)
1576         m_mutatorExecutionVersion++;
1577     
1578     m_mutatorDidRun = false;
1579
1580     suspendCompilerThreads();
1581     m_worldIsStopped = true;
1582
1583     forEachSlotVisitor(
1584         [&] (SlotVisitor& slotVisitor) {
1585             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1586         });
1587
1588 #if ENABLE(JIT)
1589     {
1590         DeferGCForAWhile awhile(*this);
1591         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1592             && conn == GCConductor::Collector)
1593             setGCDidJIT();
1594     }
1595 #else
1596     UNUSED_PARAM(conn);
1597 #endif // ENABLE(JIT)
1598     
1599     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1600     
1601     m_structureIDTable.flushOldTables();
1602     m_objectSpace.stopAllocating();
1603     
1604     m_stopTime = MonotonicTime::now();
1605 }
1606
1607 NEVER_INLINE void Heap::resumeThePeriphery()
1608 {
1609     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1610     // collection cycle or this is just a concurrent phase within a collection cycle:
1611     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1612     //   last active block.
1613     // - During collection cycle: it reinstates the last active block.
1614     m_objectSpace.resumeAllocating();
1615     
1616     m_barriersExecuted = 0;
1617     
1618     if (!m_worldIsStopped) {
1619         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1620         RELEASE_ASSERT_NOT_REACHED();
1621     }
1622     m_worldIsStopped = false;
1623     
1624     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1625     // become available. We basically want a lockAny() method that will lock whatever lock is available
1626     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1627     // queues at once, which is totally achievable - it would just require memory allocation, which is
1628     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1629     // with a DLG-style handshake mechanism, but that seems not as general.
1630     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1631
1632     forEachSlotVisitor(
1633         [&] (SlotVisitor& slotVisitor) {
1634             slotVisitorsToUpdate.append(&slotVisitor);
1635         });
1636     
1637     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1638         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1639             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1640             bool remove = false;
1641             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1642                 remove = true;
1643             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1644                 slotVisitor.updateMutatorIsStopped(locker);
1645                 remove = true;
1646             }
1647             if (remove) {
1648                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1649                 slotVisitorsToUpdate.takeLast();
1650             }
1651         }
1652         WTF::Thread::yield();
1653     }
1654     
1655     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1656         slotVisitor->updateMutatorIsStopped();
1657     
1658     resumeCompilerThreads();
1659 }
1660
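// stopTheMutator attempts to set the stopped bit in m_worldState. There are three outcomes:
// - The world is already stopped, or the mutator has no heap access: the stopped bit is (or gets)
//   set and we return true.
// - The mutator already has the conn: we return false and let the mutator drive the collection.
// - The mutator has heap access but not the conn: we hand it the conn, schedule the
//   stop-if-necessary timer, and return false.
// The CAS loop retries until one of these outcomes sticks.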
1661 bool Heap::stopTheMutator()
1662 {
1663     for (;;) {
1664         unsigned oldState = m_worldState.load();
1665         if (oldState & stoppedBit) {
1666             RELEASE_ASSERT(!(oldState & hasAccessBit));
1667             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1668             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1669             return true;
1670         }
1671         
1672         if (oldState & mutatorHasConnBit) {
1673             RELEASE_ASSERT(!(oldState & hasAccessBit));
1674             RELEASE_ASSERT(!(oldState & stoppedBit));
1675             return false;
1676         }
1677
1678         if (!(oldState & hasAccessBit)) {
1679             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1680             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1681             // We can stop the world instantly.
1682             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1683                 return true;
1684             continue;
1685         }
1686         
1687         // Transfer the conn to the mutator and bail.
1688         RELEASE_ASSERT(oldState & hasAccessBit);
1689         RELEASE_ASSERT(!(oldState & stoppedBit));
1690         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1691         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1692             if (false)
1693                 dataLog("Handed off the conn.\n");
1694             m_stopIfNecessaryTimer->scheduleSoon();
1695             ParkingLot::unparkAll(&m_worldState);
1696             return false;
1697         }
1698     }
1699 }
1700
1701 NEVER_INLINE void Heap::resumeTheMutator()
1702 {
1703     if (false)
1704         dataLog("Resuming the mutator.\n");
1705     for (;;) {
1706         unsigned oldState = m_worldState.load();
1707         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1708             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1709             RELEASE_ASSERT_NOT_REACHED();
1710         }
1711         if (oldState & mutatorHasConnBit) {
1712             dataLog("Fatal: mutator has the conn.\n");
1713             RELEASE_ASSERT_NOT_REACHED();
1714         }
1715         
1716         if (!(oldState & stoppedBit)) {
1717             if (false)
1718                 dataLog("Returning because not stopped.\n");
1719             return;
1720         }
1721         
1722         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1723             if (false)
1724                 dataLog("CASing and returning.\n");
1725             ParkingLot::unparkAll(&m_worldState);
1726             return;
1727         }
1728     }
1729 }
1730
1731 void Heap::stopIfNecessarySlow()
1732 {
1733     while (stopIfNecessarySlow(m_worldState.load())) { }
1734     
1735     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1736     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1737     
1738     handleGCDidJIT();
1739     handleNeedFinalize();
1740     m_mutatorDidRun = true;
1741 }
1742
1743 bool Heap::stopIfNecessarySlow(unsigned oldState)
1744 {
1745     RELEASE_ASSERT(oldState & hasAccessBit);
1746     RELEASE_ASSERT(!(oldState & stoppedBit));
1747     
1748     // It's possible for us to wake up with finalization already requested but the world not yet
1749     // resumed. If that happens, we can't run finalization yet.
1750     if (handleNeedFinalize(oldState))
1751         return true;
1752
1753     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1754     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1755     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1756     // and there would be some other bit indicating whether we were in some GC phase other than the
1757     // NotRunning or Concurrent ones.
1758     if (oldState & mutatorHasConnBit)
1759         collectInMutatorThread();
1760     
1761     return false;
1762 }
1763
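// collectInMutatorThread runs collector phases on the mutator thread while it holds the conn. When a
// phase reports that it needs the current thread's state, we capture that state with
// callWithCurrentThreadState and keep running phases from inside the callback.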
1764 NEVER_INLINE void Heap::collectInMutatorThread()
1765 {
1766     CollectingScope collectingScope(*this);
1767     for (;;) {
1768         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1769         switch (result) {
1770         case RunCurrentPhaseResult::Finished:
1771             return;
1772         case RunCurrentPhaseResult::Continue:
1773             break;
1774         case RunCurrentPhaseResult::NeedCurrentThreadState:
1775             sanitizeStackForVM(m_vm);
1776             auto lambda = [&] (CurrentThreadState& state) {
1777                 for (;;) {
1778                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1779                     switch (result) {
1780                     case RunCurrentPhaseResult::Finished:
1781                         return;
1782                     case RunCurrentPhaseResult::Continue:
1783                         break;
1784                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1785                         RELEASE_ASSERT_NOT_REACHED();
1786                         break;
1787                     }
1788                 }
1789             };
1790             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1791             return;
1792         }
1793     }
1794 }
1795
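// waitForCollector repeatedly evaluates func under m_threadLock until it reports that we are done.
// While not done, we set the mutator-waiting bit and park on m_worldState; the collector clears that
// bit and unparks us when the GC cycle finishes. Each iteration also services stop-the-world requests
// and relinquishes the conn before parking.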
1796 template<typename Func>
1797 void Heap::waitForCollector(const Func& func)
1798 {
1799     for (;;) {
1800         bool done;
1801         {
1802             LockHolder locker(*m_threadLock);
1803             done = func(locker);
1804             if (!done) {
1805                 setMutatorWaiting();
1806                 
1807                 // At this point, the collector knows that we intend to wait, and it will clear the
1808                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1809                 // prevents us from parking except when a stop-the-world is also requested. Unparking
1810                 // after clearing means that if the clearing happens after we park, then we will unpark.
1811             }
1812         }
1813         
1814         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1815         unsigned oldState = m_worldState.load();
1816         if (stopIfNecessarySlow(oldState))
1817             continue;
1818         
1819         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1820         // do the collection.
1821         relinquishConn();
1822         
1823         if (done) {
1824             clearMutatorWaiting(); // Clean up just in case.
1825             return;
1826         }
1827         
1828         // If mutatorWaitingBit is still set then we want to wait.
1829         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1830     }
1831 }
1832
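// acquireAccessSlow spins until it can set the has-access bit. If the world is stopped, the mutator
// parks until the collector resumes it. Once access is acquired, we service any deferred GC-did-JIT
// and finalize requests and give the collector a chance to stop us again.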
1833 void Heap::acquireAccessSlow()
1834 {
1835     for (;;) {
1836         unsigned oldState = m_worldState.load();
1837         RELEASE_ASSERT(!(oldState & hasAccessBit));
1838         
1839         if (oldState & stoppedBit) {
1840             if (verboseStop) {
1841                 dataLog("Stopping in acquireAccess!\n");
1842                 WTFReportBacktrace();
1843             }
1844             // Wait until we're not stopped anymore.
1845             ParkingLot::compareAndPark(&m_worldState, oldState);
1846             continue;
1847         }
1848         
1849         RELEASE_ASSERT(!(oldState & stoppedBit));
1850         unsigned newState = oldState | hasAccessBit;
1851         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1852             handleGCDidJIT();
1853             handleNeedFinalize();
1854             m_mutatorDidRun = true;
1855             stopIfNecessary();
1856             return;
1857         }
1858     }
1859 }
1860
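// releaseAccessSlow clears the has-access bit (and the conn bit, if we held it). If we held the conn
// while a phase change is pending, we also set the stopped bit so that the collector can make
// progress and a subsequent acquireAccess blocks. Any pending finalization runs before the bits are
// cleared.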
1861 void Heap::releaseAccessSlow()
1862 {
1863     for (;;) {
1864         unsigned oldState = m_worldState.load();
1865         if (!(oldState & hasAccessBit)) {
1866             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1867             RELEASE_ASSERT_NOT_REACHED();
1868         }
1869         if (oldState & stoppedBit) {
1870             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1871             RELEASE_ASSERT_NOT_REACHED();
1872         }
1873         
1874         if (handleNeedFinalize(oldState))
1875             continue;
1876         
1877         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1878         
1879         if ((oldState & mutatorHasConnBit)
1880             && m_nextPhase != m_currentPhase) {
1881             // This means that the collector thread had given us the conn so that we would do something
1882             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1883             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1884             // sure to have work to do.
1885             newState |= stoppedBit;
1886         }
1887
1888         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1889             if (oldState & mutatorHasConnBit)
1890                 finishRelinquishingConn();
1891             return;
1892         }
1893     }
1894 }
1895
1896 bool Heap::relinquishConn(unsigned oldState)
1897 {
1898     RELEASE_ASSERT(oldState & hasAccessBit);
1899     RELEASE_ASSERT(!(oldState & stoppedBit));
1900     
1901     if (!(oldState & mutatorHasConnBit))
1902         return false; // Done.
1903     
1904     if (m_threadShouldStop)
1905         return false;
1906     
1907     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1908         return true; // Loop around.
1909     
1910     finishRelinquishingConn();
1911     return true;
1912 }
1913
1914 void Heap::finishRelinquishingConn()
1915 {
1916     if (false)
1917         dataLog("Relinquished the conn.\n");
1918     
1919     sanitizeStackForVM(m_vm);
1920     
1921     auto locker = holdLock(*m_threadLock);
1922     if (!m_requests.isEmpty())
1923         m_threadCondition->notifyOne(locker);
1924     ParkingLot::unparkAll(&m_worldState);
1925 }
1926
1927 void Heap::relinquishConn()
1928 {
1929     while (relinquishConn(m_worldState.load())) { }
1930 }
1931
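// handleGCDidJIT and handleNeedFinalize follow a common pattern: the (unsigned oldState) overloads
// return false when the relevant bit is clear (nothing left to do) and true when the caller should
// reload m_worldState and call again, either because the bit was just handled or because the CAS
// lost a race. The no-argument versions simply loop until the overload returns false.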
1932 bool Heap::handleGCDidJIT(unsigned oldState)
1933 {
1934     RELEASE_ASSERT(oldState & hasAccessBit);
1935     if (!(oldState & gcDidJITBit))
1936         return false;
1937     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1938         WTF::crossModifyingCodeFence();
1939         return true;
1940     }
1941     return true;
1942 }
1943
1944 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1945 {
1946     RELEASE_ASSERT(oldState & hasAccessBit);
1947     RELEASE_ASSERT(!(oldState & stoppedBit));
1948     
1949     if (!(oldState & needFinalizeBit))
1950         return false;
1951     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1952         finalize();
1953         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1954         // which case they would be waiting for us to release heap access.
1955         ParkingLot::unparkAll(&m_worldState);
1956         return true;
1957     }
1958     return true;
1959 }
1960
1961 void Heap::handleGCDidJIT()
1962 {
1963     while (handleGCDidJIT(m_worldState.load())) { }
1964 }
1965
1966 void Heap::handleNeedFinalize()
1967 {
1968     while (handleNeedFinalize(m_worldState.load())) { }
1969 }
1970
1971 void Heap::setGCDidJIT()
1972 {
1973     m_worldState.transaction(
1974         [&] (unsigned& state) -> bool {
1975             RELEASE_ASSERT(state & stoppedBit);
1976             state |= gcDidJITBit;
1977             return true;
1978         });
1979 }
1980
1981 void Heap::setNeedFinalize()
1982 {
1983     m_worldState.exchangeOr(needFinalizeBit);
1984     ParkingLot::unparkAll(&m_worldState);
1985     m_stopIfNecessaryTimer->scheduleSoon();
1986 }
1987
1988 void Heap::waitWhileNeedFinalize()
1989 {
1990     for (;;) {
1991         unsigned oldState = m_worldState.load();
1992         if (!(oldState & needFinalizeBit)) {
1993             // This means that either there was no finalize request or the main thread will finalize
1994             // with heap access, so a subsequent call to stopTheWorld() will return only when
1995             // finalize finishes.
1996             return;
1997         }
1998         ParkingLot::compareAndPark(&m_worldState, oldState);
1999     }
2000 }
2001
2002 void Heap::setMutatorWaiting()
2003 {
2004     m_worldState.exchangeOr(mutatorWaitingBit);
2005 }
2006
2007 void Heap::clearMutatorWaiting()
2008 {
2009     m_worldState.exchangeAnd(~mutatorWaitingBit);
2010 }
2011
2012 void Heap::notifyThreadStopping(const AbstractLocker&)
2013 {
2014     m_threadIsStopping = true;
2015     clearMutatorWaiting();
2016     ParkingLot::unparkAll(&m_worldState);
2017 }
2018
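// finalize performs the end-of-collection cleanup: it deletes unmarked compiled code and source
// provider caches, sweeps the spaces that must be swept eagerly, clears the hasOwnProperty cache,
// runs the registered heap finalizer callbacks, and, if Options::sweepSynchronously() is set, sweeps
// the entire heap before returning.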
2019 void Heap::finalize()
2020 {
2021     MonotonicTime before;
2022     if (Options::logGC()) {
2023         before = MonotonicTime::now();
2024         dataLog("[GC<", RawPointer(this), ">: finalize ");
2025     }
2026     
2027     {
2028         SweepingScope sweepingScope(*this);
2029         deleteUnmarkedCompiledCode();
2030         deleteSourceProviderCaches();
2031         sweepInFinalize();
2032     }
2033     
2034     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2035         cache->clear();
2036     
2037     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2038         callback.run(*vm());
2039     
2040     if (Options::sweepSynchronously())
2041         sweepSynchronously();
2042
2043     if (Options::logGC()) {
2044         MonotonicTime after = MonotonicTime::now();
2045         dataLog((after - before).milliseconds(), "ms]\n");
2046     }
2047 }
2048
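// requestCollection enqueues a GC request and returns a ticket that waitForCollection() can block on.
// If no collection is in flight, the mutator takes the conn so that it will drive the collection
// itself; otherwise the collector thread is notified.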
2049 Heap::Ticket Heap::requestCollection(GCRequest request)
2050 {
2051     stopIfNecessary();
2052     
2053     ASSERT(vm()->currentThreadIsHoldingAPILock());
2054     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2055     
2056     LockHolder locker(*m_threadLock);
2057     // We may be able to steal the conn. That only works if the collector is definitely not running
2058     // right now. This is an optimization that prevents the collector thread from ever starting in most
2059     // cases.
2060     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2061     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2062         if (false)
2063             dataLog("Taking the conn.\n");
2064         m_worldState.exchangeOr(mutatorHasConnBit);
2065     }
2066     
2067     m_requests.append(request);
2068     m_lastGrantedTicket++;
2069     if (!(m_worldState.load() & mutatorHasConnBit))
2070         m_threadCondition->notifyOne(locker);
2071     return m_lastGrantedTicket;
2072 }
2073
2074 void Heap::waitForCollection(Ticket ticket)
2075 {
2076     waitForCollector(
2077         [&] (const AbstractLocker&) -> bool {
2078             return m_lastServedTicket >= ticket;
2079         });
2080 }
2081
2082 void Heap::sweepInFinalize()
2083 {
2084     m_objectSpace.sweepLargeAllocations();
2085     vm()->forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); });
2086     vm()->eagerlySweptDestructibleObjectSpace.sweep();
2087 }
2088
2089 void Heap::suspendCompilerThreads()
2090 {
2091 #if ENABLE(DFG_JIT)
2092     // We ensure that the worklists exist so that the mutator cannot start a new worklist after we
2093     // have suspended the ones it had already started. That's not very expensive since the worklists
2094     // use AutomaticThreads anyway.
2095     for (unsigned i = DFG::numberOfWorklists(); i--;)
2096         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2097 #endif
2098 }
2099
2100 void Heap::willStartCollection()
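// willStartCollection picks the collection scope. A full collection resets the extra-memory
// accounting and notifies the full-GC activity callback; an eden collection just records the
// pre-collection heap size. The eden activity callback and registered observers are notified either
// way.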
2101 {
2102     if (Options::logGC())
2103         dataLog("=> ");
2104     
2105     if (shouldDoFullCollection()) {
2106         m_collectionScope = CollectionScope::Full;
2107         m_shouldDoFullCollection = false;
2108         if (Options::logGC())
2109             dataLog("FullCollection, ");
2110         if (false)
2111             dataLog("Full collection!\n");
2112     } else {
2113         m_collectionScope = CollectionScope::Eden;
2114         if (Options::logGC())
2115             dataLog("EdenCollection, ");
2116         if (false)
2117             dataLog("Eden collection!\n");
2118     }
2119     if (m_collectionScope == CollectionScope::Full) {
2120         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2121         m_extraMemorySize = 0;
2122         m_deprecatedExtraMemorySize = 0;
2123 #if ENABLE(RESOURCE_USAGE)
2124         m_externalMemorySize = 0;
2125 #endif
2126
2127         if (m_fullActivityCallback)
2128             m_fullActivityCallback->willCollect();
2129     } else {
2130         ASSERT(m_collectionScope == CollectionScope::Eden);
2131         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2132     }
2133
2134     if (m_edenActivityCallback)
2135         m_edenActivityCallback->willCollect();
2136
2137     for (auto* observer : m_observers)
2138         observer->willGarbageCollect();
2139 }
2140
2141 void Heap::prepareForMarking()
2142 {
2143     m_objectSpace.prepareForMarking();
2144 }
2145
2146 void Heap::reapWeakHandles()
2147 {
2148     m_objectSpace.reapWeakSets();
2149 }
2150
2151 void Heap::pruneStaleEntriesFromWeakGCMaps()
2152 {
2153     if (m_collectionScope != CollectionScope::Full)
2154         return;
2155     for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2156         weakGCMap->pruneStaleEntries();
2157 }
2158
2159 void Heap::sweepArrayBuffers()
2160 {
2161     m_arrayBuffers.sweep();
2162 }
2163
2164 void Heap::snapshotUnswept()
2165 {
2166     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2167     m_objectSpace.snapshotUnswept();
2168 }
2169
2170 void Heap::deleteSourceProviderCaches()
2171 {
2172     if (*m_lastCollectionScope == CollectionScope::Full)
2173         m_vm->clearSourceProviderCaches();
2174 }
2175
2176 void Heap::notifyIncrementalSweeper()
2177 {
2178     if (m_collectionScope == CollectionScope::Full) {
2179         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2180             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2181     }
2182
2183     m_sweeper->startSweeping();
2184 }
2185
2186 void Heap::updateAllocationLimits()
2187 {
2188     static const bool verbose = false;
2189     
2190     if (verbose) {
2191         dataLog("\n");
2192         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2193     }
2194     
2195     // Calculate our current heap size threshold for the purpose of figuring out when we should
2196     // run another collection. This isn't the same as either size() or capacity(), though it should
2197     // be somewhere between the two. The key is to match the size calculations involved in calls to
2198     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2199     // fragmentation, we may have size() much smaller than capacity().
2200     size_t currentHeapSize = 0;
2201
2202     // For marked space, we use the total number of bytes visited. This matches the logic for
2203     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2204     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2205     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2206     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2207     currentHeapSize += m_totalBytesVisited;
2208     if (verbose)
2209         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2210
2211     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2212     // extra memory reporting.
2213     currentHeapSize += extraMemorySize();
2214     if (!ASSERT_DISABLED) {
2215         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2216         checkedCurrentHeapSize += extraMemorySize();
2217         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2218     }
2219
2220     if (verbose)
2221         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2222     
2223     if (m_collectionScope == CollectionScope::Full) {
2224         // To avoid pathological GC churn in very small and very large heaps, we set
2225         // the new allocation limit based on the current size of the heap, with a
2226         // fixed minimum.
2227         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2228         if (verbose)
2229             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2230         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2231         if (verbose)
2232             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2233         m_sizeAfterLastFullCollect = currentHeapSize;
2234         if (verbose)
2235             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2236         m_bytesAbandonedSinceLastFullCollect = 0;
2237         if (verbose)
2238             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2239     } else {
2240         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2241         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2242         // But we are sloppy, so we have to defend against the overflow.
2243         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2244         if (verbose)
2245             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2246         m_sizeAfterLastEdenCollect = currentHeapSize;
2247         if (verbose)
2248             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2249         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2250         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2251         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2252             m_shouldDoFullCollection = true;
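        // Illustrative numbers: if m_maxHeapSize is 100MB and this eden collection leaves
        // currentHeapSize at 80MB, then m_maxEdenSize is 20MB and the ratio is 0.2 < 1/3, so the
        // next collection is promoted to a full collection.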
2253         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2254         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2255         if (verbose)
2256             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2257         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2258         if (verbose)
2259             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2260         if (m_fullActivityCallback) {
2261             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2262             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2263         }
2264     }
2265
2266 #if PLATFORM(IOS)
2267     // Get critical memory threshold for next cycle.
2268     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2269 #endif
2270
2271     m_sizeAfterLastCollect = currentHeapSize;
2272     if (verbose)
2273         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2274     m_bytesAllocatedThisCycle = 0;
2275
2276     if (Options::logGC())
2277         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2278 }
2279
2280 void Heap::didFinishCollection()
2281 {
2282     m_afterGC = MonotonicTime::now();
2283     CollectionScope scope = *m_collectionScope;
2284     if (scope == CollectionScope::Full)
2285         m_lastFullGCLength = m_afterGC - m_beforeGC;
2286     else
2287         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2288
2289 #if ENABLE(RESOURCE_USAGE)
2290     ASSERT(externalMemorySize() <= extraMemorySize());
2291 #endif
2292
2293     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2294         gatherExtraHeapSnapshotData(*heapProfiler);
2295         removeDeadHeapSnapshotNodes(*heapProfiler);
2296     }
2297
2298     if (UNLIKELY(m_verifier))
2299         m_verifier->endGC();
2300
2301     RELEASE_ASSERT(m_collectionScope);
2302     m_lastCollectionScope = m_collectionScope;
2303     m_collectionScope = std::nullopt;
2304
2305     for (auto* observer : m_observers)
2306         observer->didGarbageCollect(scope);
2307 }
2308
2309 void Heap::resumeCompilerThreads()
2310 {
2311 #if ENABLE(DFG_JIT)
2312     for (unsigned i = DFG::numberOfWorklists(); i--;)
2313         DFG::existingWorklistForIndex(i).resumeAllThreads();
2314 #endif
2315 }
2316
2317 GCActivityCallback* Heap::fullActivityCallback()
2318 {
2319     return m_fullActivityCallback.get();
2320 }
2321
2322 GCActivityCallback* Heap::edenActivityCallback()
2323 {
2324     return m_edenActivityCallback.get();
2325 }
2326
2327 IncrementalSweeper& Heap::sweeper()
2328 {
2329     return *m_sweeper;
2330 }
2331
2332 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2333 {
2334     if (m_fullActivityCallback)
2335         m_fullActivityCallback->setEnabled(enable);
2336     if (m_edenActivityCallback)
2337         m_edenActivityCallback->setEnabled(enable);
2338 }
2339
2340 void Heap::didAllocate(size_t bytes)
2341 {
2342     if (m_edenActivityCallback)
2343         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2344     m_bytesAllocatedThisCycle += bytes;
2345     performIncrement(bytes);
2346 }
2347
2348 bool Heap::isValidAllocation(size_t)
2349 {
2350     if (!isValidThreadState(m_vm))
2351         return false;
2352
2353     if (isCurrentThreadBusy())
2354         return false;
2355     
2356     return true;
2357 }
2358
2359 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2360 {
2361     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2362 }
2363
2364 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2365 {
2366     HandleSlot slot = handle.slot();
2367     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2368     finalizer(slot->asCell());
2369     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2370 }
2371
2372 void Heap::addExecutable(ExecutableBase* executable)
2373 {
2374     m_executables.append(executable);
2375 }
2376
2377 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2378 {
2379     if (!m_fullActivityCallback) {
2380         collectNow(synchronousness, CollectionScope::Full);
2381         return;
2382     }
2383
2384     if (m_fullActivityCallback->didGCRecently()) {
2385         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2386         reportAbandonedObjectGraph();
2387         return;
2388     }
2389
2390     m_fullActivityCallback->setDidGCRecently();
2391     collectNow(synchronousness, CollectionScope::Full);
2392 }
2393
2394 bool Heap::shouldDoFullCollection()
2395 {
2396     if (!Options::useGenerationalGC())
2397         return true;
2398
2399     if (!m_currentRequest.scope)
2400         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2401     return *m_currentRequest.scope == CollectionScope::Full;
2402 }
2403
2404 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2405 {
2406     m_logicallyEmptyWeakBlocks.append(block);
2407 }
2408
2409 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2410 {
2411     if (m_logicallyEmptyWeakBlocks.isEmpty())
2412         return;
2413
2414     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2415     while (sweepNextLogicallyEmptyWeakBlock()) { }
2416 }
2417
2418 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2419 {
2420     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2421         return false;
2422
2423     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2424
2425     block->sweep();
2426     if (block->isEmpty()) {
2427         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2428         m_logicallyEmptyWeakBlocks.removeLast();
2429         WeakBlock::destroy(*this, block);
2430     } else
2431         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2432
2433     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2434         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2435         return false;
2436     }
2437
2438     return true;
2439 }
2440
2441 size_t Heap::visitCount()
2442 {
2443     size_t result = 0;
2444     forEachSlotVisitor(
2445         [&] (SlotVisitor& visitor) {
2446             result += visitor.visitCount();
2447         });
2448     return result;
2449 }
2450
2451 size_t Heap::bytesVisited()
2452 {
2453     size_t result = 0;
2454     forEachSlotVisitor(
2455         [&] (SlotVisitor& visitor) {
2456             result += visitor.bytesVisited();
2457         });
2458     return result;
2459 }
2460
2461 void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func)
2462 {
2463     // We don't know the full set of CodeBlocks until compilation has terminated.
2464     completeAllJITPlans();
2465
2466     return m_codeBlocks->iterate(func);
2467 }
2468
2469 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func)
2470 {
2471     return m_codeBlocks->iterate(locker, func);
2472 }
2473
2474 void Heap::writeBarrierSlowPath(const JSCell* from)
2475 {
2476     if (UNLIKELY(mutatorShouldBeFenced())) {
2477         // In this case, the barrierThreshold is the tautological threshold, so from might still not
2478         // be black. But we can't know for sure until we fire off a fence.
2479         WTF::storeLoadFence();
2480         if (from->cellState() != CellState::PossiblyBlack)
2481             return;
2482     }
2483     
2484     addToRememberedSet(from);
2485 }
2486
2487 bool Heap::isCurrentThreadBusy()
2488 {
2489     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2490 }
2491
2492 void Heap::reportExtraMemoryVisited(size_t size)
2493 {
2494     size_t* counter = &m_extraMemorySize;
2495     
2496     for (;;) {
2497         size_t oldSize = *counter;
2498         // FIXME: Change this to use SaturatedArithmetic when available.
2499         // https://bugs.webkit.org/show_bug.cgi?id=170411
2500         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2501         checkedNewSize += size;
2502         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2503         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2504             return;
2505     }
2506 }
2507
2508 #if ENABLE(RESOURCE_USAGE)
2509 void Heap::reportExternalMemoryVisited(size_t size)
2510 {
2511     size_t* counter = &m_externalMemorySize;
2512
2513     for (;;) {
2514         size_t oldSize = *counter;
2515         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2516             return;
2517     }
2518 }
2519 #endif
2520
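// collectIfNecessaryOrDefer is the allocation-time trigger for GC. It bails out if collection is not
// yet safe, if the mutator is already sweeping or collecting, or if GC is disabled. Otherwise it
// compares the bytes allocated this cycle against the budget (Options::gcMaxHeapSize() if set,
// otherwise m_maxEdenSize, clamped on iOS when over the critical memory threshold) and then either
// records the request in the deferral context, remembers that GC work was deferred, or starts an
// asynchronous collection.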
2521 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2522 {
2523     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2524
2525     if (!m_isSafeToCollect)
2526         return;
2527     switch (mutatorState()) {
2528     case MutatorState::Running:
2529     case MutatorState::Allocating:
2530         break;
2531     case MutatorState::Sweeping:
2532     case MutatorState::Collecting:
2533         return;
2534     }
2535     if (!Options::useGC())
2536         return;
2537     
2538     if (mayNeedToStop()) {
2539         if (deferralContext)
2540             deferralContext->m_shouldGC = true;
2541         else if (isDeferred())
2542             m_didDeferGCWork = true;
2543         else
2544             stopIfNecessary();
2545     }
2546     
2547     if (UNLIKELY(Options::gcMaxHeapSize())) {
2548         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2549             return;
2550     } else {
2551         size_t bytesAllowedThisCycle = m_maxEdenSize;
2552
2553 #if PLATFORM(IOS)
2554         if (overCriticalMemoryThreshold())
2555             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2556 #endif
2557
2558         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2559             return;
2560     }
2561
2562     if (deferralContext)
2563         deferralContext->m_shouldGC = true;
2564     else if (isDeferred())
2565         m_didDeferGCWork = true;
2566     else {
2567         collectAsync();
2568         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2569     }
2570 }
2571
2572 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2573 {
2574     // Can't do anything if we're still deferred.
2575     if (m_deferralDepth)
2576         return;
2577     
2578     ASSERT(!isDeferred());
2579     
2580     m_didDeferGCWork = false;
2581     // FIXME: Bring back something like the DeferGCProbability mode.
2582     // https://bugs.webkit.org/show_bug.cgi?id=166627
2583     collectIfNecessaryOrDefer();
2584 }
2585
2586 void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2587 {
2588     m_weakGCMaps.add(weakGCMap);
2589 }
2590
2591 void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2592 {
2593     m_weakGCMaps.remove(weakGCMap);
2594 }
2595
2596 void Heap::didAllocateBlock(size_t capacity)
2597 {
2598 #if ENABLE(RESOURCE_USAGE)
2599     m_blockBytesAllocated += capacity;
2600 #else
2601     UNUSED_PARAM(capacity);
2602 #endif
2603 }
2604
2605 void Heap::didFreeBlock(size_t capacity)
2606 {
2607 #if ENABLE(RESOURCE_USAGE)
2608     m_blockBytesAllocated -= capacity;
2609 #else
2610     UNUSED_PARAM(capacity);
2611 #endif
2612 }
2613
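// addCoreConstraints registers the marking constraints that discover roots and keep output edges up
// to date: the conservative scan, misc small roots (protected values, small strings, argument
// buffers, exceptions), strong handles, debugger and profiler state, JIT stub routines, weak sets,
// weak reference harvesters, output constraints, the DFG worklists (when the DFG is enabled),
// executing and compiling CodeBlocks, and finally the mark stack merging constraint.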
2614 void Heap::addCoreConstraints()
2615 {
2616     m_constraintSet->add(
2617         "Cs", "Conservative Scan",
2618         [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable {
2619             bool shouldNotProduceWork = lastVersion == m_phaseVersion;
2620             if (shouldNotProduceWork)
2621                 return;
2622             
2623             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2624             m_objectSpace.prepareForConservativeScan();
2625             ConservativeRoots conservativeRoots(*this);
2626             SuperSamplerScope superSamplerScope(false);
2627             gatherStackRoots(conservativeRoots);
2628             gatherJSStackRoots(conservativeRoots);
2629             gatherScratchBufferRoots(conservativeRoots);
2630             slotVisitor.append(conservativeRoots);
2631             
2632             lastVersion = m_phaseVersion;
2633         },
2634         ConstraintVolatility::GreyedByExecution);
2635     
2636     m_constraintSet->add(
2637         "Msr", "Misc Small Roots",
2638         [this] (SlotVisitor& slotVisitor) {
2639 #if JSC_OBJC_API_ENABLED
2640             scanExternalRememberedSet(*m_vm, slotVisitor);
2641 #endif
2642
2643             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2644                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2645             
2646             for (auto& pair : m_protectedValues)
2647                 slotVisitor.appendUnbarriered(pair.key);
2648             
2649             if (m_markListSet && m_markListSet->size())
2650                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2651             
2652             slotVisitor.appendUnbarriered(m_vm->exception());
2653             slotVisitor.appendUnbarriered(m_vm->lastException());
2654         },
2655         ConstraintVolatility::GreyedByExecution);
2656     
2657     m_constraintSet->add(
2658         "Sh", "Strong Handles",
2659         [this] (SlotVisitor& slotVisitor) {
2660             m_handleSet.visitStrongHandles(slotVisitor);
2661         },
2662         ConstraintVolatility::GreyedByExecution);
2663     
2664     m_constraintSet->add(
2665         "D", "Debugger",
2666         [this] (SlotVisitor& slotVisitor) {
2667 #if ENABLE(SAMPLING_PROFILER)
2668             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2669                 LockHolder locker(samplingProfiler->getLock());
2670                 samplingProfiler->processUnverifiedStackTraces();
2671                 samplingProfiler->visit(slotVisitor);
2672                 if (Options::logGC() == GCLogging::Verbose)
2673                     dataLog("Sampling Profiler data:\n", slotVisitor);
2674             }
2675 #endif // ENABLE(SAMPLING_PROFILER)
2676             
2677             if (m_vm->typeProfiler())
2678                 m_vm->typeProfilerLog()->visit(slotVisitor);
2679             
2680             m_vm->shadowChicken().visitChildren(slotVisitor);
2681         },
2682         ConstraintVolatility::GreyedByExecution);
2683     
2684     m_constraintSet->add(
2685         "Jsr", "JIT Stub Routines",
2686         [this] (SlotVisitor& slotVisitor) {
2687             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2688         },
2689         ConstraintVolatility::GreyedByExecution);
2690     
2691     m_constraintSet->add(
2692         "Ws", "Weak Sets",
2693         [this] (SlotVisitor& slotVisitor) {
2694             m_objectSpace.visitWeakSets(slotVisitor);
2695         },
2696         ConstraintVolatility::GreyedByMarking);
2697     
2698     m_constraintSet->add(
2699         "Wrh", "Weak Reference Harvesters",
2700         [this] (SlotVisitor& slotVisitor) {
2701             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2702                 current->visitWeakReferences(slotVisitor);
2703         },
2704         ConstraintVolatility::GreyedByMarking);
2705     
2706     m_constraintSet->add(
2707         "O", "Output",
2708         [this] (SlotVisitor& slotVisitor) {
2709             VM& vm = slotVisitor.vm();
2710             
2711             auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) {
2712                 VM& vm = slotVisitor.vm();
2713                 JSCell* cell = static_cast<JSCell*>(heapCell);
2714                 cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor);
2715             };
2716             
2717             auto add = [&] (auto& set) {
2718                 slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint));
2719             };
2720             
2721             add(vm.executableToCodeBlockEdgesWithConstraints);
2722         },
2723         ConstraintVolatility::GreyedByMarking,
2724         ConstraintParallelism::Parallel);
2725     
2726 #if ENABLE(DFG_JIT)
2727     m_constraintSet->add(
2728         "Dw", "DFG Worklists",
2729         [this] (SlotVisitor& slotVisitor) {
2730             for (unsigned i = DFG::numberOfWorklists(); i--;)
2731                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2732             
2733             // FIXME: This is almost certainly unnecessary.
2734             // https://bugs.webkit.org/show_bug.cgi?id=166829
2735             DFG::iterateCodeBlocksForGC(
2736                 *m_vm,
2737                 [&] (CodeBlock* codeBlock) {
2738                     slotVisitor.appendUnbarriered(codeBlock);
2739                 });
2740             
2741             if (Options::logGC() == GCLogging::Verbose)
2742                 dataLog("DFG Worklists:\n", slotVisitor);
2743         },
2744         ConstraintVolatility::GreyedByMarking);
2745 #endif
2746     
2747     m_constraintSet->add(
2748         "Cb", "CodeBlocks",
2749         [this] (SlotVisitor& slotVisitor) {
2750             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2751                 [&] (CodeBlock* codeBlock) {
2752                     // Visit the CodeBlock as a constraint only if it's black.
2753                     if (Heap::isMarked(codeBlock)
2754                         && codeBlock->cellState() == CellState::PossiblyBlack)
2755                         slotVisitor.visitAsConstraint(codeBlock);
2756                 });
2757         },
2758         ConstraintVolatility::SeldomGreyed);
2759     
2760     m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this));
2761 }
2762
2763 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2764 {
2765     PreventCollectionScope preventCollectionScope(*this);
2766     m_constraintSet->add(WTFMove(constraint));
2767 }
2768
2769 void Heap::notifyIsSafeToCollect()
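// notifyIsSafeToCollect is called once the VM is initialized far enough to tolerate a collection. It
// registers the core constraints, flips m_isSafeToCollect, and, when Options::collectContinuously()
// is set, spawns a debug thread that requests a collection every collectContinuouslyPeriodMS()
// milliseconds.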
2770 {
2771     MonotonicTime before;
2772     if (Options::logGC()) {
2773         before = MonotonicTime::now();
2774         dataLog("[GC<", RawPointer(this), ">: starting ");
2775     }
2776     
2777     addCoreConstraints();
2778     
2779     m_isSafeToCollect = true;
2780     
2781     if (Options::collectContinuously()) {
2782         m_collectContinuouslyThread = WTF::Thread::create(
2783             "JSC DEBUG Continuous GC",
2784             [this] () {
2785                 MonotonicTime initialTime = MonotonicTime::now();
2786                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2787                 while (!m_shouldStopCollectingContinuously) {
2788                     {
2789                         LockHolder locker(*m_threadLock);
2790                         if (m_requests.isEmpty()) {
2791                             m_requests.append(std::nullopt);
2792                             m_lastGrantedTicket++;
2793                             m_threadCondition->notifyOne(locker);
2794                         }
2795                     }
2796                     
2797                     {
2798                         LockHolder locker(m_collectContinuouslyLock);
2799                         Seconds elapsed = MonotonicTime::now() - initialTime;
2800                         Seconds elapsedInPeriod = elapsed % period;
2801                         MonotonicTime timeToWakeUp =
2802                             initialTime + elapsed - elapsedInPeriod + period;
2803                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2804                             m_collectContinuouslyCondition.waitUntil(
2805                                 m_collectContinuouslyLock, timeToWakeUp);
2806                         }
2807                     }
2808                 }
2809             });
2810     }
2811     
2812     if (Options::logGC())
2813         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2814 }
2815
2816 void Heap::preventCollection()
2817 {
2818     if (!m_isSafeToCollect)
2819         return;
2820     
2821     // This prevents the collectContinuously thread from starting a collection.
2822     m_collectContinuouslyLock.lock();
2823     
2824     // Wait for all collections to finish.
2825     waitForCollector(
2826         [&] (const AbstractLocker&) -> bool {
2827             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2828             return m_lastServedTicket == m_lastGrantedTicket;
2829         });
2830     
2831     // Now a collection can only start if this thread starts it.
2832     RELEASE_ASSERT(!m_collectionScope);
2833 }
2834
2835 void Heap::allowCollection()
2836 {
2837     if (!m_isSafeToCollect)
2838         return;
2839     
2840     m_collectContinuouslyLock.unlock();
2841 }
2842
2843 void Heap::setMutatorShouldBeFenced(bool value)
2844 {
2845     m_mutatorShouldBeFenced = value;
2846     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2847 }
2848
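// performIncrement implements allocation-driven incremental marking. Every allocation adds
// bytes * Options::gcIncrementScale() to m_incrementBalance; once the balance exceeds
// Options::gcIncrementBytes(), the mutator drains up to Options::gcIncrementMaxBytes() of marking
// work on its own SlotVisitor and subtracts what it actually visited. Illustrative numbers: with a
// scale of 2, allocating 1000 bytes adds 2000 bytes of marking debt.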
2849 void Heap::performIncrement(size_t bytes)
2850 {
2851     if (!m_objectSpace.isMarking())
2852         return;
2853
2854     m_incrementBalance += bytes * Options::gcIncrementScale();
2855
2856     // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
2857     // state when the double goes wild.
2858     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2859         m_incrementBalance = 0;
2860     
2861     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2862         return;
2863
2864     double targetBytes = m_incrementBalance;
2865     if (targetBytes <= 0)
2866         return;
2867     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2868
2869     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2870     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2871     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2872     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2873     m_incrementBalance -= bytesVisited;
2874 }
2875
2876 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2877 {
2878     m_heapFinalizerCallbacks.append(callback);
2879 }
2880
2881 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2882 {
2883     m_heapFinalizerCallbacks.removeFirst(callback);
2884 }
2885
2886 void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2887 {
2888     auto locker = holdLock(m_markingMutex);
2889     m_bonusVisitorTask = task;
2890     m_markingConditionVariable.notifyAll();
2891 }
2892
2893 void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task)
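// runTaskInParallel publishes the task as the bonus visitor task so that idle marking threads pick it
// up, runs it on the collector's own SlotVisitor, and then waits for the task's reference count to
// drop back to its initial value, which guarantees that every thread has finished with it.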
2894 {
2895     unsigned initialRefCount = task->refCount();
2896     setBonusVisitorTask(task);
2897     task->run(*m_collectorSlotVisitor);
2898     setBonusVisitorTask(nullptr);
2899     // The constraint solver expects the return of this function to imply termination of the task on
2900     // all threads. This wait ensures that property.
2901     {
2902         auto locker = holdLock(m_markingMutex);
2903         while (task->refCount() > initialRefCount)
2904             m_markingConditionVariable.wait(m_markingMutex);
2905     }
2906 }
2907
2908 } // namespace JSC