Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "JSWebAssemblyCodeBlock.h"
51 #include "MachineStackMarker.h"
52 #include "MarkedAllocatorInlines.h"
53 #include "MarkedSpaceInlines.h"
54 #include "MarkingConstraintSet.h"
55 #include "PreventCollectionScope.h"
56 #include "SamplingProfiler.h"
57 #include "ShadowChicken.h"
58 #include "SpaceTimeMutatorScheduler.h"
59 #include "SubspaceInlines.h"
60 #include "SuperSampler.h"
61 #include "StochasticSpaceTimeMutatorScheduler.h"
62 #include "StopIfNecessaryTimer.h"
63 #include "SweepingScope.h"
64 #include "SynchronousStopTheWorldMutatorScheduler.h"
65 #include "TypeProfiler.h"
66 #include "TypeProfilerLog.h"
67 #include "UnlinkedCodeBlock.h"
68 #include "VM.h"
69 #include "WasmMemory.h"
70 #include "WeakSetInlines.h"
71 #include <algorithm>
72 #if PLATFORM(IOS)
73 #include <bmalloc/bmalloc.h>
74 #endif
75 #include <wtf/CurrentTime.h>
76 #include <wtf/ListDump.h>
77 #include <wtf/MainThread.h>
78 #include <wtf/ParallelVectorIterator.h>
79 #include <wtf/ProcessID.h>
80 #include <wtf/RAMSize.h>
81 #include <wtf/SimpleStats.h>
82 #include <wtf/Threading.h>
83
84 #if USE(FOUNDATION)
85 #if __has_include(<objc/objc-internal.h>)
86 #include <objc/objc-internal.h>
87 #else
88 extern "C" void* objc_autoreleasePoolPush(void);
89 extern "C" void objc_autoreleasePoolPop(void *context);
90 #endif
91 #endif // USE(FOUNDATION)
92
93 using namespace std;
94
95 namespace JSC {
96
97 namespace {
98
99 bool verboseStop = false;
100
101 double maxPauseMS(double thisPauseMS)
102 {
103     static double maxPauseMS;
104     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
105     return maxPauseMS;
106 }
107
108 size_t minHeapSize(HeapType heapType, size_t ramSize)
109 {
110     if (heapType == LargeHeap) {
111         double result = min(
112             static_cast<double>(Options::largeHeapSize()),
113             ramSize * Options::smallHeapRAMFraction());
114         return static_cast<size_t>(result);
115     }
116     return Options::smallHeapSize();
117 }
118
119 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
120 {
121 #if PLATFORM(IOS)
122     size_t memoryFootprint = bmalloc::api::memoryFootprint();
123     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
124         return Options::smallHeapGrowthFactor() * heapSize;
125     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
126         return Options::mediumHeapGrowthFactor() * heapSize;
127 #else
128     if (heapSize < ramSize * Options::smallHeapRAMFraction())
129         return Options::smallHeapGrowthFactor() * heapSize;
130     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
131         return Options::mediumHeapGrowthFactor() * heapSize;
132 #endif
133     return Options::largeHeapGrowthFactor() * heapSize;
134 }
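// Worked example for the non-iOS branch above (illustrative values only; the real RAM fractions
// and growth factors come from Options and are not shown in this file): with ramSize = 1 GB,
// smallHeapRAMFraction() = 0.25, mediumHeapRAMFraction() = 0.5, and growth factors of 2 / 1.5 /
// 1.25, a 100 MB heap (below 256 MB) gets a 200 MB budget, a 300 MB heap gets 450 MB, and a
// 600 MB heap gets 750 MB.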
135
136 bool isValidSharedInstanceThreadState(VM* vm)
137 {
138     return vm->currentThreadIsHoldingAPILock();
139 }
140
141 bool isValidThreadState(VM* vm)
142 {
143     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
144         return false;
145
146     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
147         return false;
148
149     return true;
150 }
151
152 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
153 {
154     const char* typeName = "[unknown]";
155     const ClassInfo* info = cell->classInfo(vm);
156     if (info && info->className)
157         typeName = info->className;
158     set.add(typeName);
159 }
160
161 bool measurePhaseTiming()
162 {
163     return false;
164 }
165
166 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
167 {
168     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
169     static std::once_flag once;
170     std::call_once(
171         once,
172         [] {
173             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
174         });
175     return *result;
176 }
177
178 SimpleStats& timingStats(const char* name, CollectionScope scope)
179 {
180     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
181 }
182
183 class TimingScope {
184 public:
185     TimingScope(std::optional<CollectionScope> scope, const char* name)
186         : m_scope(scope)
187         , m_name(name)
188     {
189         if (measurePhaseTiming())
190             m_before = monotonicallyIncreasingTimeMS();
191     }
192     
193     TimingScope(Heap& heap, const char* name)
194         : TimingScope(heap.collectionScope(), name)
195     {
196     }
197     
198     void setScope(std::optional<CollectionScope> scope)
199     {
200         m_scope = scope;
201     }
202     
203     void setScope(Heap& heap)
204     {
205         setScope(heap.collectionScope());
206     }
207     
208     ~TimingScope()
209     {
210         if (measurePhaseTiming()) {
211             double after = monotonicallyIncreasingTimeMS();
212             double timing = after - m_before;
213             SimpleStats& stats = timingStats(m_name, *m_scope);
214             stats.add(timing);
215             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
216         }
217     }
218 private:
219     std::optional<CollectionScope> m_scope;
220     double m_before;
221     const char* m_name;
222 };
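// Usage sketch: a TimingScope brackets a GC phase and, when measurePhaseTiming() returns true,
// records and logs the phase's duration when the scope is destroyed. The snippet below mirrors
// how Heap::beginMarking() uses it later in this file; "somePhase" is a hypothetical name used
// only for illustration.
#if 0 // Illustrative sketch, not compiled.
void Heap::somePhase()
{
    TimingScope timingScope(*this, "Heap::somePhase");
    // ... do the phase's work; the timing is recorded and logged when timingScope is destroyed ...
}
#endif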
223
224 } // anonymous namespace
225
226 class Heap::Thread : public AutomaticThread {
227 public:
228     Thread(const AbstractLocker& locker, Heap& heap)
229         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
230         , m_heap(heap)
231     {
232     }
233     
234 protected:
235     PollResult poll(const AbstractLocker& locker) override
236     {
237         if (m_heap.m_threadShouldStop) {
238             m_heap.notifyThreadStopping(locker);
239             return PollResult::Stop;
240         }
241         if (m_heap.shouldCollectInCollectorThread(locker))
242             return PollResult::Work;
243         return PollResult::Wait;
244     }
245     
246     WorkResult work() override
247     {
248         m_heap.collectInCollectorThread();
249         return WorkResult::Continue;
250     }
251     
252     void threadDidStart() override
253     {
254         WTF::registerGCThread(GCThreadType::Main);
255     }
256
257 private:
258     Heap& m_heap;
259 };
260
261 Heap::Heap(VM* vm, HeapType heapType)
262     : m_heapType(heapType)
263     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
264     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
265     , m_sizeAfterLastCollect(0)
266     , m_sizeAfterLastFullCollect(0)
267     , m_sizeBeforeLastFullCollect(0)
268     , m_sizeAfterLastEdenCollect(0)
269     , m_sizeBeforeLastEdenCollect(0)
270     , m_bytesAllocatedThisCycle(0)
271     , m_bytesAbandonedSinceLastFullCollect(0)
272     , m_maxEdenSize(m_minBytesPerCycle)
273     , m_maxHeapSize(m_minBytesPerCycle)
274     , m_shouldDoFullCollection(false)
275     , m_totalBytesVisited(0)
276     , m_objectSpace(this)
277     , m_extraMemorySize(0)
278     , m_deprecatedExtraMemorySize(0)
279     , m_machineThreads(std::make_unique<MachineThreads>())
280     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
281     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
282     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
283     , m_raceMarkStack(std::make_unique<MarkStackArray>())
284     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
285     , m_handleSet(vm)
286     , m_codeBlocks(std::make_unique<CodeBlockSet>())
287     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
288     , m_isSafeToCollect(false)
289     , m_vm(vm)
290     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
291     // schedule the timer if we've never done a collection.
292     , m_lastFullGCLength(0.01)
293     , m_lastEdenGCLength(0.01)
294     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
295     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
296     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
297     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
298     , m_deferralDepth(0)
299 #if USE(FOUNDATION)
300     , m_delayedReleaseRecursionCount(0)
301 #endif
302     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
303     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
304     , m_helperClient(&heapHelperPool())
305     , m_threadLock(Box<Lock>::create())
306     , m_threadCondition(AutomaticThreadCondition::create())
307 {
308     m_worldState.store(0);
309     
310     if (Options::useConcurrentGC()) {
311         if (Options::useStochasticMutatorScheduler())
312             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
313         else
314             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
315     } else {
316         // We simulate turning off concurrent GC by making the scheduler say that the world
317         // should always be stopped when the collector is running.
318         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
319     }
320     
321     if (Options::verifyHeap())
322         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
323     
324     m_collectorSlotVisitor->optimizeForStoppedMutator();
325
326     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
327     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
328     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
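    // For example (illustrative numbers only): with m_ramSize = 1 GB and a criticalGCMemoryThreshold
    // of 0.75, memoryAboveCriticalThreshold is 256 MB, so m_maxEdenSizeWhenCritical is 64 MB.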
329
330     LockHolder locker(*m_threadLock);
331     m_thread = adoptRef(new Thread(locker, *this));
332 }
333
334 Heap::~Heap()
335 {
336     forEachSlotVisitor(
337         [&] (SlotVisitor& visitor) {
338             visitor.clearMarkStacks();
339         });
340     m_mutatorMarkStack->clear();
341     m_raceMarkStack->clear();
342     
343     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
344         WeakBlock::destroy(*this, block);
345 }
346
347 bool Heap::isPagedOut(double deadline)
348 {
349     return m_objectSpace.isPagedOut(deadline);
350 }
351
352 // The VM is being destroyed and the collector will never run again.
353 // Run all pending finalizers now because we won't get another chance.
354 void Heap::lastChanceToFinalize()
355 {
356     MonotonicTime before;
357     if (Options::logGC()) {
358         before = MonotonicTime::now();
359         dataLog("[GC<", RawPointer(this), ">: shutdown ");
360     }
361     
362     RELEASE_ASSERT(!m_vm->entryScope);
363     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
364     
365     if (m_collectContinuouslyThread) {
366         {
367             LockHolder locker(m_collectContinuouslyLock);
368             m_shouldStopCollectingContinuously = true;
369             m_collectContinuouslyCondition.notifyOne();
370         }
371         m_collectContinuouslyThread->waitForCompletion();
372     }
373     
374     if (Options::logGC())
375         dataLog("1");
376     
377     // Prevent new collections from being started. This is probably not even necessary, since we're not
378     // going to call into anything that starts collections. Still, this makes the algorithm more
379     // obviously sound.
380     m_isSafeToCollect = false;
381     
382     if (Options::logGC())
383         dataLog("2");
384
385     bool isCollecting;
386     {
387         auto locker = holdLock(*m_threadLock);
388         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
389         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
390     }
391     if (isCollecting) {
392         if (Options::logGC())
393             dataLog("...]\n");
394         
395         // Wait for the current collection to finish.
396         waitForCollector(
397             [&] (const AbstractLocker&) -> bool {
398                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
399                 return m_lastServedTicket == m_lastGrantedTicket;
400             });
401         
402         if (Options::logGC())
403             dataLog("[GC<", RawPointer(this), ">: shutdown ");
404     }
405     if (Options::logGC())
406         dataLog("3");
407
408     RELEASE_ASSERT(m_requests.isEmpty());
409     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
410     
411     // Carefully bring the thread down.
412     bool stopped = false;
413     {
414         LockHolder locker(*m_threadLock);
415         stopped = m_thread->tryStop(locker);
416         m_threadShouldStop = true;
417         if (!stopped)
418             m_threadCondition->notifyOne(locker);
419     }
420
421     if (Options::logGC())
422         dataLog("4");
423     
424     if (!stopped)
425         m_thread->join();
426     
427     if (Options::logGC())
428         dataLog("5 ");
429     
430     m_arrayBuffers.lastChanceToFinalize();
431     m_codeBlocks->lastChanceToFinalize(*m_vm);
432     m_objectSpace.stopAllocating();
433     m_objectSpace.lastChanceToFinalize();
434     releaseDelayedReleasedObjects();
435
436     sweepAllLogicallyEmptyWeakBlocks();
437     
438     m_objectSpace.freeMemory();
439     
440     if (Options::logGC())
441         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
442 }
443
444 void Heap::releaseDelayedReleasedObjects()
445 {
446 #if USE(FOUNDATION)
447     // We need to guard against the case that releasing an object can create more objects due to the
448     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
449     // back here and could try to recursively release objects. We guard that with a recursive entry
450     // count. Only the initial call will release objects; recursive calls simply return and let
451     // the initial call to the function take care of any objects created during release time.
452     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
453     // and use a temp Vector for the actual releasing.
454     if (!m_delayedReleaseRecursionCount++) {
455         while (!m_delayedReleaseObjects.isEmpty()) {
456             ASSERT(m_vm->currentThreadIsHoldingAPILock());
457
458             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
459
460             {
461                 // We need to drop locks before calling out to arbitrary code.
462                 JSLock::DropAllLocks dropAllLocks(m_vm);
463
464                 void* context = objc_autoreleasePoolPush();
465                 objectsToRelease.clear();
466                 objc_autoreleasePoolPop(context);
467             }
468         }
469     }
470     m_delayedReleaseRecursionCount--;
471 #endif
472 }
473
474 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
475 {
476     didAllocate(size);
477     collectIfNecessaryOrDefer();
478 }
479
480 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
481 {
482     // FIXME: Change this to use SaturatedArithmetic when available.
483     // https://bugs.webkit.org/show_bug.cgi?id=170411
484     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
485     checkedNewSize += size;
486     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
487     reportExtraMemoryAllocatedSlowCase(size);
488 }
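// Usage sketch: code that attaches out-of-line memory to a GC'd object is expected to report it
// so that allocation pacing accounts for that memory. This assumes the inline entry points
// reportExtraMemoryAllocated() / deprecatedReportExtraMemory() declared in Heap.h, which funnel
// into the slow cases above; MyCell and m_buffer are hypothetical.
#if 0 // Illustrative sketch, not compiled.
void MyCell::finishCreation(VM& vm, size_t externalByteCount)
{
    Base::finishCreation(vm);
    m_buffer = fastMalloc(externalByteCount); // Out-of-line memory owned by this cell.
    vm.heap.reportExtraMemoryAllocated(externalByteCount);
}
#endif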
489
490 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
491 {
492 #if PLATFORM(IOS)
493     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
494         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
495         m_precentAvailableMemoryCachedCallCount = 0;
496     }
497
498     return m_overCriticalMemoryThreshold;
499 #else
500     UNUSED_PARAM(memoryThresholdCallType);
501     return false;
502 #endif
503 }
504
505 void Heap::reportAbandonedObjectGraph()
506 {
507     // Our clients don't know exactly how much memory they
508     // are abandoning so we just guess for them.
509     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
510
511     // We want to accelerate the next collection. Because memory has just 
512     // been abandoned, the next collection has the potential to 
513     // be more profitable. Since allocation is the trigger for collection, 
514     // we hasten the next collection by pretending that we've allocated more memory. 
515     if (m_fullActivityCallback) {
516         m_fullActivityCallback->didAllocate(
517             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
518     }
519     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
520 }
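// Worked example (illustrative numbers only): if capacity() is 100 MB, we treat roughly 10 MB as
// newly abandoned, and the full-GC timer is credited as though that much extra allocation had
// happened on top of the growth since the last full collection plus this cycle's allocations,
// which pulls the next full collection earlier.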
521
522 void Heap::protect(JSValue k)
523 {
524     ASSERT(k);
525     ASSERT(m_vm->currentThreadIsHoldingAPILock());
526
527     if (!k.isCell())
528         return;
529
530     m_protectedValues.add(k.asCell());
531 }
532
533 bool Heap::unprotect(JSValue k)
534 {
535     ASSERT(k);
536     ASSERT(m_vm->currentThreadIsHoldingAPILock());
537
538     if (!k.isCell())
539         return false;
540
541     return m_protectedValues.remove(k.asCell());
542 }
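// Usage sketch: protect() / unprotect() pin a value that is referenced only from outside the JS
// heap; both must be called with the API lock held, per the assertions above. The function and
// variable names below are hypothetical.
#if 0 // Illustrative sketch, not compiled.
static void pinValueAcrossNativeWork(VM* vm, JSValue someValue)
{
    JSLockHolder locker(vm);
    vm->heap.protect(someValue);   // someValue now survives collections...
    // ... use someValue from native code ...
    vm->heap.unprotect(someValue); // ...until the matching unprotect.
}
#endif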
543
544 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
545 {
546     if (m_arrayBuffers.addReference(cell, buffer)) {
547         collectIfNecessaryOrDefer();
548         didAllocate(buffer->gcSizeEstimateInBytes());
549     }
550 }
551
552 void Heap::finalizeUnconditionalFinalizers()
553 {
554     while (m_unconditionalFinalizers.hasNext()) {
555         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
556         finalizer->finalizeUnconditionally();
557     }
558 }
559
560 void Heap::willStartIterating()
561 {
562     m_objectSpace.willStartIterating();
563 }
564
565 void Heap::didFinishIterating()
566 {
567     m_objectSpace.didFinishIterating();
568 }
569
570 void Heap::completeAllJITPlans()
571 {
572 #if ENABLE(JIT)
573     JITWorklist::instance()->completeAllForVM(*m_vm);
574 #endif // ENABLE(JIT)
575     DFG::completeAllPlansForVM(*m_vm);
576 }
577
578 template<typename Func>
579 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
580 {
581     m_codeBlocks->iterateCurrentlyExecuting(func);
582     DFG::iterateCodeBlocksForGC(*m_vm, func);
583 }
584
585 template<typename Func>
586 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
587 {
588     Vector<CodeBlock*, 256> codeBlocks;
589     iterateExecutingAndCompilingCodeBlocks(
590         [&] (CodeBlock* codeBlock) {
591             codeBlocks.append(codeBlock);
592         });
593     for (CodeBlock* codeBlock : codeBlocks)
594         func(codeBlock);
595 }
596
597 void Heap::assertSharedMarkStacksEmpty()
598 {
599     bool ok = true;
600     
601     if (!m_sharedCollectorMarkStack->isEmpty()) {
602         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
603         ok = false;
604     }
605     
606     if (!m_sharedMutatorMarkStack->isEmpty()) {
607         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
608         ok = false;
609     }
610     
611     RELEASE_ASSERT(ok);
612 }
613
614 void Heap::gatherStackRoots(ConservativeRoots& roots)
615 {
616     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
617 }
618
619 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
620 {
621 #if !ENABLE(JIT)
622     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
623 #else
624     UNUSED_PARAM(roots);
625 #endif
626 }
627
628 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
629 {
630 #if ENABLE(DFG_JIT)
631     m_vm->gatherConservativeRoots(roots);
632 #else
633     UNUSED_PARAM(roots);
634 #endif
635 }
636
637 void Heap::beginMarking()
638 {
639     TimingScope timingScope(*this, "Heap::beginMarking");
640     if (m_collectionScope == CollectionScope::Full)
641         m_codeBlocks->clearMarksForFullCollection();
642     m_jitStubRoutines->clearMarks();
643     m_objectSpace.beginMarking();
644     setMutatorShouldBeFenced(true);
645 }
646
647 void Heap::removeDeadCompilerWorklistEntries()
648 {
649 #if ENABLE(DFG_JIT)
650     for (unsigned i = DFG::numberOfWorklists(); i--;)
651         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
652 #endif
653 }
654
655 bool Heap::isHeapSnapshotting() const
656 {
657     HeapProfiler* heapProfiler = m_vm->heapProfiler();
658     if (UNLIKELY(heapProfiler))
659         return heapProfiler->activeSnapshotBuilder();
660     return false;
661 }
662
663 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
664     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
665         : m_builder(builder)
666     {
667     }
668
669     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
670     {
671         if (kind == HeapCell::JSCell) {
672             JSCell* cell = static_cast<JSCell*>(heapCell);
673             cell->methodTable()->heapSnapshot(cell, m_builder);
674         }
675         return IterationStatus::Continue;
676     }
677
678     HeapSnapshotBuilder& m_builder;
679 };
680
681 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
682 {
683     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
684         HeapIterationScope heapIterationScope(*this);
685         GatherHeapSnapshotData functor(*builder);
686         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
687     }
688 }
689
690 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
691     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
692         : m_snapshot(snapshot)
693     {
694     }
695
696     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
697     {
698         if (kind == HeapCell::JSCell)
699             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
700         return IterationStatus::Continue;
701     }
702
703     HeapSnapshot& m_snapshot;
704 };
705
706 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
707 {
708     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
709         HeapIterationScope heapIterationScope(*this);
710         RemoveDeadHeapSnapshotNodes functor(*snapshot);
711         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
712         snapshot->shrinkToFit();
713     }
714 }
715
716 void Heap::updateObjectCounts()
717 {
718     if (m_collectionScope == CollectionScope::Full)
719         m_totalBytesVisited = 0;
720
721     m_totalBytesVisitedThisCycle = bytesVisited();
722     
723     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
724 }
725
726 void Heap::endMarking()
727 {
728     forEachSlotVisitor(
729         [&] (SlotVisitor& visitor) {
730             visitor.reset();
731         });
732
733     assertSharedMarkStacksEmpty();
734     m_weakReferenceHarvesters.removeAll();
735
736     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
737     
738     m_objectSpace.endMarking();
739     setMutatorShouldBeFenced(Options::forceFencedBarrier());
740 }
741
742 size_t Heap::objectCount()
743 {
744     return m_objectSpace.objectCount();
745 }
746
747 size_t Heap::extraMemorySize()
748 {
749     // FIXME: Change this to use SaturatedArithmetic when available.
750     // https://bugs.webkit.org/show_bug.cgi?id=170411
751     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
752     checkedTotal += m_deprecatedExtraMemorySize;
753     checkedTotal += m_arrayBuffers.size();
754     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
755
756     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
757     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
758 }
759
760 size_t Heap::size()
761 {
762     return m_objectSpace.size() + extraMemorySize();
763 }
764
765 size_t Heap::capacity()
766 {
767     return m_objectSpace.capacity() + extraMemorySize();
768 }
769
770 size_t Heap::protectedGlobalObjectCount()
771 {
772     size_t result = 0;
773     forEachProtectedCell(
774         [&] (JSCell* cell) {
775             if (cell->isObject() && asObject(cell)->isGlobalObject())
776                 result++;
777         });
778     return result;
779 }
780
781 size_t Heap::globalObjectCount()
782 {
783     HeapIterationScope iterationScope(*this);
784     size_t result = 0;
785     m_objectSpace.forEachLiveCell(
786         iterationScope,
787         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
788             if (kind != HeapCell::JSCell)
789                 return IterationStatus::Continue;
790             JSCell* cell = static_cast<JSCell*>(heapCell);
791             if (cell->isObject() && asObject(cell)->isGlobalObject())
792                 result++;
793             return IterationStatus::Continue;
794         });
795     return result;
796 }
797
798 size_t Heap::protectedObjectCount()
799 {
800     size_t result = 0;
801     forEachProtectedCell(
802         [&] (JSCell*) {
803             result++;
804         });
805     return result;
806 }
807
808 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
809 {
810     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
811     forEachProtectedCell(
812         [&] (JSCell* cell) {
813             recordType(*vm(), *result, cell);
814         });
815     return result;
816 }
817
818 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
819 {
820     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
821     HeapIterationScope iterationScope(*this);
822     m_objectSpace.forEachLiveCell(
823         iterationScope,
824         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
825             if (kind == HeapCell::JSCell)
826                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
827             return IterationStatus::Continue;
828         });
829     return result;
830 }
831
832 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
833 {
834     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
835         return;
836     
837     PreventCollectionScope preventCollectionScope(*this);
838     
839     // If JavaScript is running, it's not safe to delete all JavaScript code, since
840     // we'll end up returning to deleted code.
841     RELEASE_ASSERT(!m_vm->entryScope);
842     RELEASE_ASSERT(!m_collectionScope);
843
844     completeAllJITPlans();
845
846     for (ExecutableBase* executable : m_executables)
847         executable->clearCode();
848
849 #if ENABLE(WEBASSEMBLY)
850     {
851         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
852         // have no idea that we cleared the code from all of the Executables in the
853         // VM. This could leave Wasm in an inconsistent state where it has an IC that
854         // points into a CodeBlock that could be dead. The IC will still succeed because
855         // it uses a callee check, but then it will call into dead code.
856         HeapIterationScope heapIterationScope(*this);
857         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
858             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
859             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
860             codeBlock->clearJSCallICs(*m_vm);
861         });
862     }
863 #endif
864 }
865
866 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
867 {
868     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
869         return;
870     
871     PreventCollectionScope preventCollectionScope(*this);
872
873     RELEASE_ASSERT(!m_collectionScope);
874     
875     for (ExecutableBase* current : m_executables) {
876         if (!current->isFunctionExecutable())
877             continue;
878         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
879     }
880 }
881
882 void Heap::clearUnmarkedExecutables()
883 {
884     for (unsigned i = m_executables.size(); i--;) {
885         ExecutableBase* current = m_executables[i];
886         if (isMarked(current))
887             continue;
888
889         // Eagerly dereference the Executable's JITCode in order to run watchpoint
890         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
891         current->clearCode();
892         std::swap(m_executables[i], m_executables.last());
893         m_executables.removeLast();
894     }
895
896     m_executables.shrinkToFit();
897 }
898
899 void Heap::deleteUnmarkedCompiledCode()
900 {
901     clearUnmarkedExecutables();
902     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
903     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
904 }
905
906 void Heap::addToRememberedSet(const JSCell* constCell)
907 {
908     JSCell* cell = const_cast<JSCell*>(constCell);
909     ASSERT(cell);
910     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
911     m_barriersExecuted++;
912     if (m_mutatorShouldBeFenced) {
913         WTF::loadLoadFence();
914         if (!isMarkedConcurrently(cell)) {
915             // During a full collection a store into an unmarked object that had survived past
916             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
917             // object gets marked at some time after this then it will go down the normal marking
918             // path. So, we don't have to remember this object. We could return here. But we go
919             // further and attempt to re-white the object.
920             
921             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
922             
923             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
924                 // Now we protect against this race:
925                 //
926                 //     1) Object starts out black + unmarked.
927                 //     --> We do isMarkedConcurrently here.
928                 //     2) Object is marked and greyed.
929                 //     3) Object is scanned and blacked.
930                 //     --> We do atomicCompareExchangeCellStateStrong here.
931                 //
932                 // In this case we would have made the object white again, even though it should
933                 // be black. This check lets us correct our mistake. This relies on the fact that
934                 // isMarkedConcurrently converges monotonically to true.
935                 if (isMarkedConcurrently(cell)) {
936                     // It's difficult to work out whether the object should be grey or black at
937                     // this point. We say black conservatively.
938                     cell->setCellState(CellState::PossiblyBlack);
939                 }
940                 
941                 // Either way, we can return. Most likely, the object was not marked, and so the
942                 // object is now labeled white. This means that future barrier executions will not
943                 // fire. In the unlikely event that the object had become marked, we can still
944                 // return anyway, since we proved that the object was not marked at the time that
945                 // we executed this slow path.
946             }
947             
948             return;
949         }
950     } else
951         ASSERT(Heap::isMarkedConcurrently(cell));
952     // It could be that the object was *just* marked. This means that the collector may set the
953     // state to PossiblyGrey and then to PossiblyBlack at any time. It's OK for us to
954     // race with the collector here. If we win then this is accurate because the object _will_
955     // get scanned again. If we lose then someone else will barrier the object again. That would
956     // be unfortunate but not the end of the world.
957     cell->setCellState(CellState::PossiblyGrey);
958     m_mutatorMarkStack->append(cell);
959 }
960
961 void Heap::sweepSynchronously()
962 {
963     double before = 0;
964     if (Options::logGC()) {
965         dataLog("Full sweep: ", capacity() / 1024, "kb ");
966         before = currentTimeMS();
967     }
968     m_objectSpace.sweep();
969     m_objectSpace.shrink();
970     if (Options::logGC()) {
971         double after = currentTimeMS();
972         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
973     }
974 }
975
976 void Heap::collect(Synchronousness synchronousness, GCRequest request)
977 {
978     switch (synchronousness) {
979     case Async:
980         collectAsync(request);
981         return;
982     case Sync:
983         collectSync(request);
984         return;
985     }
986     RELEASE_ASSERT_NOT_REACHED();
987 }
988
989 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
990 {
991     switch (synchronousness) {
992     case Async: {
993         collectAsync(request);
994         stopIfNecessary();
995         return;
996     }
997         
998     case Sync: {
999         collectSync(request);
1000         
1001         DeferGCForAWhile deferGC(*this);
1002         if (UNLIKELY(Options::useImmortalObjects()))
1003             sweeper().stopSweeping();
1004         
1005         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1006         if (!alreadySweptInCollectSync) {
1007             if (Options::logGC())
1008                 dataLog("[GC<", RawPointer(this), ">: ");
1009             sweepSynchronously();
1010             if (Options::logGC())
1011                 dataLog("]\n");
1012         }
1013         m_objectSpace.assertNoUnswept();
1014         
1015         sweepAllLogicallyEmptyWeakBlocks();
1016         return;
1017     } }
1018     RELEASE_ASSERT_NOT_REACHED();
1019 }
1020
1021 void Heap::collectAsync(GCRequest request)
1022 {
1023     if (!m_isSafeToCollect)
1024         return;
1025
1026     bool alreadyRequested = false;
1027     {
1028         LockHolder locker(*m_threadLock);
1029         for (const GCRequest& previousRequest : m_requests) {
1030             if (request.subsumedBy(previousRequest)) {
1031                 alreadyRequested = true;
1032                 break;
1033             }
1034         }
1035     }
1036     if (alreadyRequested)
1037         return;
1038
1039     requestCollection(request);
1040 }
1041
1042 void Heap::collectSync(GCRequest request)
1043 {
1044     if (!m_isSafeToCollect)
1045         return;
1046     
1047     waitForCollection(requestCollection(request));
1048 }
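// Usage sketch: these are the entry points callers use to request a collection. Debug and test
// code typically forces a synchronous full collection roughly as below (assuming GCRequest is
// implicitly constructible from CollectionScope and that the caller holds the API lock).
// collectAsync() merely enqueues a request for the collector, while collectSync() additionally
// waits for the granted ticket to be served.
#if 0 // Illustrative sketch, not compiled.
static void forceFullGCForDebugging(VM& vm)
{
    JSLockHolder locker(vm);
    vm.heap.collectNow(Sync, CollectionScope::Full);
}
#endif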
1049
1050 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1051 {
1052     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1053     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1054     
1055     if (false)
1056         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1057     
1058     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1059 }
1060
1061 void Heap::collectInCollectorThread()
1062 {
1063     for (;;) {
1064         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1065         switch (result) {
1066         case RunCurrentPhaseResult::Finished:
1067             return;
1068         case RunCurrentPhaseResult::Continue:
1069             break;
1070         case RunCurrentPhaseResult::NeedCurrentThreadState:
1071             RELEASE_ASSERT_NOT_REACHED();
1072             break;
1073         }
1074     }
1075 }
1076
1077 void Heap::checkConn(GCConductor conn)
1078 {
1079     switch (conn) {
1080     case GCConductor::Mutator:
1081         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1082         return;
1083     case GCConductor::Collector:
1084         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1085         return;
1086     }
1087     RELEASE_ASSERT_NOT_REACHED();
1088 }
1089
1090 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1091 {
1092     checkConn(conn);
1093     m_currentThreadState = currentThreadState;
1094     
1095     if (conn == GCConductor::Mutator)
1096         sanitizeStackForVM(vm());
1097     
1098     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1099     if (!finishChangingPhase(conn)) {
1100         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1101         // this, but it's probably not the end of the world if it did happen.
1102         if (false)
1103             dataLog("Conn bounce-back.\n");
1104         return RunCurrentPhaseResult::Finished;
1105     }
1106     
1107     bool result = false;
1108     switch (m_currentPhase) {
1109     case CollectorPhase::NotRunning:
1110         result = runNotRunningPhase(conn);
1111         break;
1112         
1113     case CollectorPhase::Begin:
1114         result = runBeginPhase(conn);
1115         break;
1116         
1117     case CollectorPhase::Fixpoint:
1118         if (!currentThreadState && conn == GCConductor::Mutator)
1119             return RunCurrentPhaseResult::NeedCurrentThreadState;
1120         
1121         result = runFixpointPhase(conn);
1122         break;
1123         
1124     case CollectorPhase::Concurrent:
1125         result = runConcurrentPhase(conn);
1126         break;
1127         
1128     case CollectorPhase::Reloop:
1129         result = runReloopPhase(conn);
1130         break;
1131         
1132     case CollectorPhase::End:
1133         result = runEndPhase(conn);
1134         break;
1135     }
1136
1137     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1138 }
1139
1140 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1141 {
1142     // Check m_requests since the mutator calls this to poll what's going on.
1143     {
1144         auto locker = holdLock(*m_threadLock);
1145         if (m_requests.isEmpty())
1146             return false;
1147     }
1148     
1149     return changePhase(conn, CollectorPhase::Begin);
1150 }
1151
1152 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1153 {
1154     m_currentGCStartTime = MonotonicTime::now();
1155     
1156     {
1157         LockHolder locker(*m_threadLock);
1158         RELEASE_ASSERT(!m_requests.isEmpty());
1159         m_currentRequest = m_requests.first();
1160     }
1161         
1162     if (Options::logGC())
1163         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1164
1165     m_beforeGC = MonotonicTime::now();
1166
1167     if (m_collectionScope) {
1168         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1169         RELEASE_ASSERT_NOT_REACHED();
1170     }
1171     
1172     willStartCollection();
1173         
1174     if (UNLIKELY(m_verifier)) {
1175         // Verify that live objects from the last GC cycle haven't been corrupted by
1176         // mutators before we begin this new GC cycle.
1177         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1178             
1179         m_verifier->startGC();
1180         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1181     }
1182         
1183     prepareForMarking();
1184         
1185     if (m_collectionScope == CollectionScope::Full) {
1186         m_opaqueRoots.clear();
1187         m_collectorSlotVisitor->clearMarkStacks();
1188         m_mutatorMarkStack->clear();
1189     }
1190
1191     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1192
1193     beginMarking();
1194
1195     forEachSlotVisitor(
1196         [&] (SlotVisitor& visitor) {
1197             visitor.didStartMarking();
1198         });
1199
1200     m_parallelMarkersShouldExit = false;
1201
1202     m_helperClient.setFunction(
1203         [this] () {
1204             SlotVisitor* slotVisitor;
1205             {
1206                 LockHolder locker(m_parallelSlotVisitorLock);
1207                 if (m_availableParallelSlotVisitors.isEmpty()) {
1208                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1209                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1210                     
1211                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1212                         newVisitor->optimizeForStoppedMutator();
1213                     
1214                     newVisitor->didStartMarking();
1215                     
1216                     slotVisitor = newVisitor.get();
1217                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1218                 } else
1219                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1220             }
1221
1222             WTF::registerGCThread(GCThreadType::Helper);
1223
1224             {
1225                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1226                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1227             }
1228
1229             {
1230                 LockHolder locker(m_parallelSlotVisitorLock);
1231                 m_availableParallelSlotVisitors.append(slotVisitor);
1232             }
1233         });
1234
1235     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1236
1237     m_constraintSet->didStartMarking();
1238     
1239     m_scheduler->beginCollection();
1240     if (Options::logGC())
1241         m_scheduler->log();
1242     
1243     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1244     // checks because bootstrap would have put things into the visitor. So, we should fall
1245     // through to draining.
1246     
1247     if (!slotVisitor.didReachTermination()) {
1248         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1249         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1250         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1251         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1252         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1253         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1254         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1255         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1256         RELEASE_ASSERT_NOT_REACHED();
1257     }
1258         
1259     return changePhase(conn, CollectorPhase::Fixpoint);
1260 }
1261
1262 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1263 {
1264     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1265     
1266     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1267     
1268     if (Options::logGC()) {
1269         HashMap<const char*, size_t> visitMap;
1270         forEachSlotVisitor(
1271             [&] (SlotVisitor& slotVisitor) {
1272                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1273             });
1274         
1275         auto perVisitorDump = sortedMapDump(
1276             visitMap,
1277             [] (const char* a, const char* b) -> bool {
1278                 return strcmp(a, b) < 0;
1279             },
1280             ":", " ");
1281         
1282         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1283     }
1284         
1285     if (slotVisitor.didReachTermination()) {
1286         m_scheduler->didReachTermination();
1287             
1288         assertSharedMarkStacksEmpty();
1289             
1290         slotVisitor.mergeIfNecessary();
1291         for (auto& parallelVisitor : m_parallelSlotVisitors)
1292             parallelVisitor->mergeIfNecessary();
1293             
1294         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1295         // we don't have to execute root constraints again unless the mutator did run. At a
1296         // minimum, we could use this for work estimates - but it's probably more than just an
1297         // estimate.
1298         // https://bugs.webkit.org/show_bug.cgi?id=166828
1299             
1300         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1301         // into play if we're executing constraints for the first time. But that will matter
1302         // when we have deep stacks or a lot of DOM stuff.
1303         // https://bugs.webkit.org/show_bug.cgi?id=166831
1304             
1305         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1306         // add their own using Heap::addMarkingConstraint().
1307         bool converged =
1308             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1309         if (converged && slotVisitor.isEmpty()) {
1310             assertSharedMarkStacksEmpty();
1311             return changePhase(conn, CollectorPhase::End);
1312         }
1313             
1314         m_scheduler->didExecuteConstraints();
1315     }
1316         
1317     if (Options::logGC())
1318         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1319         
1320     {
1321         ParallelModeEnabler enabler(slotVisitor);
1322         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1323     }
1324         
1325     m_scheduler->synchronousDrainingDidStall();
1326
1327     if (slotVisitor.didReachTermination())
1328         return true; // This is like relooping to the top of runFixpointPhase().
1329         
1330     if (!m_scheduler->shouldResume())
1331         return true;
1332
1333     m_scheduler->willResume();
1334         
1335     if (Options::logGC()) {
1336         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1337         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1338     }
1339
1340     // Forgive the mutator for its past failures to keep up.
1341     // FIXME: Figure out if moving this to different places results in perf changes.
1342     m_incrementBalance = 0;
1343         
1344     return changePhase(conn, CollectorPhase::Concurrent);
1345 }
1346
1347 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1348 {
1349     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1350
1351     switch (conn) {
1352     case GCConductor::Mutator: {
1353         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1354         // stopIfNecessary(), i.e., on every allocation slow path. When that happens we check whether
1355         // it's time to stop and do some work.
1356         if (slotVisitor.didReachTermination()
1357             || m_scheduler->shouldStop())
1358             return changePhase(conn, CollectorPhase::Reloop);
1359         
1360         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1361         // everything. This is super cheap if the SlotVisitor is already empty.
1362         slotVisitor.donateAll();
1363         return false;
1364     }
1365     case GCConductor::Collector: {
1366         {
1367             ParallelModeEnabler enabler(slotVisitor);
1368             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1369         }
1370         return changePhase(conn, CollectorPhase::Reloop);
1371     } }
1372     
1373     RELEASE_ASSERT_NOT_REACHED();
1374     return false;
1375 }
1376
1377 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1378 {
1379     if (Options::logGC())
1380         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1381     
1382     m_scheduler->didStop();
1383     
1384     if (Options::logGC())
1385         m_scheduler->log();
1386     
1387     return changePhase(conn, CollectorPhase::Fixpoint);
1388 }
1389
1390 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1391 {
1392     m_scheduler->endCollection();
1393         
1394     {
1395         auto locker = holdLock(m_markingMutex);
1396         m_parallelMarkersShouldExit = true;
1397         m_markingConditionVariable.notifyAll();
1398     }
1399     m_helperClient.finish();
1400     
1401     iterateExecutingAndCompilingCodeBlocks(
1402         [&] (CodeBlock* codeBlock) {
1403             writeBarrier(codeBlock);
1404         });
1405         
1406     updateObjectCounts();
1407     endMarking();
1408         
1409     if (UNLIKELY(m_verifier)) {
1410         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1411         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1412     }
1413         
1414     if (vm()->typeProfiler())
1415         vm()->typeProfiler()->invalidateTypeSetCache();
1416         
1417     reapWeakHandles();
1418     pruneStaleEntriesFromWeakGCMaps();
1419     sweepArrayBuffers();
1420     snapshotUnswept();
1421     finalizeUnconditionalFinalizers();
1422     removeDeadCompilerWorklistEntries();
1423     notifyIncrementalSweeper();
1424     
1425     m_codeBlocks->iterateCurrentlyExecuting(
1426         [&] (CodeBlock* codeBlock) {
1427             writeBarrier(codeBlock);
1428         });
1429     m_codeBlocks->clearCurrentlyExecuting();
1430         
1431     m_objectSpace.prepareForAllocation();
1432     updateAllocationLimits();
1433
1434     if (UNLIKELY(m_verifier)) {
1435         m_verifier->trimDeadCells();
1436         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1437     }
1438
1439     didFinishCollection();
1440     
1441     if (m_currentRequest.didFinishEndPhase)
1442         m_currentRequest.didFinishEndPhase->run();
1443     
1444     if (false) {
1445         dataLog("Heap state after GC:\n");
1446         m_objectSpace.dumpBits();
1447     }
1448     
1449     if (Options::logGC()) {
1450         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1451         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1452     }
1453     
1454     {
1455         auto locker = holdLock(*m_threadLock);
1456         m_requests.removeFirst();
1457         m_lastServedTicket++;
1458         clearMutatorWaiting();
1459     }
1460     ParkingLot::unparkAll(&m_worldState);
1461
1462     if (false)
1463         dataLog("GC END!\n");
1464
1465     setNeedFinalize();
1466
1467     m_lastGCStartTime = m_currentGCStartTime;
1468     m_lastGCEndTime = MonotonicTime::now();
1469         
1470     return changePhase(conn, CollectorPhase::NotRunning);
1471 }
1472
1473 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1474 {
1475     checkConn(conn);
1476
1477     m_nextPhase = nextPhase;
1478
1479     return finishChangingPhase(conn);
1480 }
1481
1482 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1483 {
1484     checkConn(conn);
1485     
1486     if (m_nextPhase == m_currentPhase)
1487         return true;
1488
1489     if (false)
1490         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1491     
1492     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1493     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1494     
1495     if (suspendedBefore != suspendedAfter) {
1496         if (suspendedBefore) {
1497             RELEASE_ASSERT(!suspendedAfter);
1498             
1499             resumeThePeriphery();
1500             if (conn == GCConductor::Collector)
1501                 resumeTheMutator();
1502             else
1503                 handleNeedFinalize();
1504         } else {
1505             RELEASE_ASSERT(!suspendedBefore);
1506             RELEASE_ASSERT(suspendedAfter);
1507             
1508             if (conn == GCConductor::Collector) {
1509                 waitWhileNeedFinalize();
1510                 if (!stopTheMutator()) {
1511                     if (false)
1512                         dataLog("Returning false.\n");
1513                     return false;
1514                 }
1515             } else {
1516                 sanitizeStackForVM(m_vm);
1517                 handleNeedFinalize();
1518             }
1519             stopThePeriphery(conn);
1520         }
1521     }
1522     
1523     m_currentPhase = m_nextPhase;
1524     return true;
1525 }
1526
1527 void Heap::stopThePeriphery(GCConductor conn)
1528 {
1529     if (m_collectorBelievesThatTheWorldIsStopped) {
1530         dataLog("FATAL: world already stopped.\n");
1531         RELEASE_ASSERT_NOT_REACHED();
1532     }
1533     
1534     if (m_mutatorDidRun)
1535         m_mutatorExecutionVersion++;
1536     
1537     m_mutatorDidRun = false;
1538
1539     suspendCompilerThreads();
1540     m_collectorBelievesThatTheWorldIsStopped = true;
1541
1542     forEachSlotVisitor(
1543         [&] (SlotVisitor& slotVisitor) {
1544             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1545         });
1546
1547 #if ENABLE(JIT)
1548     {
1549         DeferGCForAWhile awhile(*this);
1550         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1551             && conn == GCConductor::Collector)
1552             setGCDidJIT();
1553     }
1554 #else
1555     UNUSED_PARAM(conn);
1556 #endif // ENABLE(JIT)
1557     
1558     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1559     
1560     m_structureIDTable.flushOldTables();
1561     m_objectSpace.stopAllocating();
1562     
1563     m_stopTime = MonotonicTime::now();
1564 }
1565
1566 NEVER_INLINE void Heap::resumeThePeriphery()
1567 {
1568     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1569     // collection cycle or this is just a concurrent phase within a collection cycle:
1570     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1571     //   last active block.
1572     // - During collection cycle: it reinstates the last active block.
1573     m_objectSpace.resumeAllocating();
1574     
1575     m_barriersExecuted = 0;
1576     
1577     if (!m_collectorBelievesThatTheWorldIsStopped) {
1578         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1579         RELEASE_ASSERT_NOT_REACHED();
1580     }
1581     m_collectorBelievesThatTheWorldIsStopped = false;
1582     
1583     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1584     // become available. We basically want a lockAny() method that will lock whatever lock is available
1585     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1586     // queues at once, which is achievable; it would just require memory allocation, which is
1587     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1588     // with a DLG-style handshake mechanism, but that seems less general.
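    // A sketch of what that hypothetical lockAny() could look like (not an existing WTF API;
    // names and signature are assumptions):
    //
    //     // Block until any one of the given locks can be acquired; return a holder for it
    //     // together with its index in the vector.
    //     std::pair<LockHolder, unsigned> lockAny(const Vector<Lock*>& locks);
    //
    // The loop below could then block on whichever rightToRun() lock frees up first instead of
    // polling each SlotVisitor with tryHoldLock() and yielding.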
1589     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1590
1591     forEachSlotVisitor(
1592         [&] (SlotVisitor& slotVisitor) {
1593             slotVisitorsToUpdate.append(&slotVisitor);
1594         });
1595     
1596     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1597         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1598             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1599             bool remove = false;
1600             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1601                 remove = true;
1602             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1603                 slotVisitor.updateMutatorIsStopped(locker);
1604                 remove = true;
1605             }
1606             if (remove) {
1607                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1608                 slotVisitorsToUpdate.takeLast();
1609             }
1610         }
1611         WTF::Thread::yield();
1612     }
1613     
1614     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1615         slotVisitor->updateMutatorIsStopped();
1616     
1617     resumeCompilerThreads();
1618 }
1619
1620 bool Heap::stopTheMutator()
1621 {
1622     for (;;) {
1623         unsigned oldState = m_worldState.load();
1624         if (oldState & stoppedBit) {
1625             RELEASE_ASSERT(!(oldState & hasAccessBit));
1626             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1627             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1628             return true;
1629         }
1630         
1631         if (oldState & mutatorHasConnBit) {
1632             RELEASE_ASSERT(!(oldState & hasAccessBit));
1633             RELEASE_ASSERT(!(oldState & stoppedBit));
1634             return false;
1635         }
1636
1637         if (!(oldState & hasAccessBit)) {
1638             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1639             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1640             // We can stop the world instantly.
1641             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1642                 return true;
1643             continue;
1644         }
1645         
1646         // Transfer the conn to the mutator and bail.
1647         RELEASE_ASSERT(oldState & hasAccessBit);
1648         RELEASE_ASSERT(!(oldState & stoppedBit));
1649         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1650         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1651             if (false)
1652                 dataLog("Handed off the conn.\n");
1653             m_stopIfNecessaryTimer->scheduleSoon();
1654             ParkingLot::unparkAll(&m_worldState);
1655             return false;
1656         }
1657     }
1658 }
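// stopTheMutator() and the other m_worldState manipulations below share one lock-free idiom:
// load the state, validate it with assertions, compute the new state, and publish it with a
// weak compareExchange, retrying if another thread raced us. Stripped of the bit-specific
// logic, the shape is roughly this ("someBit" is just a placeholder for one of the bits above):
//
//     for (;;) {
//         unsigned oldState = m_worldState.load();
//         unsigned newState = oldState | someBit; // or: oldState & ~someBit
//         if (m_worldState.compareExchangeWeak(oldState, newState))
//             break; // the transition is now visible to every other thread
//     }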
1659
1660 NEVER_INLINE void Heap::resumeTheMutator()
1661 {
1662     if (false)
1663         dataLog("Resuming the mutator.\n");
1664     for (;;) {
1665         unsigned oldState = m_worldState.load();
1666         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1667             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1668             RELEASE_ASSERT_NOT_REACHED();
1669         }
1670         if (oldState & mutatorHasConnBit) {
1671             dataLog("Fatal: mutator has the conn.\n");
1672             RELEASE_ASSERT_NOT_REACHED();
1673         }
1674         
1675         if (!(oldState & stoppedBit)) {
1676             if (false)
1677                 dataLog("Returning because not stopped.\n");
1678             return;
1679         }
1680         
1681         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1682             if (false)
1683                 dataLog("CASing and returning.\n");
1684             ParkingLot::unparkAll(&m_worldState);
1685             return;
1686         }
1687     }
1688 }
1689
1690 void Heap::stopIfNecessarySlow()
1691 {
1692     while (stopIfNecessarySlow(m_worldState.load())) { }
1693     
1694     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1695     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1696     
1697     handleGCDidJIT();
1698     handleNeedFinalize();
1699     m_mutatorDidRun = true;
1700 }
1701
1702 bool Heap::stopIfNecessarySlow(unsigned oldState)
1703 {
1704     RELEASE_ASSERT(oldState & hasAccessBit);
1705     RELEASE_ASSERT(!(oldState & stoppedBit));
1706     
1707     // It's possible for us to wake up with finalization already requested but the world not yet
1708     // resumed. If that happens, we can't run finalization yet.
1709     if (handleNeedFinalize(oldState))
1710         return true;
1711
1712     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1713     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1714     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1715     // and there would be some other bit indicating whether we were in some GC phase other than the
1716     // NotRunning or Concurrent ones.
1717     if (oldState & mutatorHasConnBit)
1718         collectInMutatorThread();
1719     
1720     return false;
1721 }
1722
1723 NEVER_INLINE void Heap::collectInMutatorThread()
1724 {
1725     CollectingScope collectingScope(*this);
1726     for (;;) {
1727         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1728         switch (result) {
1729         case RunCurrentPhaseResult::Finished:
1730             return;
1731         case RunCurrentPhaseResult::Continue:
1732             break;
1733         case RunCurrentPhaseResult::NeedCurrentThreadState:
1734             sanitizeStackForVM(m_vm);
1735             auto lambda = [&] (CurrentThreadState& state) {
1736                 for (;;) {
1737                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1738                     switch (result) {
1739                     case RunCurrentPhaseResult::Finished:
1740                         return;
1741                     case RunCurrentPhaseResult::Continue:
1742                         break;
1743                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1744                         RELEASE_ASSERT_NOT_REACHED();
1745                         break;
1746                     }
1747                 }
1748             };
1749             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1750             return;
1751         }
1752     }
1753 }
1754
1755 template<typename Func>
1756 void Heap::waitForCollector(const Func& func)
1757 {
1758     for (;;) {
1759         bool done;
1760         {
1761             LockHolder locker(*m_threadLock);
1762             done = func(locker);
1763             if (!done) {
1764                 setMutatorWaiting();
1765                 
1766                 // At this point, the collector knows that we intend to wait, and it will clear the
1767                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1768                 // prevents us from parking unless the world is also being stopped. Unparking after
1769                 // clearing means that if the clearing happens after we park, then we will still unpark.
1770             }
1771         }
1772         
1773         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1774         unsigned oldState = m_worldState.load();
1775         if (stopIfNecessarySlow(oldState))
1776             continue;
1777         
1778         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1779         // do the collection.
1780         relinquishConn();
1781         
1782         if (done) {
1783             clearMutatorWaiting(); // Clean up just in case.
1784             return;
1785         }
1786         
1787         // If mutatorWaitingBit is still set then we want to wait.
1788         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1789     }
1790 }
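// The handshake above relies on ordering on both sides: the mutator sets mutatorWaitingBit and
// then parks only if m_worldState still holds the expected value, while the collector clears the
// bit and then unparks when the cycle finishes. Roughly:
//
//     mutator                                         collector (end of GC cycle)
//     -------                                         ----------------------------
//     setMutatorWaiting();                            clearMutatorWaiting();
//     ParkingLot::compareAndPark(&m_worldState,       ParkingLot::unparkAll(&m_worldState);
//         oldState | mutatorWaitingBit);
//
// If the clear lands before we park, compareAndPark fails and we loop around; if it lands after
// we park, unparkAll wakes us. Either way the mutator cannot be stranded.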
1791
1792 void Heap::acquireAccessSlow()
1793 {
1794     for (;;) {
1795         unsigned oldState = m_worldState.load();
1796         RELEASE_ASSERT(!(oldState & hasAccessBit));
1797         
1798         if (oldState & stoppedBit) {
1799             if (verboseStop) {
1800                 dataLog("Stopping in acquireAccess!\n");
1801                 WTFReportBacktrace();
1802             }
1803             // Wait until we're not stopped anymore.
1804             ParkingLot::compareAndPark(&m_worldState, oldState);
1805             continue;
1806         }
1807         
1808         RELEASE_ASSERT(!(oldState & stoppedBit));
1809         unsigned newState = oldState | hasAccessBit;
1810         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1811             handleGCDidJIT();
1812             handleNeedFinalize();
1813             m_mutatorDidRun = true;
1814             stopIfNecessary();
1815             return;
1816         }
1817     }
1818 }
1819
1820 void Heap::releaseAccessSlow()
1821 {
1822     for (;;) {
1823         unsigned oldState = m_worldState.load();
1824         if (!(oldState & hasAccessBit)) {
1825             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1826             RELEASE_ASSERT_NOT_REACHED();
1827         }
1828         if (oldState & stoppedBit) {
1829             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1830             RELEASE_ASSERT_NOT_REACHED();
1831         }
1832         
1833         if (handleNeedFinalize(oldState))
1834             continue;
1835         
1836         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1837         
1838         if ((oldState & mutatorHasConnBit)
1839             && m_nextPhase != m_currentPhase) {
1840             // This means that the collector thread had given us the conn so that we would do something
1841             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1842             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1843             // sure to have work to do.
1844             newState |= stoppedBit;
1845         }
1846
1847         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1848             if (oldState & mutatorHasConnBit)
1849                 finishRelinquishingConn();
1850             return;
1851         }
1852     }
1853 }
1854
1855 bool Heap::relinquishConn(unsigned oldState)
1856 {
1857     RELEASE_ASSERT(oldState & hasAccessBit);
1858     RELEASE_ASSERT(!(oldState & stoppedBit));
1859     
1860     if (!(oldState & mutatorHasConnBit))
1861         return false; // Done.
1862     
1863     if (m_threadShouldStop)
1864         return false;
1865     
1866     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1867         return true; // Loop around.
1868     
1869     finishRelinquishingConn();
1870     return true;
1871 }
1872
1873 void Heap::finishRelinquishingConn()
1874 {
1875     if (false)
1876         dataLog("Relinquished the conn.\n");
1877     
1878     sanitizeStackForVM(m_vm);
1879     
1880     auto locker = holdLock(*m_threadLock);
1881     if (!m_requests.isEmpty())
1882         m_threadCondition->notifyOne(locker);
1883     ParkingLot::unparkAll(&m_worldState);
1884 }
1885
1886 void Heap::relinquishConn()
1887 {
1888     while (relinquishConn(m_worldState.load())) { }
1889 }
1890
1891 bool Heap::handleGCDidJIT(unsigned oldState)
1892 {
1893     RELEASE_ASSERT(oldState & hasAccessBit);
1894     if (!(oldState & gcDidJITBit))
1895         return false;
1896     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1897         WTF::crossModifyingCodeFence();
1898         return true;
1899     }
1900     return true;
1901 }
1902
1903 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1904 {
1905     RELEASE_ASSERT(oldState & hasAccessBit);
1906     RELEASE_ASSERT(!(oldState & stoppedBit));
1907     
1908     if (!(oldState & needFinalizeBit))
1909         return false;
1910     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1911         finalize();
1912         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1913         // which case they would be waiting for us to release heap access.
1914         ParkingLot::unparkAll(&m_worldState);
1915         return true;
1916     }
1917     return true;
1918 }
1919
1920 void Heap::handleGCDidJIT()
1921 {
1922     while (handleGCDidJIT(m_worldState.load())) { }
1923 }
1924
1925 void Heap::handleNeedFinalize()
1926 {
1927     while (handleNeedFinalize(m_worldState.load())) { }
1928 }
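// Note that handleGCDidJIT(unsigned) and handleNeedFinalize(unsigned) return true both when they
// did the work and when their compareExchangeWeak lost a race, so these loops simply retry until
// the corresponding bit is observed to be clear.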
1929
1930 void Heap::setGCDidJIT()
1931 {
1932     m_worldState.transaction(
1933         [&] (unsigned& state) -> bool {
1934             RELEASE_ASSERT(state & stoppedBit);
1935             state |= gcDidJITBit;
1936             return true;
1937         });
1938 }
1939
1940 void Heap::setNeedFinalize()
1941 {
1942     m_worldState.exchangeOr(needFinalizeBit);
1943     ParkingLot::unparkAll(&m_worldState);
1944     m_stopIfNecessaryTimer->scheduleSoon();
1945 }
1946
1947 void Heap::waitWhileNeedFinalize()
1948 {
1949     for (;;) {
1950         unsigned oldState = m_worldState.load();
1951         if (!(oldState & needFinalizeBit)) {
1952             // This means that either there was no finalize request or the main thread will finalize
1953             // with heap access, so a subsequent call to stopTheWorld() will return only when
1954             // finalize finishes.
1955             return;
1956         }
1957         ParkingLot::compareAndPark(&m_worldState, oldState);
1958     }
1959 }
1960
1961 void Heap::setMutatorWaiting()
1962 {
1963     m_worldState.exchangeOr(mutatorWaitingBit);
1964 }
1965
1966 void Heap::clearMutatorWaiting()
1967 {
1968     m_worldState.exchangeAnd(~mutatorWaitingBit);
1969 }
1970
1971 void Heap::notifyThreadStopping(const AbstractLocker&)
1972 {
1973     m_threadIsStopping = true;
1974     clearMutatorWaiting();
1975     ParkingLot::unparkAll(&m_worldState);
1976 }
1977
1978 void Heap::finalize()
1979 {
1980     MonotonicTime before;
1981     if (Options::logGC()) {
1982         before = MonotonicTime::now();
1983         dataLog("[GC<", RawPointer(this), ">: finalize ");
1984     }
1985     
1986     {
1987         SweepingScope sweepingScope(*this);
1988         deleteUnmarkedCompiledCode();
1989         deleteSourceProviderCaches();
1990         sweepInFinalize();
1991     }
1992     
1993     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1994         cache->clear();
1995     
1996     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
1997         callback.run(*vm());
1998     
1999     if (Options::sweepSynchronously())
2000         sweepSynchronously();
2001
2002     if (Options::logGC()) {
2003         MonotonicTime after = MonotonicTime::now();
2004         dataLog((after - before).milliseconds(), "ms]\n");
2005     }
2006 }
2007
2008 Heap::Ticket Heap::requestCollection(GCRequest request)
2009 {
2010     stopIfNecessary();
2011     
2012     ASSERT(vm()->currentThreadIsHoldingAPILock());
2013     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2014     
2015     LockHolder locker(*m_threadLock);
2016     // We may be able to steal the conn. That only works if the collector is definitely not running
2017     // right now. This is an optimization that prevents the collector thread from ever starting in most
2018     // cases.
2019     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2020     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2021         if (false)
2022             dataLog("Taking the conn.\n");
2023         m_worldState.exchangeOr(mutatorHasConnBit);
2024     }
2025     
2026     m_requests.append(request);
2027     m_lastGrantedTicket++;
2028     if (!(m_worldState.load() & mutatorHasConnBit))
2029         m_threadCondition->notifyOne(locker);
2030     return m_lastGrantedTicket;
2031 }
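// Usage sketch: synchronous callers pair this with waitForCollection(), roughly
//
//     Heap::Ticket ticket = requestCollection(request);
//     waitForCollection(ticket); // returns once m_lastServedTicket >= ticket
//
// whereas asynchronous callers simply ignore the returned ticket.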
2032
2033 void Heap::waitForCollection(Ticket ticket)
2034 {
2035     waitForCollector(
2036         [&] (const AbstractLocker&) -> bool {
2037             return m_lastServedTicket >= ticket;
2038         });
2039 }
2040
2041 void Heap::sweepInFinalize()
2042 {
2043     m_objectSpace.sweepLargeAllocations();
2044     
2045     auto sweepBlock = [&] (MarkedBlock::Handle* handle) {
2046         handle->sweep(nullptr);
2047     };
2048     
2049     vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock);
2050 }
2051
2052 void Heap::suspendCompilerThreads()
2053 {
2054 #if ENABLE(DFG_JIT)
2055     // We ensure the worklists so that it's not possible for the mutator to start a new worklist
2056     // after we have suspended the ones that it had started before. That's not very expensive since
2057     // the worklists use AutomaticThreads anyway.
2058     for (unsigned i = DFG::numberOfWorklists(); i--;)
2059         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2060 #endif
2061 }
2062
2063 void Heap::willStartCollection()
2064 {
2065     if (Options::logGC())
2066         dataLog("=> ");
2067     
2068     if (shouldDoFullCollection()) {
2069         m_collectionScope = CollectionScope::Full;
2070         m_shouldDoFullCollection = false;
2071         if (Options::logGC())
2072             dataLog("FullCollection, ");
2073         if (false)
2074             dataLog("Full collection!\n");
2075     } else {
2076         m_collectionScope = CollectionScope::Eden;
2077         if (Options::logGC())
2078             dataLog("EdenCollection, ");
2079         if (false)
2080             dataLog("Eden collection!\n");
2081     }
2082     if (m_collectionScope == CollectionScope::Full) {
2083         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2084         m_extraMemorySize = 0;
2085         m_deprecatedExtraMemorySize = 0;
2086 #if ENABLE(RESOURCE_USAGE)
2087         m_externalMemorySize = 0;
2088 #endif
2089
2090         if (m_fullActivityCallback)
2091             m_fullActivityCallback->willCollect();
2092     } else {
2093         ASSERT(m_collectionScope == CollectionScope::Eden);
2094         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2095     }
2096
2097     if (m_edenActivityCallback)
2098         m_edenActivityCallback->willCollect();
2099
2100     for (auto* observer : m_observers)
2101         observer->willGarbageCollect();
2102 }
2103
2104 void Heap::prepareForMarking()
2105 {
2106     m_objectSpace.prepareForMarking();
2107 }
2108
2109 void Heap::reapWeakHandles()
2110 {
2111     m_objectSpace.reapWeakSets();
2112 }
2113
2114 void Heap::pruneStaleEntriesFromWeakGCMaps()
2115 {
2116     if (m_collectionScope != CollectionScope::Full)
2117         return;
2118     for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2119         weakGCMap->pruneStaleEntries();
2120 }
2121
2122 void Heap::sweepArrayBuffers()
2123 {
2124     m_arrayBuffers.sweep();
2125 }
2126
2127 void Heap::snapshotUnswept()
2128 {
2129     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2130     m_objectSpace.snapshotUnswept();
2131 }
2132
2133 void Heap::deleteSourceProviderCaches()
2134 {
2135     if (*m_lastCollectionScope == CollectionScope::Full)
2136         m_vm->clearSourceProviderCaches();
2137 }
2138
2139 void Heap::notifyIncrementalSweeper()
2140 {
2141     if (m_collectionScope == CollectionScope::Full) {
2142         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2143             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2144     }
2145
2146     m_sweeper->startSweeping();
2147 }
2148
2149 void Heap::updateAllocationLimits()
2150 {
2151     static const bool verbose = false;
2152     
2153     if (verbose) {
2154         dataLog("\n");
2155         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2156     }
2157     
2158     // Calculate our current heap size threshold for the purpose of figuring out when we should
2159     // run another collection. This isn't the same as either size() or capacity(), though it should
2160     // be somewhere between the two. The key is to match the size calculations involved in calls to
2161     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2162     // fragmentation, we may have size() much smaller than capacity().
2163     size_t currentHeapSize = 0;
2164
2165     // For marked space, we use the total number of bytes visited. This matches the logic for
2166     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2167     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2168     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2169     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2170     currentHeapSize += m_totalBytesVisited;
2171     if (verbose)
2172         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2173
2174     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2175     // extra memory reporting.
2176     currentHeapSize += extraMemorySize();
2177     if (!ASSERT_DISABLED) {
2178         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2179         checkedCurrentHeapSize += extraMemorySize();
2180         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2181     }
2182
2183     if (verbose)
2184         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2185     
2186     if (m_collectionScope == CollectionScope::Full) {
2187         // To avoid pathological GC churn in very small and very large heaps, we set
2188         // the new allocation limit based on the current size of the heap, with a
2189         // fixed minimum.
2190         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2191         if (verbose)
2192             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2193         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2194         if (verbose)
2195             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2196         m_sizeAfterLastFullCollect = currentHeapSize;
2197         if (verbose)
2198             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2199         m_bytesAbandonedSinceLastFullCollect = 0;
2200         if (verbose)
2201             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2202     } else {
2203         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2204         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2205         // But the accounting is sloppy, so we have to defend against overflow.
2206         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2207         if (verbose)
2208             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2209         m_sizeAfterLastEdenCollect = currentHeapSize;
2210         if (verbose)
2211             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2212         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2213         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2214         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2215             m_shouldDoFullCollection = true;
2216         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2217         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2218         if (verbose)
2219             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2220         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2221         if (verbose)
2222             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2223         if (m_fullActivityCallback) {
2224             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2225             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2226         }
2227     }
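    // Worked example of the Eden branch above (numbers are purely illustrative): suppose the
    // last collection left m_sizeAfterLastCollect = 80MB and m_maxHeapSize = 120MB, so
    // m_maxEdenSize = 40MB. If this Eden collection measures currentHeapSize = 90MB, then
    // m_maxHeapSize grows to 120 + (90 - 80) = 130MB and m_maxEdenSize becomes 130 - 90 = 40MB.
    // The total limit drifts upward with survivors, but the nursery allowance stays fixed at
    // 40MB, which is what "ensure that the nursery size is fixed" means.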
2228
2229 #if PLATFORM(IOS)
2230     // Get critical memory threshold for next cycle.
2231     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2232 #endif
2233
2234     m_sizeAfterLastCollect = currentHeapSize;
2235     if (verbose)
2236         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2237     m_bytesAllocatedThisCycle = 0;
2238
2239     if (Options::logGC())
2240         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2241 }
2242
2243 void Heap::didFinishCollection()
2244 {
2245     m_afterGC = MonotonicTime::now();
2246     CollectionScope scope = *m_collectionScope;
2247     if (scope == CollectionScope::Full)
2248         m_lastFullGCLength = m_afterGC - m_beforeGC;
2249     else
2250         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2251
2252 #if ENABLE(RESOURCE_USAGE)
2253     ASSERT(externalMemorySize() <= extraMemorySize());
2254 #endif
2255
2256     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2257         gatherExtraHeapSnapshotData(*heapProfiler);
2258         removeDeadHeapSnapshotNodes(*heapProfiler);
2259     }
2260
2261     if (UNLIKELY(m_verifier))
2262         m_verifier->endGC();
2263
2264     RELEASE_ASSERT(m_collectionScope);
2265     m_lastCollectionScope = m_collectionScope;
2266     m_collectionScope = std::nullopt;
2267
2268     for (auto* observer : m_observers)
2269         observer->didGarbageCollect(scope);
2270 }
2271
2272 void Heap::resumeCompilerThreads()
2273 {
2274 #if ENABLE(DFG_JIT)
2275     for (unsigned i = DFG::numberOfWorklists(); i--;)
2276         DFG::existingWorklistForIndex(i).resumeAllThreads();
2277 #endif
2278 }
2279
2280 GCActivityCallback* Heap::fullActivityCallback()
2281 {
2282     return m_fullActivityCallback.get();
2283 }
2284
2285 GCActivityCallback* Heap::edenActivityCallback()
2286 {
2287     return m_edenActivityCallback.get();
2288 }
2289
2290 IncrementalSweeper& Heap::sweeper()
2291 {
2292     return *m_sweeper;
2293 }
2294
2295 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2296 {
2297     if (m_fullActivityCallback)
2298         m_fullActivityCallback->setEnabled(enable);
2299     if (m_edenActivityCallback)
2300         m_edenActivityCallback->setEnabled(enable);
2301 }
2302
2303 void Heap::didAllocate(size_t bytes)
2304 {
2305     if (m_edenActivityCallback)
2306         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2307     m_bytesAllocatedThisCycle += bytes;
2308     performIncrement(bytes);
2309 }
2310
2311 bool Heap::isValidAllocation(size_t)
2312 {
2313     if (!isValidThreadState(m_vm))
2314         return false;
2315
2316     if (isCurrentThreadBusy())
2317         return false;
2318     
2319     return true;
2320 }
2321
2322 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2323 {
2324     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2325 }
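// Usage sketch (Finalizer is a plain function pointer that receives the dying cell; MyCell and
// releaseExternalResources() are hypothetical):
//
//     static void myFinalizer(JSCell* cell)
//     {
//         static_cast<MyCell*>(cell)->releaseExternalResources();
//     }
//
//     heap.addFinalizer(cell, myFinalizer);
//
// FinalizerOwner::finalize() below then invokes the function once the weak handle's target dies.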
2326
2327 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2328 {
2329     HandleSlot slot = handle.slot();
2330     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2331     finalizer(slot->asCell());
2332     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2333 }
2334
2335 void Heap::addExecutable(ExecutableBase* executable)
2336 {
2337     m_executables.append(executable);
2338 }
2339
2340 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2341 {
2342     if (!m_fullActivityCallback) {
2343         collectNow(synchronousness, CollectionScope::Full);
2344         return;
2345     }
2346
2347     if (m_fullActivityCallback->didGCRecently()) {
2348         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2349         reportAbandonedObjectGraph();
2350         return;
2351     }
2352
2353     m_fullActivityCallback->setDidGCRecently();
2354     collectNow(synchronousness, CollectionScope::Full);
2355 }
2356
2357 bool Heap::shouldDoFullCollection()
2358 {
2359     if (!Options::useGenerationalGC())
2360         return true;
2361
2362     if (!m_currentRequest.scope)
2363         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2364     return *m_currentRequest.scope == CollectionScope::Full;
2365 }
2366
2367 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2368 {
2369     m_logicallyEmptyWeakBlocks.append(block);
2370 }
2371
2372 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2373 {
2374     if (m_logicallyEmptyWeakBlocks.isEmpty())
2375         return;
2376
2377     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2378     while (sweepNextLogicallyEmptyWeakBlock()) { }
2379 }
2380
2381 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2382 {
2383     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2384         return false;
2385
2386     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2387
2388     block->sweep();
2389     if (block->isEmpty()) {
2390         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2391         m_logicallyEmptyWeakBlocks.removeLast();
2392         WeakBlock::destroy(*this, block);
2393     } else
2394         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2395
2396     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2397         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2398         return false;
2399     }
2400
2401     return true;
2402 }
2403
2404 size_t Heap::visitCount()
2405 {
2406     size_t result = 0;
2407     forEachSlotVisitor(
2408         [&] (SlotVisitor& visitor) {
2409             result += visitor.visitCount();
2410         });
2411     return result;
2412 }
2413
2414 size_t Heap::bytesVisited()
2415 {
2416     size_t result = 0;
2417     forEachSlotVisitor(
2418         [&] (SlotVisitor& visitor) {
2419             result += visitor.bytesVisited();
2420         });
2421     return result;
2422 }
2423
2424 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2425 {
2426     // We don't know the full set of CodeBlocks until compilation has terminated.
2427     completeAllJITPlans();
2428
2429     return m_codeBlocks->iterate(func);
2430 }
2431
2432 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2433 {
2434     return m_codeBlocks->iterate(locker, func);
2435 }
2436
2437 void Heap::writeBarrierSlowPath(const JSCell* from)
2438 {
2439     if (UNLIKELY(mutatorShouldBeFenced())) {
2440         // In this case, the barrierThreshold is the tautological threshold, so from may not actually
2441         // be black. But we can't know for sure until we fire off a fence.
2442         WTF::storeLoadFence();
2443         if (from->cellState() != CellState::PossiblyBlack)
2444             return;
2445     }
2446     
2447     addToRememberedSet(from);
2448 }
2449
2450 bool Heap::isCurrentThreadBusy()
2451 {
2452     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2453 }
2454
2455 void Heap::reportExtraMemoryVisited(size_t size)
2456 {
2457     size_t* counter = &m_extraMemorySize;
2458     
2459     for (;;) {
2460         size_t oldSize = *counter;
2461         // FIXME: Change this to use SaturatedArithmetic when available.
2462         // https://bugs.webkit.org/show_bug.cgi?id=170411
2463         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2464         checkedNewSize += size;
2465         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2466         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2467             return;
2468     }
2469 }
2470
2471 #if ENABLE(RESOURCE_USAGE)
2472 void Heap::reportExternalMemoryVisited(size_t size)
2473 {
2474     size_t* counter = &m_externalMemorySize;
2475
2476     for (;;) {
2477         size_t oldSize = *counter;
2478         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2479             return;
2480     }
2481 }
2482 #endif
2483
2484 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2485 {
2486     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2487
2488     if (!m_isSafeToCollect)
2489         return;
2490     switch (mutatorState()) {
2491     case MutatorState::Running:
2492     case MutatorState::Allocating:
2493         break;
2494     case MutatorState::Sweeping:
2495     case MutatorState::Collecting:
2496         return;
2497     }
2498     if (!Options::useGC())
2499         return;
2500     
2501     if (mayNeedToStop()) {
2502         if (deferralContext)
2503             deferralContext->m_shouldGC = true;
2504         else if (isDeferred())
2505             m_didDeferGCWork = true;
2506         else
2507             stopIfNecessary();
2508     }
2509     
2510     if (UNLIKELY(Options::gcMaxHeapSize())) {
2511         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2512             return;
2513     } else {
2514         size_t bytesAllowedThisCycle = m_maxEdenSize;
2515
2516 #if PLATFORM(IOS)
2517         if (overCriticalMemoryThreshold())
2518             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2519 #endif
2520
2521         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2522             return;
2523     }
2524
2525     if (deferralContext)
2526         deferralContext->m_shouldGC = true;
2527     else if (isDeferred())
2528         m_didDeferGCWork = true;
2529     else {
2530         collectAsync();
2531         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2532     }
2533 }
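// Worked example: if the last updateAllocationLimits() left m_maxEdenSize at 40MB (an
// illustrative figure) and Options::gcMaxHeapSize() is unset, this function does nothing until
// m_bytesAllocatedThisCycle exceeds 40MB; past that point we either flag the deferral context,
// record the deferred work, or request an asynchronous collection (starting it immediately if we
// hold the conn). On iOS, being over the critical memory threshold shrinks the allowance to
// min(m_maxEdenSizeWhenCritical, m_maxEdenSize).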
2534
2535 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2536 {
2537     // Can't do anything if we're still deferred.
2538     if (m_deferralDepth)
2539         return;
2540     
2541     ASSERT(!isDeferred());
2542     
2543     m_didDeferGCWork = false;
2544     // FIXME: Bring back something like the DeferGCProbability mode.
2545     // https://bugs.webkit.org/show_bug.cgi?id=166627
2546     collectIfNecessaryOrDefer();
2547 }
2548
2549 void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2550 {
2551     m_weakGCMaps.add(weakGCMap);
2552 }
2553
2554 void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2555 {
2556     m_weakGCMaps.remove(weakGCMap);
2557 }
2558
2559 void Heap::didAllocateBlock(size_t capacity)
2560 {
2561 #if ENABLE(RESOURCE_USAGE)
2562     m_blockBytesAllocated += capacity;
2563 #else
2564     UNUSED_PARAM(capacity);
2565 #endif
2566 }
2567
2568 void Heap::didFreeBlock(size_t capacity)
2569 {
2570 #if ENABLE(RESOURCE_USAGE)
2571     m_blockBytesAllocated -= capacity;
2572 #else
2573     UNUSED_PARAM(capacity);
2574 #endif
2575 }
2576
2577 void Heap::addCoreConstraints()
2578 {
2579     m_constraintSet->add(
2580         "Cs", "Conservative Scan",
2581         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2582             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2583             m_objectSpace.prepareForConservativeScan();
2584             ConservativeRoots conservativeRoots(*this);
2585             SuperSamplerScope superSamplerScope(false);
2586             gatherStackRoots(conservativeRoots);
2587             gatherJSStackRoots(conservativeRoots);
2588             gatherScratchBufferRoots(conservativeRoots);
2589             slotVisitor.append(conservativeRoots);
2590         },
2591         ConstraintVolatility::GreyedByExecution);
2592     
2593     m_constraintSet->add(
2594         "Msr", "Misc Small Roots",
2595         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2596 #if JSC_OBJC_API_ENABLED
2597             scanExternalRememberedSet(*m_vm, slotVisitor);
2598 #endif
2599
2600             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2601                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2602             
2603             for (auto& pair : m_protectedValues)
2604                 slotVisitor.appendUnbarriered(pair.key);
2605             
2606             if (m_markListSet && m_markListSet->size())
2607                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2608             
2609             slotVisitor.appendUnbarriered(m_vm->exception());
2610             slotVisitor.appendUnbarriered(m_vm->lastException());
2611         },
2612         ConstraintVolatility::GreyedByExecution);
2613     
2614     m_constraintSet->add(
2615         "Sh", "Strong Handles",
2616         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2617             m_handleSet.visitStrongHandles(slotVisitor);
2618             m_handleStack.visit(slotVisitor);
2619         },
2620         ConstraintVolatility::GreyedByExecution);
2621     
2622     m_constraintSet->add(
2623         "D", "Debugger",
2624         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2625 #if ENABLE(SAMPLING_PROFILER)
2626             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2627                 LockHolder locker(samplingProfiler->getLock());
2628                 samplingProfiler->processUnverifiedStackTraces();
2629                 samplingProfiler->visit(slotVisitor);
2630                 if (Options::logGC() == GCLogging::Verbose)
2631                     dataLog("Sampling Profiler data:\n", slotVisitor);
2632             }
2633 #endif // ENABLE(SAMPLING_PROFILER)
2634             
2635             if (m_vm->typeProfiler())
2636                 m_vm->typeProfilerLog()->visit(slotVisitor);
2637             
2638             m_vm->shadowChicken().visitChildren(slotVisitor);
2639         },
2640         ConstraintVolatility::GreyedByExecution);
2641     
2642     m_constraintSet->add(
2643         "Jsr", "JIT Stub Routines",
2644         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2645             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2646         },
2647         ConstraintVolatility::GreyedByExecution);
2648     
2649     m_constraintSet->add(
2650         "Ws", "Weak Sets",
2651         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2652             m_objectSpace.visitWeakSets(slotVisitor);
2653         },
2654         ConstraintVolatility::GreyedByMarking);
2655     
2656     m_constraintSet->add(
2657         "Wrh", "Weak Reference Harvesters",
2658         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2659             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2660                 current->visitWeakReferences(slotVisitor);
2661         },
2662         ConstraintVolatility::GreyedByMarking);
2663     
2664 #if ENABLE(DFG_JIT)
2665     m_constraintSet->add(
2666         "Dw", "DFG Worklists",
2667         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2668             for (unsigned i = DFG::numberOfWorklists(); i--;)
2669                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2670             
2671             // FIXME: This is almost certainly unnecessary.
2672             // https://bugs.webkit.org/show_bug.cgi?id=166829
2673             DFG::iterateCodeBlocksForGC(
2674                 *m_vm,
2675                 [&] (CodeBlock* codeBlock) {
2676                     slotVisitor.appendUnbarriered(codeBlock);
2677                 });
2678             
2679             if (Options::logGC() == GCLogging::Verbose)
2680                 dataLog("DFG Worklists:\n", slotVisitor);
2681         },
2682         ConstraintVolatility::GreyedByMarking);
2683 #endif
2684     
2685     m_constraintSet->add(
2686         "Cb", "CodeBlocks",
2687         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2688             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2689                 [&] (CodeBlock* codeBlock) {
2690                     // Visit the CodeBlock as a constraint only if it's black.
2691                     if (Heap::isMarked(codeBlock)
2692                         && codeBlock->cellState() == CellState::PossiblyBlack)
2693                         slotVisitor.visitAsConstraint(codeBlock);
2694                 });
2695         },
2696         ConstraintVolatility::SeldomGreyed);
2697     
2698     m_constraintSet->add(
2699         "Mrms", "Mutator+Race Mark Stack",
2700         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2701             // Indicate to the fixpoint that we introduced work!
2702             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2703             slotVisitor.addToVisitCount(size);
2704             
2705             if (Options::logGC())
2706                 dataLog("(", size, ")");
2707             
2708             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2709             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2710         },
2711         [this] (SlotVisitor&) -> double {
2712             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2713         },
2714         ConstraintVolatility::GreyedByExecution);
2715 }
2716
2717 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2718 {
2719     PreventCollectionScope preventCollectionScope(*this);
2720     m_constraintSet->add(WTFMove(constraint));
2721 }
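// Usage sketch: additional constraints can be registered through addMarkingConstraint() or, for
// built-ins, via m_constraintSet->add(...) with the same shape as the core constraints above
// ("Xr" / "Extra Roots" are made-up names for illustration):
//
//     m_constraintSet->add(
//         "Xr", "Extra Roots",
//         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
//             // append whatever strong roots this subsystem owns to slotVisitor
//         },
//         ConstraintVolatility::GreyedByExecution);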
2722
2723 void Heap::notifyIsSafeToCollect()
2724 {
2725     MonotonicTime before;
2726     if (Options::logGC()) {
2727         before = MonotonicTime::now();
2728         dataLog("[GC<", RawPointer(this), ">: starting ");
2729     }
2730     
2731     addCoreConstraints();
2732     
2733     m_isSafeToCollect = true;
2734     
2735     if (Options::collectContinuously()) {
2736         m_collectContinuouslyThread = WTF::Thread::create(
2737             "JSC DEBUG Continuous GC",
2738             [this] () {
2739                 MonotonicTime initialTime = MonotonicTime::now();
2740                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2741                 while (!m_shouldStopCollectingContinuously) {
2742                     {
2743                         LockHolder locker(*m_threadLock);
2744                         if (m_requests.isEmpty()) {
2745                             m_requests.append(std::nullopt);
2746                             m_lastGrantedTicket++;
2747                             m_threadCondition->notifyOne(locker);
2748                         }
2749                     }
2750                     
2751                     {
2752                         LockHolder locker(m_collectContinuouslyLock);
2753                         Seconds elapsed = MonotonicTime::now() - initialTime;
2754                         Seconds elapsedInPeriod = elapsed % period;
2755                         MonotonicTime timeToWakeUp =
2756                             initialTime + elapsed - elapsedInPeriod + period;
2757                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2758                             m_collectContinuouslyCondition.waitUntil(
2759                                 m_collectContinuouslyLock, timeToWakeUp);
2760                         }
2761                     }
2762                 }
2763             });
2764     }
2765     
2766     if (Options::logGC())
2767         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2768 }
2769
2770 void Heap::preventCollection()
2771 {
2772     if (!m_isSafeToCollect)
2773         return;
2774     
2775     // This prevents the collectContinuously thread from starting a collection.
2776     m_collectContinuouslyLock.lock();
2777     
2778     // Wait for all collections to finish.
2779     waitForCollector(
2780         [&] (const AbstractLocker&) -> bool {
2781             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2782             return m_lastServedTicket == m_lastGrantedTicket;
2783         });
2784     
2785     // Now a collection can only start if this thread starts it.
2786     RELEASE_ASSERT(!m_collectionScope);
2787 }
2788
2789 void Heap::allowCollection()
2790 {
2791     if (!m_isSafeToCollect)
2792         return;
2793     
2794     m_collectContinuouslyLock.unlock();
2795 }
2796
2797 template<typename Func>
2798 void Heap::forEachSlotVisitor(const Func& func)
2799 {
2800     auto locker = holdLock(m_parallelSlotVisitorLock);
2801     func(*m_collectorSlotVisitor);
2802     func(*m_mutatorSlotVisitor);
2803     for (auto& slotVisitor : m_parallelSlotVisitors)
2804         func(*slotVisitor);
2805 }
2806
2807 void Heap::setMutatorShouldBeFenced(bool value)
2808 {
2809     m_mutatorShouldBeFenced = value;
2810     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2811 }
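// The threshold trick: the inline write barrier compares the source cell's state against
// m_barrierThreshold before taking the slow path. With blackThreshold, only cells that may be
// black fall through to writeBarrierSlowPath(); with tautologicalThreshold, every cell does, and
// the slow path's storeLoadFence() plus re-check of CellState::PossiblyBlack becomes the real
// filter. That is what the "tautological threshold" comment in writeBarrierSlowPath() refers to.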
2812
2813 void Heap::performIncrement(size_t bytes)
2814 {
2815     if (!m_objectSpace.isMarking())
2816         return;
2817
2818     m_incrementBalance += bytes * Options::gcIncrementScale();
2819
2820     // Guard against the balance going haywire. Since this is an optimization, it's OK to go back to
2821     // any consistent state when the double goes wild.
2822     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2823         m_incrementBalance = 0;
2824     
2825     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2826         return;
2827
2828     double targetBytes = m_incrementBalance;
2829     if (targetBytes <= 0)
2830         return;
2831     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2832
2833     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2834     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2835     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2836     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2837     m_incrementBalance -= bytesVisited;
2838 }
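// Worked example (option values are illustrative, not the defaults): with gcIncrementScale == 2
// and gcIncrementBytes == 8192, a 3000-byte allocation adds 6000 to m_incrementBalance, which is
// below the threshold, so nothing is drained. The next 3000-byte allocation brings the balance to
// 12000, so the mutator's SlotVisitor drains roughly 12000 bytes' worth of marking (capped at
// gcIncrementMaxBytes). If it actually visits 13000 bytes, the balance ends at -1000, crediting
// the overshoot against future increments.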
2839
2840 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2841 {
2842     m_heapFinalizerCallbacks.append(callback);
2843 }
2844
2845 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2846 {
2847     m_heapFinalizerCallbacks.removeFirst(callback);
2848 }
2849
2850 } // namespace JSC