Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "JSWebAssemblyCodeBlock.h"
51 #include "MachineStackMarker.h"
52 #include "MarkedAllocatorInlines.h"
53 #include "MarkedSpaceInlines.h"
54 #include "MarkingConstraintSet.h"
55 #include "PreventCollectionScope.h"
56 #include "SamplingProfiler.h"
57 #include "ShadowChicken.h"
58 #include "SpaceTimeMutatorScheduler.h"
59 #include "SubspaceInlines.h"
60 #include "SuperSampler.h"
61 #include "StochasticSpaceTimeMutatorScheduler.h"
62 #include "StopIfNecessaryTimer.h"
63 #include "SweepingScope.h"
64 #include "SynchronousStopTheWorldMutatorScheduler.h"
65 #include "TypeProfiler.h"
66 #include "TypeProfilerLog.h"
67 #include "UnlinkedCodeBlock.h"
68 #include "VM.h"
69 #include "WasmMemory.h"
70 #include "WeakSetInlines.h"
71 #include <algorithm>
72 #if PLATFORM(IOS)
73 #include <bmalloc/bmalloc.h>
74 #endif
75 #include <wtf/CurrentTime.h>
76 #include <wtf/ListDump.h>
77 #include <wtf/MainThread.h>
78 #include <wtf/ParallelVectorIterator.h>
79 #include <wtf/ProcessID.h>
80 #include <wtf/RAMSize.h>
81 #include <wtf/SimpleStats.h>
82 #include <wtf/Threading.h>
83
84 #if USE(FOUNDATION)
85 #if __has_include(<objc/objc-internal.h>)
86 #include <objc/objc-internal.h>
87 #else
88 extern "C" void* objc_autoreleasePoolPush(void);
89 extern "C" void objc_autoreleasePoolPop(void *context);
90 #endif
91 #endif // USE(FOUNDATION)
92
93 using namespace std;
94
95 namespace JSC {
96
97 namespace {
98
99 bool verboseStop = false;
100
101 double maxPauseMS(double thisPauseMS)
102 {
103     static double maxPauseMS;
104     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
105     return maxPauseMS;
106 }
107
108 size_t minHeapSize(HeapType heapType, size_t ramSize)
109 {
110     if (heapType == LargeHeap) {
111         double result = min(
112             static_cast<double>(Options::largeHeapSize()),
113             ramSize * Options::smallHeapRAMFraction());
114         return static_cast<size_t>(result);
115     }
116     return Options::smallHeapSize();
117 }
118
119 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
120 {
121 #if PLATFORM(IOS)
122     size_t memoryFootprint = bmalloc::api::memoryFootprint();
123     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
124         return Options::smallHeapGrowthFactor() * heapSize;
125     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
126         return Options::mediumHeapGrowthFactor() * heapSize;
127 #else
128     if (heapSize < ramSize * Options::smallHeapRAMFraction())
129         return Options::smallHeapGrowthFactor() * heapSize;
130     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
131         return Options::mediumHeapGrowthFactor() * heapSize;
132 #endif
133     return Options::largeHeapGrowthFactor() * heapSize;
134 }
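// Worked example with illustrative (not necessarily shipping) option values: take
// smallHeapRAMFraction = 0.25, mediumHeapRAMFraction = 0.5, smallHeapGrowthFactor = 2,
// mediumHeapGrowthFactor = 1.5 and largeHeapGrowthFactor = 1.25 on a 1GB device. A 100MB heap
// (on iOS, a 100MB process footprint) is under the 256MB "small" band, so the heap may grow to
// ~200MB before the next full collection; once past the 512MB "medium" band it would only get
// ~125MB of headroom.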
135
136 bool isValidSharedInstanceThreadState(VM* vm)
137 {
138     return vm->currentThreadIsHoldingAPILock();
139 }
140
141 bool isValidThreadState(VM* vm)
142 {
143     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
144         return false;
145
146     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
147         return false;
148
149     return true;
150 }
151
152 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
153 {
154     const char* typeName = "[unknown]";
155     const ClassInfo* info = cell->classInfo(vm);
156     if (info && info->className)
157         typeName = info->className;
158     set.add(typeName);
159 }
160
161 bool measurePhaseTiming()
162 {
163     return false;
164 }
165
166 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
167 {
168     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
169     static std::once_flag once;
170     std::call_once(
171         once,
172         [] {
173             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
174         });
175     return *result;
176 }
177
178 SimpleStats& timingStats(const char* name, CollectionScope scope)
179 {
180     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
181 }
182
183 class TimingScope {
184 public:
185     TimingScope(std::optional<CollectionScope> scope, const char* name)
186         : m_scope(scope)
187         , m_name(name)
188     {
189         if (measurePhaseTiming())
190             m_before = monotonicallyIncreasingTimeMS();
191     }
192     
193     TimingScope(Heap& heap, const char* name)
194         : TimingScope(heap.collectionScope(), name)
195     {
196     }
197     
198     void setScope(std::optional<CollectionScope> scope)
199     {
200         m_scope = scope;
201     }
202     
203     void setScope(Heap& heap)
204     {
205         setScope(heap.collectionScope());
206     }
207     
208     ~TimingScope()
209     {
210         if (measurePhaseTiming()) {
211             double after = monotonicallyIncreasingTimeMS();
212             double timing = after - m_before;
213             SimpleStats& stats = timingStats(m_name, *m_scope);
214             stats.add(timing);
215             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
216         }
217     }
218 private:
219     std::optional<CollectionScope> m_scope;
220     double m_before;
221     const char* m_name;
222 };
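// TimingScope is used like this (see Heap::beginMarking() below):
//
//     TimingScope timingScope(*this, "Heap::beginMarking");
//
// It accumulates per-phase wall-clock time bucketed by collection scope (Eden vs. Full) and logs
// it, but only when measurePhaseTiming() above is changed to return true.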
223
224 } // anonymous namespace
225
226 class Heap::Thread : public AutomaticThread {
227 public:
228     Thread(const AbstractLocker& locker, Heap& heap)
229         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
230         , m_heap(heap)
231     {
232     }
233     
234 protected:
235     PollResult poll(const AbstractLocker& locker) override
236     {
237         if (m_heap.m_threadShouldStop) {
238             m_heap.notifyThreadStopping(locker);
239             return PollResult::Stop;
240         }
241         if (m_heap.shouldCollectInCollectorThread(locker))
242             return PollResult::Work;
243         return PollResult::Wait;
244     }
245     
246     WorkResult work() override
247     {
248         m_heap.collectInCollectorThread();
249         return WorkResult::Continue;
250     }
251     
252     void threadDidStart() override
253     {
254         WTF::registerGCThread(GCThreadType::Main);
255     }
256
257 private:
258     Heap& m_heap;
259 };
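// Heap::Thread is the dedicated collector thread. AutomaticThread keeps it parked on
// m_threadCondition until poll() says there is something to do: a pending collection request that
// the mutator (which may hold "the conn") is not going to service itself, or a shutdown signal.
// Each unit of work is one call to collectInCollectorThread(), which drives the collector phase
// machine defined below.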
260
261 Heap::Heap(VM* vm, HeapType heapType)
262     : m_heapType(heapType)
263     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
264     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
265     , m_sizeAfterLastCollect(0)
266     , m_sizeAfterLastFullCollect(0)
267     , m_sizeBeforeLastFullCollect(0)
268     , m_sizeAfterLastEdenCollect(0)
269     , m_sizeBeforeLastEdenCollect(0)
270     , m_bytesAllocatedThisCycle(0)
271     , m_webAssemblyFastMemoriesAllocatedThisCycle(0)
272     , m_bytesAbandonedSinceLastFullCollect(0)
273     , m_maxEdenSize(m_minBytesPerCycle)
274     , m_maxHeapSize(m_minBytesPerCycle)
275     , m_shouldDoFullCollection(false)
276     , m_totalBytesVisited(0)
277     , m_objectSpace(this)
278     , m_extraMemorySize(0)
279     , m_deprecatedExtraMemorySize(0)
280     , m_machineThreads(std::make_unique<MachineThreads>())
281     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
282     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
283     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
284     , m_raceMarkStack(std::make_unique<MarkStackArray>())
285     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
286     , m_handleSet(vm)
287     , m_codeBlocks(std::make_unique<CodeBlockSet>())
288     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
289     , m_isSafeToCollect(false)
290     , m_vm(vm)
291     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
292     // schedule the timer if we've never done a collection.
293     , m_lastFullGCLength(0.01)
294     , m_lastEdenGCLength(0.01)
295     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
296     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
297     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
298     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
299     , m_deferralDepth(0)
300 #if USE(FOUNDATION)
301     , m_delayedReleaseRecursionCount(0)
302 #endif
303     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
304     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
305     , m_helperClient(&heapHelperPool())
306     , m_threadLock(Box<Lock>::create())
307     , m_threadCondition(AutomaticThreadCondition::create())
308 {
309     m_worldState.store(0);
310     
311     if (Options::useConcurrentGC()) {
312         if (Options::useStochasticMutatorScheduler())
313             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
314         else
315             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
316     } else {
317         // We simulate turning off concurrent GC by making the scheduler say that the world
318         // should always be stopped when the collector is running.
319         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
320     }
321     
322     if (Options::verifyHeap())
323         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
324     
325     m_collectorSlotVisitor->optimizeForStoppedMutator();
326
327     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
328     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
329     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
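    // For example, with illustrative values criticalGCMemoryThreshold = 0.80 and 4GB of RAM,
    // memoryAboveCriticalThreshold is ~800MB, so m_maxEdenSizeWhenCritical comes out to ~200MB.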
330
331     LockHolder locker(*m_threadLock);
332     m_thread = adoptRef(new Thread(locker, *this));
333 }
334
335 Heap::~Heap()
336 {
337     forEachSlotVisitor(
338         [&] (SlotVisitor& visitor) {
339             visitor.clearMarkStacks();
340         });
341     m_mutatorMarkStack->clear();
342     m_raceMarkStack->clear();
343     
344     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
345         WeakBlock::destroy(*this, block);
346 }
347
348 bool Heap::isPagedOut(double deadline)
349 {
350     return m_objectSpace.isPagedOut(deadline);
351 }
352
353 // The VM is being destroyed and the collector will never run again.
354 // Run all pending finalizers now because we won't get another chance.
355 void Heap::lastChanceToFinalize()
356 {
357     MonotonicTime before;
358     if (Options::logGC()) {
359         before = MonotonicTime::now();
360         dataLog("[GC<", RawPointer(this), ">: shutdown ");
361     }
362     
363     RELEASE_ASSERT(!m_vm->entryScope);
364     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
365     
366     if (m_collectContinuouslyThread) {
367         {
368             LockHolder locker(m_collectContinuouslyLock);
369             m_shouldStopCollectingContinuously = true;
370             m_collectContinuouslyCondition.notifyOne();
371         }
372         m_collectContinuouslyThread->waitForCompletion();
373     }
374     
375     if (Options::logGC())
376         dataLog("1");
377     
378     // Prevent new collections from being started. This is probably not even necessary, since we're not
379     // going to call into anything that starts collections. Still, this makes the algorithm more
380     // obviously sound.
381     m_isSafeToCollect = false;
382     
383     if (Options::logGC())
384         dataLog("2");
385
386     bool isCollecting;
387     {
388         auto locker = holdLock(*m_threadLock);
389         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
390         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
391     }
392     if (isCollecting) {
393         if (Options::logGC())
394             dataLog("...]\n");
395         
396         // Wait for the current collection to finish.
397         waitForCollector(
398             [&] (const AbstractLocker&) -> bool {
399                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
400                 return m_lastServedTicket == m_lastGrantedTicket;
401             });
402         
403         if (Options::logGC())
404             dataLog("[GC<", RawPointer(this), ">: shutdown ");
405     }
406     if (Options::logGC())
407         dataLog("3");
408
409     RELEASE_ASSERT(m_requests.isEmpty());
410     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
411     
412     // Carefully bring the thread down.
413     bool stopped = false;
414     {
415         LockHolder locker(*m_threadLock);
416         stopped = m_thread->tryStop(locker);
417         m_threadShouldStop = true;
418         if (!stopped)
419             m_threadCondition->notifyOne(locker);
420     }
421
422     if (Options::logGC())
423         dataLog("4");
424     
425     if (!stopped)
426         m_thread->join();
427     
428     if (Options::logGC())
429         dataLog("5 ");
430     
431     m_arrayBuffers.lastChanceToFinalize();
432     m_codeBlocks->lastChanceToFinalize(*m_vm);
433     m_objectSpace.stopAllocating();
434     m_objectSpace.lastChanceToFinalize();
435     releaseDelayedReleasedObjects();
436
437     sweepAllLogicallyEmptyWeakBlocks();
438     
439     if (Options::logGC())
440         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
441 }
442
443 void Heap::releaseDelayedReleasedObjects()
444 {
445 #if USE(FOUNDATION)
446     // We need to guard against the case that releasing an object can create more objects due to the
447     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
448     // back here and could try to recursively release objects. We guard that with a recursive entry
449     // count. Only the initial call will release objects, recursive calls simply return and let
450     // the initial call to the function take care of any objects created during release time.
451     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
452     // and use a temp Vector for the actual releasing.
453     if (!m_delayedReleaseRecursionCount++) {
454         while (!m_delayedReleaseObjects.isEmpty()) {
455             ASSERT(m_vm->currentThreadIsHoldingAPILock());
456
457             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
458
459             {
460                 // We need to drop locks before calling out to arbitrary code.
461                 JSLock::DropAllLocks dropAllLocks(m_vm);
462
463                 void* context = objc_autoreleasePoolPush();
464                 objectsToRelease.clear();
465                 objc_autoreleasePoolPop(context);
466             }
467         }
468     }
469     m_delayedReleaseRecursionCount--;
470 #endif
471 }
472
473 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
474 {
475     didAllocate(size);
476     collectIfNecessaryOrDefer();
477 }
478
479 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
480 {
481     // FIXME: Change this to use SaturatedArithmetic when available.
482     // https://bugs.webkit.org/show_bug.cgi?id=170411
483     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
484     checkedNewSize += size;
485     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
486     reportExtraMemoryAllocatedSlowCase(size);
487 }
488
489 void Heap::reportWebAssemblyFastMemoriesAllocated(size_t count)
490 {
491     didAllocateWebAssemblyFastMemories(count);
492     collectIfNecessaryOrDefer();
493 }
494
495 bool Heap::webAssemblyFastMemoriesThisCycleAtThreshold() const
496 {
497     // WebAssembly fast memories use large amounts of virtual memory and we
498     // don't know how many can exist in this process. We keep track of the maximum number of
499     // fast memories that have existed at any point in time. The GC uses this high watermark as
500     // an indication of whether recent allocations should cause a collection: if we get too
501     // close to it, we may be near the actual limit.
502     size_t fastMemoryThreshold = std::max<size_t>(1, Wasm::Memory::maxFastMemoryCount() / 2);
503     return m_webAssemblyFastMemoriesAllocatedThisCycle > fastMemoryThreshold;
504 }
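// For example, if Wasm::Memory::maxFastMemoryCount() were 104 (an illustrative value), the
// threshold would be 52, so this returns true once more than 52 fast memories have been
// allocated in the current GC cycle.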
505
506 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
507 {
508 #if PLATFORM(IOS)
509     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
510         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
511         m_precentAvailableMemoryCachedCallCount = 0;
512     }
513
514     return m_overCriticalMemoryThreshold;
515 #else
516     UNUSED_PARAM(memoryThresholdCallType);
517     return false;
518 #endif
519 }
520
521 void Heap::reportAbandonedObjectGraph()
522 {
523     // Our clients don't know exactly how much memory they
524     // are abandoning so we just guess for them.
525     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
526
527     // We want to accelerate the next collection. Because memory has just 
528     // been abandoned, the next collection has the potential to 
529     // be more profitable. Since allocation is the trigger for collection, 
530     // we hasten the next collection by pretending that we've allocated more memory. 
531     if (m_fullActivityCallback) {
532         m_fullActivityCallback->didAllocate(
533             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
534     }
535     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
536 }
537
538 void Heap::protect(JSValue k)
539 {
540     ASSERT(k);
541     ASSERT(m_vm->currentThreadIsHoldingAPILock());
542
543     if (!k.isCell())
544         return;
545
546     m_protectedValues.add(k.asCell());
547 }
548
549 bool Heap::unprotect(JSValue k)
550 {
551     ASSERT(k);
552     ASSERT(m_vm->currentThreadIsHoldingAPILock());
553
554     if (!k.isCell())
555         return false;
556
557     return m_protectedValues.remove(k.asCell());
558 }
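// Usage sketch: protect() pins a cell so that it is scanned as a GC root until a matching
// unprotect(). m_protectedValues is a counted set, so protect()/unprotect() pairs nest.
//
//     heap.protect(someValue);    // someValue's cell now stays alive across collections
//     // ... allocate freely; someValue remains reachable ...
//     heap.unprotect(someValue);  // drops one level of protection
//
// Non-cell JSValues (numbers, booleans, undefined) are ignored, and both functions assert that
// the calling thread holds the VM's API lock.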
559
560 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
561 {
562     if (m_arrayBuffers.addReference(cell, buffer)) {
563         collectIfNecessaryOrDefer();
564         didAllocate(buffer->gcSizeEstimateInBytes());
565     }
566 }
567
568 void Heap::finalizeUnconditionalFinalizers()
569 {
570     while (m_unconditionalFinalizers.hasNext()) {
571         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
572         finalizer->finalizeUnconditionally();
573     }
574 }
575
576 void Heap::willStartIterating()
577 {
578     m_objectSpace.willStartIterating();
579 }
580
581 void Heap::didFinishIterating()
582 {
583     m_objectSpace.didFinishIterating();
584 }
585
586 void Heap::completeAllJITPlans()
587 {
588 #if ENABLE(JIT)
589     JITWorklist::instance()->completeAllForVM(*m_vm);
590 #endif // ENABLE(JIT)
591     DFG::completeAllPlansForVM(*m_vm);
592 }
593
594 template<typename Func>
595 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
596 {
597     m_codeBlocks->iterateCurrentlyExecuting(func);
598     DFG::iterateCodeBlocksForGC(*m_vm, func);
599 }
600
601 template<typename Func>
602 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
603 {
604     Vector<CodeBlock*, 256> codeBlocks;
605     iterateExecutingAndCompilingCodeBlocks(
606         [&] (CodeBlock* codeBlock) {
607             codeBlocks.append(codeBlock);
608         });
609     for (CodeBlock* codeBlock : codeBlocks)
610         func(codeBlock);
611 }
612
613 void Heap::assertSharedMarkStacksEmpty()
614 {
615     bool ok = true;
616     
617     if (!m_sharedCollectorMarkStack->isEmpty()) {
618         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
619         ok = false;
620     }
621     
622     if (!m_sharedMutatorMarkStack->isEmpty()) {
623         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
624         ok = false;
625     }
626     
627     RELEASE_ASSERT(ok);
628 }
629
630 void Heap::gatherStackRoots(ConservativeRoots& roots)
631 {
632     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
633 }
634
635 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
636 {
637 #if !ENABLE(JIT)
638     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
639 #else
640     UNUSED_PARAM(roots);
641 #endif
642 }
643
644 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
645 {
646 #if ENABLE(DFG_JIT)
647     m_vm->gatherConservativeRoots(roots);
648 #else
649     UNUSED_PARAM(roots);
650 #endif
651 }
652
653 void Heap::beginMarking()
654 {
655     TimingScope timingScope(*this, "Heap::beginMarking");
656     if (m_collectionScope == CollectionScope::Full)
657         m_codeBlocks->clearMarksForFullCollection();
658     m_jitStubRoutines->clearMarks();
659     m_objectSpace.beginMarking();
660     setMutatorShouldBeFenced(true);
661 }
662
663 void Heap::removeDeadCompilerWorklistEntries()
664 {
665 #if ENABLE(DFG_JIT)
666     for (unsigned i = DFG::numberOfWorklists(); i--;)
667         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
668 #endif
669 }
670
671 bool Heap::isHeapSnapshotting() const
672 {
673     HeapProfiler* heapProfiler = m_vm->heapProfiler();
674     if (UNLIKELY(heapProfiler))
675         return heapProfiler->activeSnapshotBuilder();
676     return false;
677 }
678
679 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
680     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
681         : m_builder(builder)
682     {
683     }
684
685     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
686     {
687         if (kind == HeapCell::JSCell) {
688             JSCell* cell = static_cast<JSCell*>(heapCell);
689             cell->methodTable()->heapSnapshot(cell, m_builder);
690         }
691         return IterationStatus::Continue;
692     }
693
694     HeapSnapshotBuilder& m_builder;
695 };
696
697 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
698 {
699     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
700         HeapIterationScope heapIterationScope(*this);
701         GatherHeapSnapshotData functor(*builder);
702         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
703     }
704 }
705
706 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
707     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
708         : m_snapshot(snapshot)
709     {
710     }
711
712     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
713     {
714         if (kind == HeapCell::JSCell)
715             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
716         return IterationStatus::Continue;
717     }
718
719     HeapSnapshot& m_snapshot;
720 };
721
722 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
723 {
724     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
725         HeapIterationScope heapIterationScope(*this);
726         RemoveDeadHeapSnapshotNodes functor(*snapshot);
727         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
728         snapshot->shrinkToFit();
729     }
730 }
731
732 void Heap::updateObjectCounts()
733 {
734     if (m_collectionScope == CollectionScope::Full)
735         m_totalBytesVisited = 0;
736
737     m_totalBytesVisitedThisCycle = bytesVisited();
738     
739     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
740 }
741
742 void Heap::endMarking()
743 {
744     forEachSlotVisitor(
745         [&] (SlotVisitor& visitor) {
746             visitor.reset();
747         });
748
749     assertSharedMarkStacksEmpty();
750     m_weakReferenceHarvesters.removeAll();
751
752     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
753     
754     m_objectSpace.endMarking();
755     setMutatorShouldBeFenced(Options::forceFencedBarrier());
756 }
757
758 size_t Heap::objectCount()
759 {
760     return m_objectSpace.objectCount();
761 }
762
763 size_t Heap::extraMemorySize()
764 {
765     // FIXME: Change this to use SaturatedArithmetic when available.
766     // https://bugs.webkit.org/show_bug.cgi?id=170411
767     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
768     checkedTotal += m_deprecatedExtraMemorySize;
769     checkedTotal += m_arrayBuffers.size();
770     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
771
772     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
773     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
774 }
775
776 size_t Heap::size()
777 {
778     return m_objectSpace.size() + extraMemorySize();
779 }
780
781 size_t Heap::capacity()
782 {
783     return m_objectSpace.capacity() + extraMemorySize();
784 }
785
786 size_t Heap::protectedGlobalObjectCount()
787 {
788     size_t result = 0;
789     forEachProtectedCell(
790         [&] (JSCell* cell) {
791             if (cell->isObject() && asObject(cell)->isGlobalObject())
792                 result++;
793         });
794     return result;
795 }
796
797 size_t Heap::globalObjectCount()
798 {
799     HeapIterationScope iterationScope(*this);
800     size_t result = 0;
801     m_objectSpace.forEachLiveCell(
802         iterationScope,
803         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
804             if (kind != HeapCell::JSCell)
805                 return IterationStatus::Continue;
806             JSCell* cell = static_cast<JSCell*>(heapCell);
807             if (cell->isObject() && asObject(cell)->isGlobalObject())
808                 result++;
809             return IterationStatus::Continue;
810         });
811     return result;
812 }
813
814 size_t Heap::protectedObjectCount()
815 {
816     size_t result = 0;
817     forEachProtectedCell(
818         [&] (JSCell*) {
819             result++;
820         });
821     return result;
822 }
823
824 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
825 {
826     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
827     forEachProtectedCell(
828         [&] (JSCell* cell) {
829             recordType(*vm(), *result, cell);
830         });
831     return result;
832 }
833
834 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
835 {
836     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
837     HeapIterationScope iterationScope(*this);
838     m_objectSpace.forEachLiveCell(
839         iterationScope,
840         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
841             if (kind == HeapCell::JSCell)
842                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
843             return IterationStatus::Continue;
844         });
845     return result;
846 }
847
848 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
849 {
850     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
851         return;
852     
853     PreventCollectionScope preventCollectionScope(*this);
854     
855     // If JavaScript is running, it's not safe to delete all JavaScript code, since
856     // we'll end up returning to deleted code.
857     RELEASE_ASSERT(!m_vm->entryScope);
858     RELEASE_ASSERT(!m_collectionScope);
859
860     completeAllJITPlans();
861
862     for (ExecutableBase* executable : m_executables)
863         executable->clearCode();
864
865 #if ENABLE(WEBASSEMBLY)
866     {
867         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
868         // have no idea that we cleared the code from all of the Executables in the
869         // VM. This could leave Wasm in an inconsistent state where it has an IC that
870         // points into a CodeBlock that could be dead. The IC will still succeed because
871         // it uses a callee check, but then it will call into dead code.
872         HeapIterationScope heapIterationScope(*this);
873         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
874             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
875             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
876             codeBlock->clearJSCallICs(*m_vm);
877         });
878     }
879 #endif
880 }
881
882 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
883 {
884     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
885         return;
886     
887     PreventCollectionScope preventCollectionScope(*this);
888
889     RELEASE_ASSERT(!m_collectionScope);
890     
891     for (ExecutableBase* current : m_executables) {
892         if (!current->isFunctionExecutable())
893             continue;
894         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
895     }
896 }
897
898 void Heap::clearUnmarkedExecutables()
899 {
900     for (unsigned i = m_executables.size(); i--;) {
901         ExecutableBase* current = m_executables[i];
902         if (isMarked(current))
903             continue;
904
905         // Eagerly dereference the Executable's JITCode in order to run watchpoint
906         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
907         current->clearCode();
908         std::swap(m_executables[i], m_executables.last());
909         m_executables.removeLast();
910     }
911
912     m_executables.shrinkToFit();
913 }
914
915 void Heap::deleteUnmarkedCompiledCode()
916 {
917     clearUnmarkedExecutables();
918     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
919     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
920 }
921
922 void Heap::addToRememberedSet(const JSCell* constCell)
923 {
924     JSCell* cell = const_cast<JSCell*>(constCell);
925     ASSERT(cell);
926     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
927     m_barriersExecuted++;
928     if (m_mutatorShouldBeFenced) {
929         WTF::loadLoadFence();
930         if (!isMarkedConcurrently(cell)) {
931             // During a full collection a store into an unmarked object that had survived past
932             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
933             // object gets marked at some time after this then it will go down the normal marking
934             // path. So, we don't have to remember this object. We could return here. But we go
935             // further and attempt to re-white the object.
936             
937             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
938             
939             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
940                 // Now we protect against this race:
941                 //
942                 //     1) Object starts out black + unmarked.
943                 //     --> We do isMarkedConcurrently here.
944                 //     2) Object is marked and greyed.
945                 //     3) Object is scanned and blacked.
946                 //     --> We do atomicCompareExchangeCellStateStrong here.
947                 //
948                 // In this case we would have made the object white again, even though it should
949                 // be black. This check lets us correct our mistake. This relies on the fact that
950                 // isMarkedConcurrently converges monotonically to true.
951                 if (isMarkedConcurrently(cell)) {
952                     // It's difficult to work out whether the object should be grey or black at
953                     // this point. We say black conservatively.
954                     cell->setCellState(CellState::PossiblyBlack);
955                 }
956                 
957                 // Either way, we can return. Most likely, the object was not marked, and so the
958                 // object is now labeled white. This means that future barrier executions will not
959                 // fire. In the unlikely event that the object had become marked, we can still
960                 // return anyway, since we proved that the object was not marked at the time that
961                 // we executed this slow path.
962             }
963             
964             return;
965         }
966     } else
967         ASSERT(Heap::isMarkedConcurrently(cell));
968     // It could be that the object was *just* marked. This means that the collector may set the
969     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
970     // race with the collector here. If we win then this is accurate because the object _will_
971     // get scanned again. If we lose then someone else will barrier the object again. That would
972     // be unfortunate but not the end of the world.
973     cell->setCellState(CellState::PossiblyGrey);
974     m_mutatorMarkStack->append(cell);
975 }
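// In short: this is the out-of-line part of the store barrier. When the mutator is fenced (a
// concurrent full collection is in flight) and the target is not yet marked, we attempt to
// re-white the object instead of remembering it; in every other case the object is labeled
// PossiblyGrey and pushed onto m_mutatorMarkStack so the collector will re-scan it before the
// cycle ends.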
976
977 void Heap::sweepSynchronously()
978 {
979     double before = 0;
980     if (Options::logGC()) {
981         dataLog("Full sweep: ", capacity() / 1024, "kb ");
982         before = currentTimeMS();
983     }
984     m_objectSpace.sweep();
985     m_objectSpace.shrink();
986     if (Options::logGC()) {
987         double after = currentTimeMS();
988         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
989     }
990 }
991
992 void Heap::collect(Synchronousness synchronousness, GCRequest request)
993 {
994     switch (synchronousness) {
995     case Async:
996         collectAsync(request);
997         return;
998     case Sync:
999         collectSync(request);
1000         return;
1001     }
1002     RELEASE_ASSERT_NOT_REACHED();
1003 }
1004
1005 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1006 {
1007     switch (synchronousness) {
1008     case Async: {
1009         collectAsync(request);
1010         stopIfNecessary();
1011         return;
1012     }
1013         
1014     case Sync: {
1015         collectSync(request);
1016         
1017         DeferGCForAWhile deferGC(*this);
1018         if (UNLIKELY(Options::useImmortalObjects()))
1019             sweeper().stopSweeping();
1020         
1021         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1022         if (!alreadySweptInCollectSync) {
1023             if (Options::logGC())
1024                 dataLog("[GC<", RawPointer(this), ">: ");
1025             sweepSynchronously();
1026             if (Options::logGC())
1027                 dataLog("]\n");
1028         }
1029         m_objectSpace.assertNoUnswept();
1030         
1031         sweepAllLogicallyEmptyWeakBlocks();
1032         return;
1033     } }
1034     RELEASE_ASSERT_NOT_REACHED();
1035 }
1036
1037 void Heap::collectAsync(GCRequest request)
1038 {
1039     if (!m_isSafeToCollect)
1040         return;
1041
1042     bool alreadyRequested = false;
1043     {
1044         LockHolder locker(*m_threadLock);
1045         for (const GCRequest& previousRequest : m_requests) {
1046             if (request.subsumedBy(previousRequest)) {
1047                 alreadyRequested = true;
1048                 break;
1049             }
1050         }
1051     }
1052     if (alreadyRequested)
1053         return;
1054
1055     requestCollection(request);
1056 }
1057
1058 void Heap::collectSync(GCRequest request)
1059 {
1060     if (!m_isSafeToCollect)
1061         return;
1062     
1063     waitForCollection(requestCollection(request));
1064 }
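// collectAsync() merely enqueues a request (unless an already-pending request subsumes it) and
// returns; the work happens on the collector thread or on later mutator slow paths. collectSync()
// enqueues the request and then blocks until the ticket it was granted has been served.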
1065
1066 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1067 {
1068     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1069     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1070     
1071     if (false)
1072         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1073     
1074     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1075 }
1076
1077 void Heap::collectInCollectorThread()
1078 {
1079     for (;;) {
1080         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1081         switch (result) {
1082         case RunCurrentPhaseResult::Finished:
1083             return;
1084         case RunCurrentPhaseResult::Continue:
1085             break;
1086         case RunCurrentPhaseResult::NeedCurrentThreadState:
1087             RELEASE_ASSERT_NOT_REACHED();
1088             break;
1089         }
1090     }
1091 }
1092
1093 void Heap::checkConn(GCConductor conn)
1094 {
1095     switch (conn) {
1096     case GCConductor::Mutator:
1097         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1098         return;
1099     case GCConductor::Collector:
1100         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1101         return;
1102     }
1103     RELEASE_ASSERT_NOT_REACHED();
1104 }
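// "The conn" is the right to drive the collector's phase machine. When mutatorHasConnBit is set,
// the mutator runs phases from its slow paths (see runConcurrentPhase()); otherwise the collector
// thread runs them via collectInCollectorThread(). checkConn() asserts that the caller really is
// the current driver.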
1105
1106 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1107 {
1108     checkConn(conn);
1109     m_currentThreadState = currentThreadState;
1110     
1111     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1112     if (!finishChangingPhase(conn)) {
1113         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1114         // this, but it's probably not the end of the world if it did happen.
1115         if (false)
1116             dataLog("Conn bounce-back.\n");
1117         return RunCurrentPhaseResult::Finished;
1118     }
1119     
1120     bool result = false;
1121     switch (m_currentPhase) {
1122     case CollectorPhase::NotRunning:
1123         result = runNotRunningPhase(conn);
1124         break;
1125         
1126     case CollectorPhase::Begin:
1127         result = runBeginPhase(conn);
1128         break;
1129         
1130     case CollectorPhase::Fixpoint:
1131         if (!currentThreadState && conn == GCConductor::Mutator)
1132             return RunCurrentPhaseResult::NeedCurrentThreadState;
1133         
1134         result = runFixpointPhase(conn);
1135         break;
1136         
1137     case CollectorPhase::Concurrent:
1138         result = runConcurrentPhase(conn);
1139         break;
1140         
1141     case CollectorPhase::Reloop:
1142         result = runReloopPhase(conn);
1143         break;
1144         
1145     case CollectorPhase::End:
1146         result = runEndPhase(conn);
1147         break;
1148     }
1149
1150     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1151 }
1152
1153 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1154 {
1155     // Check m_requests since the mutator calls this to poll what's going on.
1156     {
1157         auto locker = holdLock(*m_threadLock);
1158         if (m_requests.isEmpty())
1159             return false;
1160     }
1161     
1162     return changePhase(conn, CollectorPhase::Begin);
1163 }
1164
1165 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1166 {
1167     m_currentGCStartTime = MonotonicTime::now();
1168     
1169     {
1170         LockHolder locker(*m_threadLock);
1171         RELEASE_ASSERT(!m_requests.isEmpty());
1172         m_currentRequest = m_requests.first();
1173     }
1174         
1175     if (Options::logGC())
1176         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1177
1178     m_beforeGC = MonotonicTime::now();
1179
1180     if (m_collectionScope) {
1181         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1182         RELEASE_ASSERT_NOT_REACHED();
1183     }
1184     
1185     willStartCollection();
1186         
1187     if (UNLIKELY(m_verifier)) {
1188         // Verify that live objects from the last GC cycle haven't been corrupted by
1189         // mutators before we begin this new GC cycle.
1190         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1191             
1192         m_verifier->startGC();
1193         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1194     }
1195         
1196     prepareForMarking();
1197         
1198     if (m_collectionScope == CollectionScope::Full) {
1199         m_opaqueRoots.clear();
1200         m_collectorSlotVisitor->clearMarkStacks();
1201         m_mutatorMarkStack->clear();
1202     }
1203
1204     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1205
1206     beginMarking();
1207
1208     forEachSlotVisitor(
1209         [&] (SlotVisitor& visitor) {
1210             visitor.didStartMarking();
1211         });
1212
1213     m_parallelMarkersShouldExit = false;
1214
1215     m_helperClient.setFunction(
1216         [this] () {
1217             SlotVisitor* slotVisitor;
1218             {
1219                 LockHolder locker(m_parallelSlotVisitorLock);
1220                 if (m_availableParallelSlotVisitors.isEmpty()) {
1221                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1222                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1223                     
1224                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1225                         newVisitor->optimizeForStoppedMutator();
1226                     
1227                     newVisitor->didStartMarking();
1228                     
1229                     slotVisitor = newVisitor.get();
1230                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1231                 } else
1232                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1233             }
1234
1235             WTF::registerGCThread(GCThreadType::Helper);
1236
1237             {
1238                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1239                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1240             }
1241
1242             {
1243                 LockHolder locker(m_parallelSlotVisitorLock);
1244                 m_availableParallelSlotVisitors.append(slotVisitor);
1245             }
1246         });
1247
1248     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1249
1250     m_constraintSet->didStartMarking();
1251     
1252     m_scheduler->beginCollection();
1253     if (Options::logGC())
1254         m_scheduler->log();
1255     
1256     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1257     // checks because bootstrap would have put things into the visitor. So, we should fall
1258     // through to draining.
1259     
1260     if (!slotVisitor.didReachTermination()) {
1261         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1262         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1263         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1264         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1265         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1266         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1267         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1268         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1269         RELEASE_ASSERT_NOT_REACHED();
1270     }
1271         
1272     return changePhase(conn, CollectorPhase::Fixpoint);
1273 }
1274
1275 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1276 {
1277     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1278     
1279     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1280     
1281     if (Options::logGC()) {
1282         HashMap<const char*, size_t> visitMap;
1283         forEachSlotVisitor(
1284             [&] (SlotVisitor& slotVisitor) {
1285                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1286             });
1287         
1288         auto perVisitorDump = sortedMapDump(
1289             visitMap,
1290             [] (const char* a, const char* b) -> bool {
1291                 return strcmp(a, b) < 0;
1292             },
1293             ":", " ");
1294         
1295         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1296     }
1297         
1298     if (slotVisitor.didReachTermination()) {
1299         m_scheduler->didReachTermination();
1300             
1301         assertSharedMarkStacksEmpty();
1302             
1303         slotVisitor.mergeIfNecessary();
1304         for (auto& parallelVisitor : m_parallelSlotVisitors)
1305             parallelVisitor->mergeIfNecessary();
1306             
1307         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1308         // we don't have to execute root constraints again unless the mutator did run. At a
1309         // minimum, we could use this for work estimates - but it's probably more than just an
1310         // estimate.
1311         // https://bugs.webkit.org/show_bug.cgi?id=166828
1312             
1313         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1314         // into play if we're executing constraints for the first time. But that will matter
1315         // when we have deep stacks or a lot of DOM stuff.
1316         // https://bugs.webkit.org/show_bug.cgi?id=166831
1317             
1318         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1319         // add their own using Heap::addMarkingConstraint().
1320         bool converged =
1321             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1322         if (converged && slotVisitor.isEmpty()) {
1323             assertSharedMarkStacksEmpty();
1324             return changePhase(conn, CollectorPhase::End);
1325         }
1326             
1327         m_scheduler->didExecuteConstraints();
1328     }
1329         
1330     if (Options::logGC())
1331         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1332         
1333     {
1334         ParallelModeEnabler enabler(slotVisitor);
1335         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1336     }
1337         
1338     m_scheduler->synchronousDrainingDidStall();
1339
1340     if (slotVisitor.didReachTermination())
1341         return true; // This is like relooping to the top of runFixpointPhase().
1342         
1343     if (!m_scheduler->shouldResume())
1344         return true;
1345
1346     m_scheduler->willResume();
1347         
1348     if (Options::logGC()) {
1349         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1350         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1351     }
1352
1353     // Forgive the mutator for its past failures to keep up.
1354     // FIXME: Figure out if moving this to different places results in perf changes.
1355     m_incrementBalance = 0;
1356         
1357     return changePhase(conn, CollectorPhase::Concurrent);
1358 }
1359
1360 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1361 {
1362     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1363
1364     switch (conn) {
1365     case GCConductor::Mutator: {
1366         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1367         // stopIfNecessary(), i.e., on every allocation slow path. When that happens we check whether
1368         // it's time to stop and do some work.
1369         if (slotVisitor.didReachTermination()
1370             || m_scheduler->shouldStop())
1371             return changePhase(conn, CollectorPhase::Reloop);
1372         
1373         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1374         // everything. This is super cheap if the SlotVisitor is already empty.
1375         slotVisitor.donateAll();
1376         return false;
1377     }
1378     case GCConductor::Collector: {
1379         {
1380             ParallelModeEnabler enabler(slotVisitor);
1381             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1382         }
1383         return changePhase(conn, CollectorPhase::Reloop);
1384     } }
1385     
1386     RELEASE_ASSERT_NOT_REACHED();
1387     return false;
1388 }
1389
1390 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1391 {
1392     if (Options::logGC())
1393         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1394     
1395     m_scheduler->didStop();
1396     
1397     if (Options::logGC())
1398         m_scheduler->log();
1399     
1400     return changePhase(conn, CollectorPhase::Fixpoint);
1401 }
1402
1403 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1404 {
1405     m_scheduler->endCollection();
1406         
1407     {
1408         auto locker = holdLock(m_markingMutex);
1409         m_parallelMarkersShouldExit = true;
1410         m_markingConditionVariable.notifyAll();
1411     }
1412     m_helperClient.finish();
1413     
1414     iterateExecutingAndCompilingCodeBlocks(
1415         [&] (CodeBlock* codeBlock) {
1416             writeBarrier(codeBlock);
1417         });
1418         
1419     updateObjectCounts();
1420     endMarking();
1421         
1422     if (UNLIKELY(m_verifier)) {
1423         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1424         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1425     }
1426         
1427     if (vm()->typeProfiler())
1428         vm()->typeProfiler()->invalidateTypeSetCache();
1429         
1430     reapWeakHandles();
1431     pruneStaleEntriesFromWeakGCMaps();
1432     sweepArrayBuffers();
1433     snapshotUnswept();
1434     finalizeUnconditionalFinalizers();
1435     removeDeadCompilerWorklistEntries();
1436     notifyIncrementalSweeper();
1437     
1438     m_codeBlocks->iterateCurrentlyExecuting(
1439         [&] (CodeBlock* codeBlock) {
1440             writeBarrier(codeBlock);
1441         });
1442     m_codeBlocks->clearCurrentlyExecuting();
1443         
1444     m_objectSpace.prepareForAllocation();
1445     updateAllocationLimits();
1446
1447     if (UNLIKELY(m_verifier)) {
1448         m_verifier->trimDeadCells();
1449         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1450     }
1451
1452     didFinishCollection();
1453     
1454     if (m_currentRequest.didFinishEndPhase)
1455         m_currentRequest.didFinishEndPhase->run();
1456     
1457     if (false) {
1458         dataLog("Heap state after GC:\n");
1459         m_objectSpace.dumpBits();
1460     }
1461     
1462     if (Options::logGC()) {
1463         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1464         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1465     }
1466     
1467     {
1468         auto locker = holdLock(*m_threadLock);
1469         m_requests.removeFirst();
1470         m_lastServedTicket++;
1471         clearMutatorWaiting();
1472     }
1473     ParkingLot::unparkAll(&m_worldState);
1474
1475     if (false)
1476         dataLog("GC END!\n");
1477
1478     setNeedFinalize();
1479
1480     m_lastGCStartTime = m_currentGCStartTime;
1481     m_lastGCEndTime = MonotonicTime::now();
1482         
1483     return changePhase(conn, CollectorPhase::NotRunning);
1484 }
1485
1486 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1487 {
1488     checkConn(conn);
1489
1490     m_nextPhase = nextPhase;
1491
1492     return finishChangingPhase(conn);
1493 }
1494
1495 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1496 {
1497     checkConn(conn);
1498     
1499     if (m_nextPhase == m_currentPhase)
1500         return true;
1501
1502     if (false)
1503         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1504     
1505     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1506     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1507     
1508     if (suspendedBefore != suspendedAfter) {
1509         if (suspendedBefore) {
1510             RELEASE_ASSERT(!suspendedAfter);
1511             
1512             resumeThePeriphery();
1513             if (conn == GCConductor::Collector)
1514                 resumeTheMutator();
1515             else
1516                 handleNeedFinalize();
1517         } else {
1518             RELEASE_ASSERT(!suspendedBefore);
1519             RELEASE_ASSERT(suspendedAfter);
1520             
1521             if (conn == GCConductor::Collector) {
1522                 waitWhileNeedFinalize();
1523                 if (!stopTheMutator()) {
1524                     if (false)
1525                         dataLog("Returning false.\n");
1526                     return false;
1527                 }
1528             } else {
1529                 sanitizeStackForVM(m_vm);
1530                 handleNeedFinalize();
1531             }
1532             stopThePeriphery(conn);
1533         }
1534     }
1535     
1536     m_currentPhase = m_nextPhase;
1537     return true;
1538 }
1539
1540 void Heap::stopThePeriphery(GCConductor conn)
1541 {
1542     if (m_collectorBelievesThatTheWorldIsStopped) {
1543         dataLog("FATAL: world already stopped.\n");
1544         RELEASE_ASSERT_NOT_REACHED();
1545     }
1546     
1547     if (m_mutatorDidRun)
1548         m_mutatorExecutionVersion++;
1549     
1550     m_mutatorDidRun = false;
1551
1552     suspendCompilerThreads();
1553     m_collectorBelievesThatTheWorldIsStopped = true;
1554
1555     forEachSlotVisitor(
1556         [&] (SlotVisitor& slotVisitor) {
1557             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1558         });
1559
1560 #if ENABLE(JIT)
1561     {
1562         DeferGCForAWhile awhile(*this);
1563         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1564             && conn == GCConductor::Collector)
1565             setGCDidJIT();
1566     }
1567 #else
1568     UNUSED_PARAM(conn);
1569 #endif // ENABLE(JIT)
1570     
1571     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1572     
1573     m_structureIDTable.flushOldTables();
1574     m_objectSpace.stopAllocating();
1575     
1576     m_stopTime = MonotonicTime::now();
1577 }
1578
1579 NEVER_INLINE void Heap::resumeThePeriphery()
1580 {
1581     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1582     // collection cycle or this is just a concurrent phase within a collection cycle:
1583     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1584     //   last active block.
1585     // - During collection cycle: it reinstates the last active block.
1586     m_objectSpace.resumeAllocating();
1587     
1588     m_barriersExecuted = 0;
1589     
1590     if (!m_collectorBelievesThatTheWorldIsStopped) {
1591         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1592         RELEASE_ASSERT_NOT_REACHED();
1593     }
1594     m_collectorBelievesThatTheWorldIsStopped = false;
1595     
1596     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1597     // become available. We basically want a lockAny() method that will lock whatever lock is available
1598     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1599     // queues at once, which is totally achievable - it would just require memory allocation, which is
1600     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1601     // with a DLG-style handshake mechanism, but that seems not as general.
1602     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1603
1604     forEachSlotVisitor(
1605         [&] (SlotVisitor& slotVisitor) {
1606             slotVisitorsToUpdate.append(&slotVisitor);
1607         });
1608     
1609     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1610         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1611             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1612             bool remove = false;
1613             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1614                 remove = true;
1615             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1616                 slotVisitor.updateMutatorIsStopped(locker);
1617                 remove = true;
1618             }
1619             if (remove) {
1620                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1621                 slotVisitorsToUpdate.takeLast();
1622             }
1623         }
1624         WTF::Thread::yield();
1625     }
1626     
1627     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1628         slotVisitor->updateMutatorIsStopped();
1629     
1630     resumeCompilerThreads();
1631 }
1632
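// Tries to move the world into the "stopped" state. The m_worldState bits used below encode
// the mutator/collector handshake: hasAccessBit (the mutator holds heap access), stoppedBit
// (the world is stopped), mutatorHasConnBit (the mutator drives collection phases itself), and
// mutatorWaitingBit (a mutator is parked waiting for the collector). Returns true if the world
// ends up stopped on behalf of the collector, and false if the conn was handed to the mutator,
// in which case the mutator will run the next collector phases from stopIfNecessary().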
1633 bool Heap::stopTheMutator()
1634 {
1635     for (;;) {
1636         unsigned oldState = m_worldState.load();
1637         if (oldState & stoppedBit) {
1638             RELEASE_ASSERT(!(oldState & hasAccessBit));
1639             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1640             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1641             return true;
1642         }
1643         
1644         if (oldState & mutatorHasConnBit) {
1645             RELEASE_ASSERT(!(oldState & hasAccessBit));
1646             RELEASE_ASSERT(!(oldState & stoppedBit));
1647             return false;
1648         }
1649
1650         if (!(oldState & hasAccessBit)) {
1651             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1652             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1653             // We can stop the world instantly.
1654             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1655                 return true;
1656             continue;
1657         }
1658         
1659         // Transfer the conn to the mutator and bail.
1660         RELEASE_ASSERT(oldState & hasAccessBit);
1661         RELEASE_ASSERT(!(oldState & stoppedBit));
1662         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1663         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1664             if (false)
1665                 dataLog("Handed off the conn.\n");
1666             m_stopIfNecessaryTimer->scheduleSoon();
1667             ParkingLot::unparkAll(&m_worldState);
1668             return false;
1669         }
1670     }
1671 }
1672
1673 NEVER_INLINE void Heap::resumeTheMutator()
1674 {
1675     if (false)
1676         dataLog("Resuming the mutator.\n");
1677     for (;;) {
1678         unsigned oldState = m_worldState.load();
1679         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1680             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1681             RELEASE_ASSERT_NOT_REACHED();
1682         }
1683         if (oldState & mutatorHasConnBit) {
1684             dataLog("Fatal: mutator has the conn.\n");
1685             RELEASE_ASSERT_NOT_REACHED();
1686         }
1687         
1688         if (!(oldState & stoppedBit)) {
1689             if (false)
1690                 dataLog("Returning because not stopped.\n");
1691             return;
1692         }
1693         
1694         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1695             if (false)
1696                 dataLog("CASing and returning.\n");
1697             ParkingLot::unparkAll(&m_worldState);
1698             return;
1699         }
1700     }
1701 }
1702
1703 void Heap::stopIfNecessarySlow()
1704 {
1705     while (stopIfNecessarySlow(m_worldState.load())) { }
1706     
1707     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1708     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1709     
1710     handleGCDidJIT();
1711     handleNeedFinalize();
1712     m_mutatorDidRun = true;
1713 }
1714
1715 bool Heap::stopIfNecessarySlow(unsigned oldState)
1716 {
1717     RELEASE_ASSERT(oldState & hasAccessBit);
1718     RELEASE_ASSERT(!(oldState & stoppedBit));
1719     
1720     // It's possible for us to wake up with finalization already requested but the world not yet
1721     // resumed. If that happens, we can't run finalization yet.
1722     if (handleNeedFinalize(oldState))
1723         return true;
1724
1725     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1726     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1727     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1728     // and there would be some other bit indicating whether we were in some GC phase other than the
1729     // NotRunning or Concurrent ones.
1730     if (oldState & mutatorHasConnBit)
1731         collectInMutatorThread();
1732     
1733     return false;
1734 }
1735
1736 NEVER_INLINE void Heap::collectInMutatorThread()
1737 {
1738     CollectingScope collectingScope(*this);
1739     for (;;) {
1740         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1741         switch (result) {
1742         case RunCurrentPhaseResult::Finished:
1743             return;
1744         case RunCurrentPhaseResult::Continue:
1745             break;
1746         case RunCurrentPhaseResult::NeedCurrentThreadState:
1747             sanitizeStackForVM(m_vm);
1748             auto lambda = [&] (CurrentThreadState& state) {
1749                 for (;;) {
1750                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1751                     switch (result) {
1752                     case RunCurrentPhaseResult::Finished:
1753                         return;
1754                     case RunCurrentPhaseResult::Continue:
1755                         break;
1756                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1757                         RELEASE_ASSERT_NOT_REACHED();
1758                         break;
1759                     }
1760                 }
1761             };
1762             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1763             return;
1764         }
1765     }
1766 }
1767
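// Blocks the calling mutator thread until func(locker) returns true. While waiting, the mutator
// still honors stop-the-world requests and relinquishes the conn so the collector thread can
// make progress; mutatorWaitingBit is used so the collector's unparkAll cannot be missed.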
1768 template<typename Func>
1769 void Heap::waitForCollector(const Func& func)
1770 {
1771     for (;;) {
1772         bool done;
1773         {
1774             LockHolder locker(*m_threadLock);
1775             done = func(locker);
1776             if (!done) {
1777                 setMutatorWaiting();
1778                 
1779                 // At this point, the collector knows that we intend to wait, and it will clear the
1780                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1781                 // prevents us from parking except if there is also a stop-the-world request. Unparking
1782                 // after clearing means that if the clearing happens after we park, then we will unpark.
1783             }
1784         }
1785         
1786         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1787         unsigned oldState = m_worldState.load();
1788         if (stopIfNecessarySlow(oldState))
1789             continue;
1790         
1791         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1792         // do the collection.
1793         relinquishConn();
1794         
1795         if (done) {
1796             clearMutatorWaiting(); // Clean up just in case.
1797             return;
1798         }
1799         
1800         // If mutatorWaitingBit is still set then we want to wait.
1801         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1802     }
1803 }
1804
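// Slow path for the mutator (re)acquiring heap access. If the world is stopped we park until it
// is resumed; otherwise we CAS in hasAccessBit and then service any pending GC-did-JIT or
// finalization work, and stop if necessary, before returning to the mutator.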
1805 void Heap::acquireAccessSlow()
1806 {
1807     for (;;) {
1808         unsigned oldState = m_worldState.load();
1809         RELEASE_ASSERT(!(oldState & hasAccessBit));
1810         
1811         if (oldState & stoppedBit) {
1812             if (verboseStop) {
1813                 dataLog("Stopping in acquireAccess!\n");
1814                 WTFReportBacktrace();
1815             }
1816             // Wait until we're not stopped anymore.
1817             ParkingLot::compareAndPark(&m_worldState, oldState);
1818             continue;
1819         }
1820         
1821         RELEASE_ASSERT(!(oldState & stoppedBit));
1822         unsigned newState = oldState | hasAccessBit;
1823         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1824             handleGCDidJIT();
1825             handleNeedFinalize();
1826             m_mutatorDidRun = true;
1827             stopIfNecessary();
1828             return;
1829         }
1830     }
1831 }
1832
1833 void Heap::releaseAccessSlow()
1834 {
1835     for (;;) {
1836         unsigned oldState = m_worldState.load();
1837         if (!(oldState & hasAccessBit)) {
1838             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1839             RELEASE_ASSERT_NOT_REACHED();
1840         }
1841         if (oldState & stoppedBit) {
1842             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1843             RELEASE_ASSERT_NOT_REACHED();
1844         }
1845         
1846         if (handleNeedFinalize(oldState))
1847             continue;
1848         
1849         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1850         
1851         if ((oldState & mutatorHasConnBit)
1852             && m_nextPhase != m_currentPhase) {
1853             // This means that the collector thread had given us the conn so that we would do something
1854             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1855             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1856             // sure to have work to do.
1857             newState |= stoppedBit;
1858         }
1859
1860         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1861             if (oldState & mutatorHasConnBit)
1862                 finishRelinquishingConn();
1863             return;
1864         }
1865     }
1866 }
1867
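// One step of relinquishConn(): returns false when there is nothing (left) to do and true when
// the caller should reload m_worldState and try again. Handing the conn back wakes the collector
// thread if there are pending collection requests.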
1868 bool Heap::relinquishConn(unsigned oldState)
1869 {
1870     RELEASE_ASSERT(oldState & hasAccessBit);
1871     RELEASE_ASSERT(!(oldState & stoppedBit));
1872     
1873     if (!(oldState & mutatorHasConnBit))
1874         return false; // Done.
1875     
1876     if (m_threadShouldStop)
1877         return false;
1878     
1879     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1880         return true; // Loop around.
1881     
1882     finishRelinquishingConn();
1883     return true;
1884 }
1885
1886 void Heap::finishRelinquishingConn()
1887 {
1888     if (false)
1889         dataLog("Relinquished the conn.\n");
1890     
1891     sanitizeStackForVM(m_vm);
1892     
1893     auto locker = holdLock(*m_threadLock);
1894     if (!m_requests.isEmpty())
1895         m_threadCondition->notifyOne(locker);
1896     ParkingLot::unparkAll(&m_worldState);
1897 }
1898
1899 void Heap::relinquishConn()
1900 {
1901     while (relinquishConn(m_worldState.load())) { }
1902 }
1903
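// The handleGCDidJIT/handleNeedFinalize helpers below share a pattern: they return false when
// the relevant bit is clear (nothing to do) and true when the caller's while-loop should reload
// m_worldState and retry. Note that they also return true after successfully doing the work;
// the retry then observes the cleared bit and terminates the loop.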
1904 bool Heap::handleGCDidJIT(unsigned oldState)
1905 {
1906     RELEASE_ASSERT(oldState & hasAccessBit);
1907     if (!(oldState & gcDidJITBit))
1908         return false;
1909     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1910         WTF::crossModifyingCodeFence();
1911         return true;
1912     }
1913     return true;
1914 }
1915
1916 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1917 {
1918     RELEASE_ASSERT(oldState & hasAccessBit);
1919     RELEASE_ASSERT(!(oldState & stoppedBit));
1920     
1921     if (!(oldState & needFinalizeBit))
1922         return false;
1923     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1924         finalize();
1925         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1926         // which case they would be waiting for us to release heap access.
1927         ParkingLot::unparkAll(&m_worldState);
1928         return true;
1929     }
1930     return true;
1931 }
1932
1933 void Heap::handleGCDidJIT()
1934 {
1935     while (handleGCDidJIT(m_worldState.load())) { }
1936 }
1937
1938 void Heap::handleNeedFinalize()
1939 {
1940     while (handleNeedFinalize(m_worldState.load())) { }
1941 }
1942
1943 void Heap::setGCDidJIT()
1944 {
1945     m_worldState.transaction(
1946         [&] (unsigned& state) -> bool {
1947             RELEASE_ASSERT(state & stoppedBit);
1948             state |= gcDidJITBit;
1949             return true;
1950         });
1951 }
1952
1953 void Heap::setNeedFinalize()
1954 {
1955     m_worldState.exchangeOr(needFinalizeBit);
1956     ParkingLot::unparkAll(&m_worldState);
1957     m_stopIfNecessaryTimer->scheduleSoon();
1958 }
1959
1960 void Heap::waitWhileNeedFinalize()
1961 {
1962     for (;;) {
1963         unsigned oldState = m_worldState.load();
1964         if (!(oldState & needFinalizeBit)) {
1965             // This means that either there was no finalize request or the main thread will finalize
1966             // with heap access, so a subsequent call to stopTheWorld() will return only when
1967             // finalize finishes.
1968             return;
1969         }
1970         ParkingLot::compareAndPark(&m_worldState, oldState);
1971     }
1972 }
1973
1974 void Heap::setMutatorWaiting()
1975 {
1976     m_worldState.exchangeOr(mutatorWaitingBit);
1977 }
1978
1979 void Heap::clearMutatorWaiting()
1980 {
1981     m_worldState.exchangeAnd(~mutatorWaitingBit);
1982 }
1983
1984 void Heap::notifyThreadStopping(const AbstractLocker&)
1985 {
1986     m_threadIsStopping = true;
1987     clearMutatorWaiting();
1988     ParkingLot::unparkAll(&m_worldState);
1989 }
1990
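// Runs end-of-cycle cleanup on a thread that has heap access: deleting unmarked compiled code
// and source provider caches, sweeping large allocations, clearing the HasOwnPropertyCache,
// running registered heap finalizer callbacks, and optionally sweeping synchronously.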
1991 void Heap::finalize()
1992 {
1993     MonotonicTime before;
1994     if (Options::logGC()) {
1995         before = MonotonicTime::now();
1996         dataLog("[GC<", RawPointer(this), ">: finalize ");
1997     }
1998     
1999     {
2000         SweepingScope helpingGCScope(*this);
2001         deleteUnmarkedCompiledCode();
2002         deleteSourceProviderCaches();
2003         sweepLargeAllocations();
2004     }
2005     
2006     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2007         cache->clear();
2008     
2009     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2010         callback.run(*vm());
2011     
2012     if (Options::sweepSynchronously())
2013         sweepSynchronously();
2014
2015     if (Options::logGC()) {
2016         MonotonicTime after = MonotonicTime::now();
2017         dataLog((after - before).milliseconds(), "ms]\n");
2018     }
2019 }
2020
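// Appends a GC request to the queue and returns a ticket that waitForCollection() can block on;
// for example, a synchronous collection can be expressed as
// waitForCollection(requestCollection(request)).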
2021 Heap::Ticket Heap::requestCollection(GCRequest request)
2022 {
2023     stopIfNecessary();
2024     
2025     ASSERT(vm()->currentThreadIsHoldingAPILock());
2026     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
2027     
2028     LockHolder locker(*m_threadLock);
2029     // We may be able to steal the conn. That only works if the collector is definitely not running
2030     // right now. This is an optimization that prevents the collector thread from ever starting in most
2031     // cases.
2032     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2033     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2034         if (false)
2035             dataLog("Taking the conn.\n");
2036         m_worldState.exchangeOr(mutatorHasConnBit);
2037     }
2038     
2039     m_requests.append(request);
2040     m_lastGrantedTicket++;
2041     if (!(m_worldState.load() & mutatorHasConnBit))
2042         m_threadCondition->notifyOne(locker);
2043     return m_lastGrantedTicket;
2044 }
2045
2046 void Heap::waitForCollection(Ticket ticket)
2047 {
2048     waitForCollector(
2049         [&] (const AbstractLocker&) -> bool {
2050             return m_lastServedTicket >= ticket;
2051         });
2052 }
2053
2054 void Heap::sweepLargeAllocations()
2055 {
2056     m_objectSpace.sweepLargeAllocations();
2057 }
2058
2059 void Heap::suspendCompilerThreads()
2060 {
2061 #if ENABLE(DFG_JIT)
2062     // We ensure that the worklists exist so that it's not possible for the mutator to start a new
2063     // worklist after we have suspended the ones it had started before. That's not very expensive
2064     // since the worklists use AutomaticThreads anyway.
2065     for (unsigned i = DFG::numberOfWorklists(); i--;)
2066         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2067 #endif
2068 }
2069
2070 void Heap::willStartCollection()
2071 {
2072     if (Options::logGC())
2073         dataLog("=> ");
2074     
2075     if (shouldDoFullCollection()) {
2076         m_collectionScope = CollectionScope::Full;
2077         m_shouldDoFullCollection = false;
2078         if (Options::logGC())
2079             dataLog("FullCollection, ");
2080         if (false)
2081             dataLog("Full collection!\n");
2082     } else {
2083         m_collectionScope = CollectionScope::Eden;
2084         if (Options::logGC())
2085             dataLog("EdenCollection, ");
2086         if (false)
2087             dataLog("Eden collection!\n");
2088     }
2089     if (m_collectionScope == CollectionScope::Full) {
2090         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2091         m_extraMemorySize = 0;
2092         m_deprecatedExtraMemorySize = 0;
2093 #if ENABLE(RESOURCE_USAGE)
2094         m_externalMemorySize = 0;
2095 #endif
2096
2097         if (m_fullActivityCallback)
2098             m_fullActivityCallback->willCollect();
2099     } else {
2100         ASSERT(m_collectionScope == CollectionScope::Eden);
2101         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2102     }
2103
2104     if (m_edenActivityCallback)
2105         m_edenActivityCallback->willCollect();
2106
2107     for (auto* observer : m_observers)
2108         observer->willGarbageCollect();
2109 }
2110
2111 void Heap::prepareForMarking()
2112 {
2113     m_objectSpace.prepareForMarking();
2114 }
2115
2116 void Heap::reapWeakHandles()
2117 {
2118     m_objectSpace.reapWeakSets();
2119 }
2120
2121 void Heap::pruneStaleEntriesFromWeakGCMaps()
2122 {
2123     if (m_collectionScope != CollectionScope::Full)
2124         return;
2125     for (auto& pruneCallback : m_weakGCMaps.values())
2126         pruneCallback();
2127 }
2128
2129 void Heap::sweepArrayBuffers()
2130 {
2131     m_arrayBuffers.sweep();
2132 }
2133
2134 void Heap::snapshotUnswept()
2135 {
2136     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2137     m_objectSpace.snapshotUnswept();
2138 }
2139
2140 void Heap::deleteSourceProviderCaches()
2141 {
2142     if (*m_lastCollectionScope == CollectionScope::Full)
2143         m_vm->clearSourceProviderCaches();
2144 }
2145
2146 void Heap::notifyIncrementalSweeper()
2147 {
2148     if (m_collectionScope == CollectionScope::Full) {
2149         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2150             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2151     }
2152
2153     m_sweeper->startSweeping();
2154 }
2155
2156 void Heap::updateAllocationLimits()
2157 {
2158     static const bool verbose = false;
2159     
2160     if (verbose) {
2161         dataLog("\n");
2162         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2163         dataLog("webAssemblyFastMemoriesAllocatedThisCycle = ", m_webAssemblyFastMemoriesAllocatedThisCycle, "\n");
2164     }
2165     
2166     // Calculate our current heap size threshold for the purpose of figuring out when we should
2167     // run another collection. This isn't the same as either size() or capacity(), though it should
2168     // be somewhere between the two. The key is to match the size calculations involved in calls to
2169     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2170     // fragmentation, we may have size() much smaller than capacity().
2171     size_t currentHeapSize = 0;
2172
2173     // For marked space, we use the total number of bytes visited. This matches the logic for
2174     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2175     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2176     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2177     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2178     currentHeapSize += m_totalBytesVisited;
2179     if (verbose)
2180         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2181
2182     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2183     // extra memory reporting.
2184     currentHeapSize += extraMemorySize();
2185     if (!ASSERT_DISABLED) {
2186         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2187         checkedCurrentHeapSize += extraMemorySize();
2188         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2189     }
2190
2191     if (verbose)
2192         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2193     
2194     if (m_collectionScope == CollectionScope::Full) {
2195         // To avoid pathological GC churn in very small and very large heaps, we set
2196         // the new allocation limit based on the current size of the heap, with a
2197         // fixed minimum.
2198         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2199         if (verbose)
2200             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2201         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2202         if (verbose)
2203             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2204         m_sizeAfterLastFullCollect = currentHeapSize;
2205         if (verbose)
2206             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2207         m_bytesAbandonedSinceLastFullCollect = 0;
2208         if (verbose)
2209             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2210     } else {
2211         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2212         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2213         // But we are sloppy, so we have to defend against the overflow.
2214         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2215         if (verbose)
2216             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2217         m_sizeAfterLastEdenCollect = currentHeapSize;
2218         if (verbose)
2219             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2220         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2221         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2222         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2223             m_shouldDoFullCollection = true;
2224         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2225         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2226         if (verbose)
2227             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2228         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2229         if (verbose)
2230             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
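        // Worked example of the two updates above, with illustrative numbers: if the previous
        // cycle left m_maxHeapSize = 32MB and m_sizeAfterLastCollect = 24MB (an 8MB nursery),
        // and this Eden collection ends with currentHeapSize = 28MB, then m_maxHeapSize becomes
        // 36MB and m_maxEdenSize is again 36MB - 28MB = 8MB, so the nursery size stays fixed.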
2231         if (m_fullActivityCallback) {
2232             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2233             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2234         }
2235     }
2236
2237 #if PLATFORM(IOS)
2238     // Get critical memory threshold for next cycle.
2239     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2240 #endif
2241
2242     m_sizeAfterLastCollect = currentHeapSize;
2243     if (verbose)
2244         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2245     m_bytesAllocatedThisCycle = 0;
2246     m_webAssemblyFastMemoriesAllocatedThisCycle = 0;
2247
2248     if (Options::logGC())
2249         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2250 }
2251
2252 void Heap::didFinishCollection()
2253 {
2254     m_afterGC = MonotonicTime::now();
2255     CollectionScope scope = *m_collectionScope;
2256     if (scope == CollectionScope::Full)
2257         m_lastFullGCLength = m_afterGC - m_beforeGC;
2258     else
2259         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2260
2261 #if ENABLE(RESOURCE_USAGE)
2262     ASSERT(externalMemorySize() <= extraMemorySize());
2263 #endif
2264
2265     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2266         gatherExtraHeapSnapshotData(*heapProfiler);
2267         removeDeadHeapSnapshotNodes(*heapProfiler);
2268     }
2269
2270     if (UNLIKELY(m_verifier))
2271         m_verifier->endGC();
2272
2273     RELEASE_ASSERT(m_collectionScope);
2274     m_lastCollectionScope = m_collectionScope;
2275     m_collectionScope = std::nullopt;
2276
2277     for (auto* observer : m_observers)
2278         observer->didGarbageCollect(scope);
2279 }
2280
2281 void Heap::resumeCompilerThreads()
2282 {
2283 #if ENABLE(DFG_JIT)
2284     for (unsigned i = DFG::numberOfWorklists(); i--;)
2285         DFG::existingWorklistForIndex(i).resumeAllThreads();
2286 #endif
2287 }
2288
2289 GCActivityCallback* Heap::fullActivityCallback()
2290 {
2291     return m_fullActivityCallback.get();
2292 }
2293
2294 GCActivityCallback* Heap::edenActivityCallback()
2295 {
2296     return m_edenActivityCallback.get();
2297 }
2298
2299 IncrementalSweeper& Heap::sweeper()
2300 {
2301     return *m_sweeper;
2302 }
2303
2304 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2305 {
2306     if (m_fullActivityCallback)
2307         m_fullActivityCallback->setEnabled(enable);
2308     if (m_edenActivityCallback)
2309         m_edenActivityCallback->setEnabled(enable);
2310 }
2311
2312 void Heap::didAllocate(size_t bytes)
2313 {
2314     if (m_edenActivityCallback)
2315         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2316     m_bytesAllocatedThisCycle += bytes;
2317     performIncrement(bytes);
2318 }
2319
2320 void Heap::didAllocateWebAssemblyFastMemories(size_t count)
2321 {
2322     m_webAssemblyFastMemoriesAllocatedThisCycle += count;
2323 }
2324
2325 bool Heap::isValidAllocation(size_t)
2326 {
2327     if (!isValidThreadState(m_vm))
2328         return false;
2329
2330     if (isCurrentThreadBusy())
2331         return false;
2332     
2333     return true;
2334 }
2335
2336 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2337 {
2338     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2339 }
2340
2341 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2342 {
2343     HandleSlot slot = handle.slot();
2344     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2345     finalizer(slot->asCell());
2346     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2347 }
2348
2349 void Heap::addExecutable(ExecutableBase* executable)
2350 {
2351     m_executables.append(executable);
2352 }
2353
2354 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2355 {
2356     if (!m_fullActivityCallback) {
2357         collectNow(synchronousness, CollectionScope::Full);
2358         return;
2359     }
2360
2361     if (m_fullActivityCallback->didGCRecently()) {
2362         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2363         reportAbandonedObjectGraph();
2364         return;
2365     }
2366
2367     m_fullActivityCallback->setDidGCRecently();
2368     collectNow(synchronousness, CollectionScope::Full);
2369 }
2370
2371 bool Heap::shouldDoFullCollection()
2372 {
2373     if (!Options::useGenerationalGC())
2374         return true;
2375
2376     if (!m_currentRequest.scope)
2377         return m_shouldDoFullCollection || webAssemblyFastMemoriesThisCycleAtThreshold() || overCriticalMemoryThreshold();
2378     return *m_currentRequest.scope == CollectionScope::Full;
2379 }
2380
2381 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2382 {
2383     m_logicallyEmptyWeakBlocks.append(block);
2384 }
2385
2386 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2387 {
2388     if (m_logicallyEmptyWeakBlocks.isEmpty())
2389         return;
2390
2391     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2392     while (sweepNextLogicallyEmptyWeakBlock()) { }
2393 }
2394
2395 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2396 {
2397     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2398         return false;
2399
2400     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2401
2402     block->sweep();
2403     if (block->isEmpty()) {
2404         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2405         m_logicallyEmptyWeakBlocks.removeLast();
2406         WeakBlock::destroy(*this, block);
2407     } else
2408         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2409
2410     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2411         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2412         return false;
2413     }
2414
2415     return true;
2416 }
2417
2418 size_t Heap::visitCount()
2419 {
2420     size_t result = 0;
2421     forEachSlotVisitor(
2422         [&] (SlotVisitor& visitor) {
2423             result += visitor.visitCount();
2424         });
2425     return result;
2426 }
2427
2428 size_t Heap::bytesVisited()
2429 {
2430     size_t result = 0;
2431     forEachSlotVisitor(
2432         [&] (SlotVisitor& visitor) {
2433             result += visitor.bytesVisited();
2434         });
2435     return result;
2436 }
2437
2438 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2439 {
2440     // We don't know the full set of CodeBlocks until compilation has terminated.
2441     completeAllJITPlans();
2442
2443     return m_codeBlocks->iterate(func);
2444 }
2445
2446 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2447 {
2448     return m_codeBlocks->iterate(locker, func);
2449 }
2450
2451 void Heap::writeBarrierSlowPath(const JSCell* from)
2452 {
2453     if (UNLIKELY(mutatorShouldBeFenced())) {
2454         // In this case, the barrierThreshold is the tautological threshold, so from may not actually
2455         // be black. But we can't know for sure until we fire off a fence.
2456         WTF::storeLoadFence();
2457         if (from->cellState() != CellState::PossiblyBlack)
2458             return;
2459     }
2460     
2461     addToRememberedSet(from);
2462 }
2463
2464 bool Heap::isCurrentThreadBusy()
2465 {
2466     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2467 }
2468
2469 void Heap::reportExtraMemoryVisited(size_t size)
2470 {
2471     size_t* counter = &m_extraMemorySize;
2472     
2473     for (;;) {
2474         size_t oldSize = *counter;
2475         // FIXME: Change this to use SaturatedArithmetic when available.
2476         // https://bugs.webkit.org/show_bug.cgi?id=170411
2477         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2478         checkedNewSize += size;
2479         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2480         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2481             return;
2482     }
2483 }
2484
2485 #if ENABLE(RESOURCE_USAGE)
2486 void Heap::reportExternalMemoryVisited(size_t size)
2487 {
2488     size_t* counter = &m_externalMemorySize;
2489
2490     for (;;) {
2491         size_t oldSize = *counter;
2492         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2493             return;
2494     }
2495 }
2496 #endif
2497
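// Decides whether this allocation should trigger a collection. If a GCDeferralContext is
// provided or GC is currently deferred, the need to collect is only recorded
// (deferralContext->m_shouldGC / m_didDeferGCWork); otherwise we request an asynchronous
// collection once this cycle's allocation budget has been exceeded (Options::gcMaxHeapSize()
// when set, otherwise m_maxEdenSize, possibly reduced when over the critical memory threshold)
// or WebAssembly fast memories hit their threshold.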
2498 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2499 {
2500     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2501
2502     if (!m_isSafeToCollect)
2503         return;
2504     switch (mutatorState()) {
2505     case MutatorState::Running:
2506     case MutatorState::Allocating:
2507         break;
2508     case MutatorState::Sweeping:
2509     case MutatorState::Collecting:
2510         return;
2511     }
2512     if (!Options::useGC())
2513         return;
2514     
2515     if (mayNeedToStop()) {
2516         if (deferralContext)
2517             deferralContext->m_shouldGC = true;
2518         else if (isDeferred())
2519             m_didDeferGCWork = true;
2520         else
2521             stopIfNecessary();
2522     }
2523     
2524     if (UNLIKELY(Options::gcMaxHeapSize())) {
2525         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2526             return;
2527     } else {
2528         size_t bytesAllowedThisCycle = m_maxEdenSize;
2529
2530 #if PLATFORM(IOS)
2531         if (overCriticalMemoryThreshold())
2532             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2533 #endif
2534
2535         if (!webAssemblyFastMemoriesThisCycleAtThreshold()
2536             && m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2537             return;
2538     }
2539
2540     if (deferralContext)
2541         deferralContext->m_shouldGC = true;
2542     else if (isDeferred())
2543         m_didDeferGCWork = true;
2544     else {
2545         collectAsync();
2546         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2547     }
2548 }
2549
2550 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2551 {
2552     // Can't do anything if we're still deferred.
2553     if (m_deferralDepth)
2554         return;
2555     
2556     ASSERT(!isDeferred());
2557     
2558     m_didDeferGCWork = false;
2559     // FIXME: Bring back something like the DeferGCProbability mode.
2560     // https://bugs.webkit.org/show_bug.cgi?id=166627
2561     collectIfNecessaryOrDefer();
2562 }
2563
2564 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2565 {
2566     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2567 }
2568
2569 void Heap::unregisterWeakGCMap(void* weakGCMap)
2570 {
2571     m_weakGCMaps.remove(weakGCMap);
2572 }
2573
2574 void Heap::didAllocateBlock(size_t capacity)
2575 {
2576 #if ENABLE(RESOURCE_USAGE)
2577     m_blockBytesAllocated += capacity;
2578 #else
2579     UNUSED_PARAM(capacity);
2580 #endif
2581 }
2582
2583 void Heap::didFreeBlock(size_t capacity)
2584 {
2585 #if ENABLE(RESOURCE_USAGE)
2586     m_blockBytesAllocated -= capacity;
2587 #else
2588     UNUSED_PARAM(capacity);
2589 #endif
2590 }
2591
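// Registers the core marking constraints that the marking fixpoint re-evaluates each cycle:
// the conservative stack scan, misc small roots (protected values, small strings, exceptions),
// strong handles, debugger/profiler roots, JIT stub routines, weak sets, weak reference
// harvesters, DFG worklists, executing CodeBlocks, and the mutator/race mark stacks.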
2592 void Heap::addCoreConstraints()
2593 {
2594     m_constraintSet->add(
2595         "Cs", "Conservative Scan",
2596         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2597             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2598             m_objectSpace.prepareForConservativeScan();
2599             ConservativeRoots conservativeRoots(*this);
2600             SuperSamplerScope superSamplerScope(false);
2601             gatherStackRoots(conservativeRoots);
2602             gatherJSStackRoots(conservativeRoots);
2603             gatherScratchBufferRoots(conservativeRoots);
2604             slotVisitor.append(conservativeRoots);
2605         },
2606         ConstraintVolatility::GreyedByExecution);
2607     
2608     m_constraintSet->add(
2609         "Msr", "Misc Small Roots",
2610         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2611 #if JSC_OBJC_API_ENABLED
2612             scanExternalRememberedSet(*m_vm, slotVisitor);
2613 #endif
2614
2615             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2616                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2617             
2618             for (auto& pair : m_protectedValues)
2619                 slotVisitor.appendUnbarriered(pair.key);
2620             
2621             if (m_markListSet && m_markListSet->size())
2622                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2623             
2624             slotVisitor.appendUnbarriered(m_vm->exception());
2625             slotVisitor.appendUnbarriered(m_vm->lastException());
2626         },
2627         ConstraintVolatility::GreyedByExecution);
2628     
2629     m_constraintSet->add(
2630         "Sh", "Strong Handles",
2631         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2632             m_handleSet.visitStrongHandles(slotVisitor);
2633             m_handleStack.visit(slotVisitor);
2634         },
2635         ConstraintVolatility::GreyedByExecution);
2636     
2637     m_constraintSet->add(
2638         "D", "Debugger",
2639         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2640 #if ENABLE(SAMPLING_PROFILER)
2641             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2642                 LockHolder locker(samplingProfiler->getLock());
2643                 samplingProfiler->processUnverifiedStackTraces();
2644                 samplingProfiler->visit(slotVisitor);
2645                 if (Options::logGC() == GCLogging::Verbose)
2646                     dataLog("Sampling Profiler data:\n", slotVisitor);
2647             }
2648 #endif // ENABLE(SAMPLING_PROFILER)
2649             
2650             if (m_vm->typeProfiler())
2651                 m_vm->typeProfilerLog()->visit(slotVisitor);
2652             
2653             m_vm->shadowChicken().visitChildren(slotVisitor);
2654         },
2655         ConstraintVolatility::GreyedByExecution);
2656     
2657     m_constraintSet->add(
2658         "Jsr", "JIT Stub Routines",
2659         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2660             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2661         },
2662         ConstraintVolatility::GreyedByExecution);
2663     
2664     m_constraintSet->add(
2665         "Ws", "Weak Sets",
2666         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2667             m_objectSpace.visitWeakSets(slotVisitor);
2668         },
2669         ConstraintVolatility::GreyedByMarking);
2670     
2671     m_constraintSet->add(
2672         "Wrh", "Weak Reference Harvesters",
2673         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2674             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2675                 current->visitWeakReferences(slotVisitor);
2676         },
2677         ConstraintVolatility::GreyedByMarking);
2678     
2679 #if ENABLE(DFG_JIT)
2680     m_constraintSet->add(
2681         "Dw", "DFG Worklists",
2682         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2683             for (unsigned i = DFG::numberOfWorklists(); i--;)
2684                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2685             
2686             // FIXME: This is almost certainly unnecessary.
2687             // https://bugs.webkit.org/show_bug.cgi?id=166829
2688             DFG::iterateCodeBlocksForGC(
2689                 *m_vm,
2690                 [&] (CodeBlock* codeBlock) {
2691                     slotVisitor.appendUnbarriered(codeBlock);
2692                 });
2693             
2694             if (Options::logGC() == GCLogging::Verbose)
2695                 dataLog("DFG Worklists:\n", slotVisitor);
2696         },
2697         ConstraintVolatility::GreyedByMarking);
2698 #endif
2699     
2700     m_constraintSet->add(
2701         "Cb", "CodeBlocks",
2702         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2703             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2704                 [&] (CodeBlock* codeBlock) {
2705                     // Visit the CodeBlock as a constraint only if it's black.
2706                     if (Heap::isMarked(codeBlock)
2707                         && codeBlock->cellState() == CellState::PossiblyBlack)
2708                         slotVisitor.visitAsConstraint(codeBlock);
2709                 });
2710         },
2711         ConstraintVolatility::SeldomGreyed);
2712     
2713     m_constraintSet->add(
2714         "Mrms", "Mutator+Race Mark Stack",
2715         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2716             // Indicate to the fixpoint that we introduced work!
2717             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2718             slotVisitor.addToVisitCount(size);
2719             
2720             if (Options::logGC())
2721                 dataLog("(", size, ")");
2722             
2723             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2724             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2725         },
2726         [this] (SlotVisitor&) -> double {
2727             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2728         },
2729         ConstraintVolatility::GreyedByExecution);
2730 }
2731
2732 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2733 {
2734     PreventCollectionScope preventCollectionScope(*this);
2735     m_constraintSet->add(WTFMove(constraint));
2736 }
2737
2738 void Heap::notifyIsSafeToCollect()
2739 {
2740     MonotonicTime before;
2741     if (Options::logGC()) {
2742         before = MonotonicTime::now();
2743         dataLog("[GC<", RawPointer(this), ">: starting ");
2744     }
2745     
2746     addCoreConstraints();
2747     
2748     m_isSafeToCollect = true;
2749     
2750     if (Options::collectContinuously()) {
2751         m_collectContinuouslyThread = WTF::Thread::create(
2752             "JSC DEBUG Continuous GC",
2753             [this] () {
2754                 MonotonicTime initialTime = MonotonicTime::now();
2755                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2756                 while (!m_shouldStopCollectingContinuously) {
2757                     {
2758                         LockHolder locker(*m_threadLock);
2759                         if (m_requests.isEmpty()) {
2760                             m_requests.append(std::nullopt);
2761                             m_lastGrantedTicket++;
2762                             m_threadCondition->notifyOne(locker);
2763                         }
2764                     }
2765                     
2766                     {
2767                         LockHolder locker(m_collectContinuouslyLock);
2768                         Seconds elapsed = MonotonicTime::now() - initialTime;
2769                         Seconds elapsedInPeriod = elapsed % period;
2770                         MonotonicTime timeToWakeUp =
2771                             initialTime + elapsed - elapsedInPeriod + period;
2772                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2773                             m_collectContinuouslyCondition.waitUntil(
2774                                 m_collectContinuouslyLock, timeToWakeUp);
2775                         }
2776                     }
2777                 }
2778             });
2779     }
2780     
2781     if (Options::logGC())
2782         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2783 }
2784
2785 void Heap::preventCollection()
2786 {
2787     if (!m_isSafeToCollect)
2788         return;
2789     
2790     // This prevents the collectContinuously thread from starting a collection.
2791     m_collectContinuouslyLock.lock();
2792     
2793     // Wait for all collections to finish.
2794     waitForCollector(
2795         [&] (const AbstractLocker&) -> bool {
2796             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2797             return m_lastServedTicket == m_lastGrantedTicket;
2798         });
2799     
2800     // Now a collection can only start if this thread starts it.
2801     RELEASE_ASSERT(!m_collectionScope);
2802 }
2803
2804 void Heap::allowCollection()
2805 {
2806     if (!m_isSafeToCollect)
2807         return;
2808     
2809     m_collectContinuouslyLock.unlock();
2810 }
2811
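// Applies func to the collector visitor, the mutator visitor, and every parallel visitor while
// holding m_parallelSlotVisitorLock, so the set of parallel visitors cannot change underneath us.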
2812 template<typename Func>
2813 void Heap::forEachSlotVisitor(const Func& func)
2814 {
2815     auto locker = holdLock(m_parallelSlotVisitorLock);
2816     func(*m_collectorSlotVisitor);
2817     func(*m_mutatorSlotVisitor);
2818     for (auto& slotVisitor : m_parallelSlotVisitors)
2819         func(*slotVisitor);
2820 }
2821
2822 void Heap::setMutatorShouldBeFenced(bool value)
2823 {
2824     m_mutatorShouldBeFenced = value;
2825     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2826 }
2827
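// Converts mutator allocation into concurrent-marking work: each allocated byte is credited at
// Options::gcIncrementScale(), and once the balance reaches Options::gcIncrementBytes() the
// mutator's SlotVisitor drains up to Options::gcIncrementMaxBytes() worth of marking, with any
// overshoot remembered as a negative balance.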
2828 void Heap::performIncrement(size_t bytes)
2829 {
2830     if (!m_objectSpace.isMarking())
2831         return;
2832
2833     m_incrementBalance += bytes * Options::gcIncrementScale();
2834
2835     // Save ourselves from craziness. Since this is only an optimization, it's OK to go back to any
2836     // consistent state when the double goes wild.
2837     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2838         m_incrementBalance = 0;
2839     
2840     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2841         return;
2842
2843     double targetBytes = m_incrementBalance;
2844     if (targetBytes <= 0)
2845         return;
2846     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2847
2848     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2849     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2850     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2851     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2852     m_incrementBalance -= bytesVisited;
2853 }
2854
2855 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2856 {
2857     m_heapFinalizerCallbacks.append(callback);
2858 }
2859
2860 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2861 {
2862     m_heapFinalizerCallbacks.removeFirst(callback);
2863 }
2864
2865 } // namespace JSC