GC constraint solving should be parallel
Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "JSWebAssemblyCodeBlock.h"
51 #include "MachineStackMarker.h"
52 #include "MarkStackMergingConstraint.h"
53 #include "MarkedAllocatorInlines.h"
54 #include "MarkedSpaceInlines.h"
55 #include "MarkingConstraintSet.h"
56 #include "PreventCollectionScope.h"
57 #include "SamplingProfiler.h"
58 #include "ShadowChicken.h"
59 #include "SpaceTimeMutatorScheduler.h"
60 #include "SubspaceInlines.h"
61 #include "SuperSampler.h"
62 #include "StochasticSpaceTimeMutatorScheduler.h"
63 #include "StopIfNecessaryTimer.h"
64 #include "SweepingScope.h"
65 #include "SynchronousStopTheWorldMutatorScheduler.h"
66 #include "TypeProfiler.h"
67 #include "TypeProfilerLog.h"
68 #include "UnlinkedCodeBlock.h"
69 #include "VM.h"
70 #include "VisitCounter.h"
71 #include "WasmMemory.h"
72 #include "WeakSetInlines.h"
73 #include <algorithm>
74 #if PLATFORM(IOS)
75 #include <bmalloc/bmalloc.h>
76 #endif
77 #include <wtf/CurrentTime.h>
78 #include <wtf/ListDump.h>
79 #include <wtf/MainThread.h>
80 #include <wtf/ParallelVectorIterator.h>
81 #include <wtf/ProcessID.h>
82 #include <wtf/RAMSize.h>
83 #include <wtf/SimpleStats.h>
84 #include <wtf/Threading.h>
85
86 #if USE(FOUNDATION)
87 #if __has_include(<objc/objc-internal.h>)
88 #include <objc/objc-internal.h>
89 #else
90 extern "C" void* objc_autoreleasePoolPush(void);
91 extern "C" void objc_autoreleasePoolPop(void *context);
92 #endif
93 #endif // USE(FOUNDATION)
94
95 using namespace std;
96
97 namespace JSC {
98
99 namespace {
100
101 bool verboseStop = false;
102
103 double maxPauseMS(double thisPauseMS)
104 {
105     static double maxPauseMS;
106     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
107     return maxPauseMS;
108 }
109
110 size_t minHeapSize(HeapType heapType, size_t ramSize)
111 {
112     if (heapType == LargeHeap) {
113         double result = min(
114             static_cast<double>(Options::largeHeapSize()),
115             ramSize * Options::smallHeapRAMFraction());
116         return static_cast<size_t>(result);
117     }
118     return Options::smallHeapSize();
119 }
120
121 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
122 {
123 #if PLATFORM(IOS)
124     size_t memoryFootprint = bmalloc::api::memoryFootprint();
125     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
126         return Options::smallHeapGrowthFactor() * heapSize;
127     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
128         return Options::mediumHeapGrowthFactor() * heapSize;
129 #else
130     if (heapSize < ramSize * Options::smallHeapRAMFraction())
131         return Options::smallHeapGrowthFactor() * heapSize;
132     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
133         return Options::mediumHeapGrowthFactor() * heapSize;
134 #endif
135     return Options::largeHeapGrowthFactor() * heapSize;
136 }
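
// A worked example of the growth policy above, using illustrative option values
// rather than the shipping defaults: suppose smallHeapRAMFraction = 0.25,
// mediumHeapRAMFraction = 0.5, growth factors of 2.0 / 1.5 / 1.25, and
// ramSize = 4GB (so the tier boundaries are 1GB and 2GB):
//
//     heapSize = 512MB  -> small tier   -> returns 2.0  * 512MB  = 1GB
//     heapSize = 1.5GB  -> medium tier  -> returns 1.5  * 1.5GB  = 2.25GB
//     heapSize = 3GB    -> large tier   -> returns 1.25 * 3GB    = 3.75GB
//
// On PLATFORM(IOS) the tier is picked from bmalloc's process-wide memory
// footprint instead of heapSize, but the arithmetic is the same.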
137
138 bool isValidSharedInstanceThreadState(VM* vm)
139 {
140     return vm->currentThreadIsHoldingAPILock();
141 }
142
143 bool isValidThreadState(VM* vm)
144 {
145     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
146         return false;
147
148     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
149         return false;
150
151     return true;
152 }
153
154 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
155 {
156     const char* typeName = "[unknown]";
157     const ClassInfo* info = cell->classInfo(vm);
158     if (info && info->className)
159         typeName = info->className;
160     set.add(typeName);
161 }
162
163 bool measurePhaseTiming()
164 {
165     return false;
166 }
167
168 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
169 {
170     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
171     static std::once_flag once;
172     std::call_once(
173         once,
174         [] {
175             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
176         });
177     return *result;
178 }
179
180 SimpleStats& timingStats(const char* name, CollectionScope scope)
181 {
182     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
183 }
184
185 class TimingScope {
186 public:
187     TimingScope(std::optional<CollectionScope> scope, const char* name)
188         : m_scope(scope)
189         , m_name(name)
190     {
191         if (measurePhaseTiming())
192             m_before = monotonicallyIncreasingTimeMS();
193     }
194     
195     TimingScope(Heap& heap, const char* name)
196         : TimingScope(heap.collectionScope(), name)
197     {
198     }
199     
200     void setScope(std::optional<CollectionScope> scope)
201     {
202         m_scope = scope;
203     }
204     
205     void setScope(Heap& heap)
206     {
207         setScope(heap.collectionScope());
208     }
209     
210     ~TimingScope()
211     {
212         if (measurePhaseTiming()) {
213             double after = monotonicallyIncreasingTimeMS();
214             double timing = after - m_before;
215             SimpleStats& stats = timingStats(m_name, *m_scope);
216             stats.add(timing);
217             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
218         }
219     }
220 private:
221     std::optional<CollectionScope> m_scope;
222     double m_before;
223     const char* m_name;
224 };
225
226 } // anonymous namespace
227
228 class Heap::Thread : public AutomaticThread {
229 public:
230     Thread(const AbstractLocker& locker, Heap& heap)
231         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
232         , m_heap(heap)
233     {
234     }
235     
236 protected:
237     PollResult poll(const AbstractLocker& locker) override
238     {
239         if (m_heap.m_threadShouldStop) {
240             m_heap.notifyThreadStopping(locker);
241             return PollResult::Stop;
242         }
243         if (m_heap.shouldCollectInCollectorThread(locker))
244             return PollResult::Work;
245         return PollResult::Wait;
246     }
247     
248     WorkResult work() override
249     {
250         m_heap.collectInCollectorThread();
251         return WorkResult::Continue;
252     }
253     
254     void threadDidStart() override
255     {
256         WTF::registerGCThread(GCThreadType::Main);
257     }
258
259 private:
260     Heap& m_heap;
261 };
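
// The collector thread follows WTF::AutomaticThread's poll/work protocol: the
// thread sleeps on m_threadCondition, and when notified, poll() runs under the
// lock to decide what to do next, while work() runs with the lock dropped. A
// minimal sketch of the same shape (constructor forwarding and the condition
// plumbing elided; names other than the AutomaticThread API are illustrative):
//
//     class Worker : public AutomaticThread {
//     protected:
//         PollResult poll(const AbstractLocker&) override
//         {
//             if (m_shouldStop)
//                 return PollResult::Stop;   // tear the thread down
//             return m_hasWork ? PollResult::Work : PollResult::Wait;
//         }
//         WorkResult work() override
//         {
//             doOneUnitOfWork();             // lock not held here
//             return WorkResult::Continue;   // loop back to poll()
//         }
//     };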
262
263 Heap::Heap(VM* vm, HeapType heapType)
264     : m_heapType(heapType)
265     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
266     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
267     , m_sizeAfterLastCollect(0)
268     , m_sizeAfterLastFullCollect(0)
269     , m_sizeBeforeLastFullCollect(0)
270     , m_sizeAfterLastEdenCollect(0)
271     , m_sizeBeforeLastEdenCollect(0)
272     , m_bytesAllocatedThisCycle(0)
273     , m_bytesAbandonedSinceLastFullCollect(0)
274     , m_maxEdenSize(m_minBytesPerCycle)
275     , m_maxHeapSize(m_minBytesPerCycle)
276     , m_shouldDoFullCollection(false)
277     , m_totalBytesVisited(0)
278     , m_objectSpace(this)
279     , m_extraMemorySize(0)
280     , m_deprecatedExtraMemorySize(0)
281     , m_machineThreads(std::make_unique<MachineThreads>())
282     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
283     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
284     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
285     , m_raceMarkStack(std::make_unique<MarkStackArray>())
286     , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this))
287     , m_handleSet(vm)
288     , m_codeBlocks(std::make_unique<CodeBlockSet>())
289     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
290     , m_isSafeToCollect(false)
291     , m_vm(vm)
292     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
293     // schedule the timer if we've never done a collection.
294     , m_lastFullGCLength(0.01)
295     , m_lastEdenGCLength(0.01)
296     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
297     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
298     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
299     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
300     , m_deferralDepth(0)
301 #if USE(FOUNDATION)
302     , m_delayedReleaseRecursionCount(0)
303 #endif
304     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
305     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
306     , m_helperClient(&heapHelperPool())
307     , m_threadLock(Box<Lock>::create())
308     , m_threadCondition(AutomaticThreadCondition::create())
309 {
310     m_worldState.store(0);
311     
312     if (Options::useConcurrentGC()) {
313         if (Options::useStochasticMutatorScheduler())
314             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
315         else
316             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
317     } else {
318         // We simulate turning off concurrent GC by making the scheduler say that the world
319         // should always be stopped when the collector is running.
320         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
321     }
322     
323     if (Options::verifyHeap())
324         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
325     
326     m_collectorSlotVisitor->optimizeForStoppedMutator();
327
328     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
329     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
330     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
331
332     LockHolder locker(*m_threadLock);
333     m_thread = adoptRef(new Thread(locker, *this));
334 }
335
336 Heap::~Heap()
337 {
338     forEachSlotVisitor(
339         [&] (SlotVisitor& visitor) {
340             visitor.clearMarkStacks();
341         });
342     m_mutatorMarkStack->clear();
343     m_raceMarkStack->clear();
344     
345     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
346         WeakBlock::destroy(*this, block);
347 }
348
349 bool Heap::isPagedOut(double deadline)
350 {
351     return m_objectSpace.isPagedOut(deadline);
352 }
353
354 // The VM is being destroyed and the collector will never run again.
355 // Run all pending finalizers now because we won't get another chance.
356 void Heap::lastChanceToFinalize()
357 {
358     MonotonicTime before;
359     if (Options::logGC()) {
360         before = MonotonicTime::now();
361         dataLog("[GC<", RawPointer(this), ">: shutdown ");
362     }
363     
364     RELEASE_ASSERT(!m_vm->entryScope);
365     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
366     
367     if (m_collectContinuouslyThread) {
368         {
369             LockHolder locker(m_collectContinuouslyLock);
370             m_shouldStopCollectingContinuously = true;
371             m_collectContinuouslyCondition.notifyOne();
372         }
373         m_collectContinuouslyThread->waitForCompletion();
374     }
375     
376     if (Options::logGC())
377         dataLog("1");
378     
379     // Prevent new collections from being started. This is probably not even necessary, since we're not
380     // going to call into anything that starts collections. Still, this makes the algorithm more
381     // obviously sound.
382     m_isSafeToCollect = false;
383     
384     if (Options::logGC())
385         dataLog("2");
386
387     bool isCollecting;
388     {
389         auto locker = holdLock(*m_threadLock);
390         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
391         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
392     }
393     if (isCollecting) {
394         if (Options::logGC())
395             dataLog("...]\n");
396         
397         // Wait for the current collection to finish.
398         waitForCollector(
399             [&] (const AbstractLocker&) -> bool {
400                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
401                 return m_lastServedTicket == m_lastGrantedTicket;
402             });
403         
404         if (Options::logGC())
405             dataLog("[GC<", RawPointer(this), ">: shutdown ");
406     }
407     if (Options::logGC())
408         dataLog("3");
409
410     RELEASE_ASSERT(m_requests.isEmpty());
411     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
412     
413     // Carefully bring the thread down.
414     bool stopped = false;
415     {
416         LockHolder locker(*m_threadLock);
417         stopped = m_thread->tryStop(locker);
418         m_threadShouldStop = true;
419         if (!stopped)
420             m_threadCondition->notifyOne(locker);
421     }
422
423     if (Options::logGC())
424         dataLog("4");
425     
426     if (!stopped)
427         m_thread->join();
428     
429     if (Options::logGC())
430         dataLog("5 ");
431     
432     m_arrayBuffers.lastChanceToFinalize();
433     m_codeBlocks->lastChanceToFinalize(*m_vm);
434     m_objectSpace.stopAllocating();
435     m_objectSpace.lastChanceToFinalize();
436     releaseDelayedReleasedObjects();
437
438     sweepAllLogicallyEmptyWeakBlocks();
439     
440     m_objectSpace.freeMemory();
441     
442     if (Options::logGC())
443         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
444 }
445
446 void Heap::releaseDelayedReleasedObjects()
447 {
448 #if USE(FOUNDATION)
449     // We need to guard against the case that releasing an object can create more objects due to the
450     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
451     // back here and could try to recursively release objects. We guard that with a recursive entry
452     // count. Only the initial call will release objects; recursive calls simply return and let
453     // the initial call to the function take care of any objects created during release time.
454     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
455     // and use a temp Vector for the actual releasing.
456     if (!m_delayedReleaseRecursionCount++) {
457         while (!m_delayedReleaseObjects.isEmpty()) {
458             ASSERT(m_vm->currentThreadIsHoldingAPILock());
459
460             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
461
462             {
463                 // We need to drop locks before calling out to arbitrary code.
464                 JSLock::DropAllLocks dropAllLocks(m_vm);
465
466                 void* context = objc_autoreleasePoolPush();
467                 objectsToRelease.clear();
468                 objc_autoreleasePoolPop(context);
469             }
470         }
471     }
472     m_delayedReleaseRecursionCount--;
473 #endif
474 }
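
// The recursion guard above is a general reentrancy pattern: only the outermost
// call drains, and it keeps draining until the queue stays empty, so anything
// appended re-entrantly still gets handled. A standalone sketch of the same
// shape (names are illustrative):
//
//     void drainDeferredWork()
//     {
//         if (!m_recursionCount++) {                // only the outermost call drains
//             while (!m_pending.isEmpty()) {
//                 auto batch = WTFMove(m_pending);  // steal the batch; m_pending is empty again
//                 for (auto& item : batch)
//                     runItem(item);                // may re-enter and refill m_pending
//             }
//         }
//         m_recursionCount--;
//     }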
475
476 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
477 {
478     didAllocate(size);
479     collectIfNecessaryOrDefer();
480 }
481
482 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
483 {
484     // FIXME: Change this to use SaturatedArithmetic when available.
485     // https://bugs.webkit.org/show_bug.cgi?id=170411
486     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
487     checkedNewSize += size;
488     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
489     reportExtraMemoryAllocatedSlowCase(size);
490 }
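
// The Checked<> dance above is just a saturating add: clamp to SIZE_MAX on
// overflow instead of wrapping. Roughly what the SaturatedArithmetic version
// mentioned in the FIXME would boil down to (a sketch, not the WTF API):
//
//     static size_t saturatedAdd(size_t a, size_t b)
//     {
//         size_t sum = a + b;                                        // unsigned add may wrap
//         return sum < a ? std::numeric_limits<size_t>::max() : sum; // wrapped -> clamp
//     }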
491
492 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
493 {
494 #if PLATFORM(IOS)
495     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
496         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
497         m_precentAvailableMemoryCachedCallCount = 0;
498     }
499
500     return m_overCriticalMemoryThreshold;
501 #else
502     UNUSED_PARAM(memoryThresholdCallType);
503     return false;
504 #endif
505 }
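
// The cached call type exists because asking bmalloc for the memory-in-use
// percentage is not free: m_precentAvailableMemoryCachedCallCount lets cached
// callers refresh m_overCriticalMemoryThreshold only on every 100th query, so
// the answer may be slightly stale, while MemoryThresholdCallType::Direct
// forces an immediate refresh.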
506
507 void Heap::reportAbandonedObjectGraph()
508 {
509     // Our clients don't know exactly how much memory they
510     // are abandoning so we just guess for them.
511     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
512
513     // We want to accelerate the next collection. Because memory has just 
514     // been abandoned, the next collection has the potential to 
515     // be more profitable. Since allocation is the trigger for collection, 
516     // we hasten the next collection by pretending that we've allocated more memory. 
517     if (m_fullActivityCallback) {
518         m_fullActivityCallback->didAllocate(
519             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
520     }
521     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
522 }
523
524 void Heap::protect(JSValue k)
525 {
526     ASSERT(k);
527     ASSERT(m_vm->currentThreadIsHoldingAPILock());
528
529     if (!k.isCell())
530         return;
531
532     m_protectedValues.add(k.asCell());
533 }
534
535 bool Heap::unprotect(JSValue k)
536 {
537     ASSERT(k);
538     ASSERT(m_vm->currentThreadIsHoldingAPILock());
539
540     if (!k.isCell())
541         return false;
542
543     return m_protectedValues.remove(k.asCell());
544 }
545
546 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
547 {
548     if (m_arrayBuffers.addReference(cell, buffer)) {
549         collectIfNecessaryOrDefer();
550         didAllocate(buffer->gcSizeEstimateInBytes());
551     }
552 }
553
554 void Heap::finalizeUnconditionalFinalizers()
555 {
556     while (m_unconditionalFinalizers.hasNext()) {
557         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
558         finalizer->finalizeUnconditionally();
559     }
560 }
561
562 void Heap::willStartIterating()
563 {
564     m_objectSpace.willStartIterating();
565 }
566
567 void Heap::didFinishIterating()
568 {
569     m_objectSpace.didFinishIterating();
570 }
571
572 void Heap::completeAllJITPlans()
573 {
574 #if ENABLE(JIT)
575     JITWorklist::instance()->completeAllForVM(*m_vm);
576 #endif // ENABLE(JIT)
577     DFG::completeAllPlansForVM(*m_vm);
578 }
579
580 template<typename Func>
581 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
582 {
583     m_codeBlocks->iterateCurrentlyExecuting(func);
584     DFG::iterateCodeBlocksForGC(*m_vm, func);
585 }
586
587 template<typename Func>
588 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
589 {
590     Vector<CodeBlock*, 256> codeBlocks;
591     iterateExecutingAndCompilingCodeBlocks(
592         [&] (CodeBlock* codeBlock) {
593             codeBlocks.append(codeBlock);
594         });
595     for (CodeBlock* codeBlock : codeBlocks)
596         func(codeBlock);
597 }
598
599 void Heap::assertMarkStacksEmpty()
600 {
601     bool ok = true;
602     
603     if (!m_sharedCollectorMarkStack->isEmpty()) {
604         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
605         ok = false;
606     }
607     
608     if (!m_sharedMutatorMarkStack->isEmpty()) {
609         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
610         ok = false;
611     }
612     
613     forEachSlotVisitor(
614         [&] (SlotVisitor& visitor) {
615             if (visitor.isEmpty())
616                 return;
617             
618             dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
619             ok = false;
620         });
621     
622     RELEASE_ASSERT(ok);
623 }
624
625 void Heap::gatherStackRoots(ConservativeRoots& roots)
626 {
627     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread);
628 }
629
630 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
631 {
632 #if !ENABLE(JIT)
633     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
634 #else
635     UNUSED_PARAM(roots);
636 #endif
637 }
638
639 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
640 {
641 #if ENABLE(DFG_JIT)
642     m_vm->gatherConservativeRoots(roots);
643 #else
644     UNUSED_PARAM(roots);
645 #endif
646 }
647
648 void Heap::beginMarking()
649 {
650     TimingScope timingScope(*this, "Heap::beginMarking");
651     if (m_collectionScope == CollectionScope::Full)
652         m_codeBlocks->clearMarksForFullCollection();
653     m_jitStubRoutines->clearMarks();
654     m_objectSpace.beginMarking();
655     setMutatorShouldBeFenced(true);
656 }
657
658 void Heap::removeDeadCompilerWorklistEntries()
659 {
660 #if ENABLE(DFG_JIT)
661     for (unsigned i = DFG::numberOfWorklists(); i--;)
662         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
663 #endif
664 }
665
666 bool Heap::isHeapSnapshotting() const
667 {
668     HeapProfiler* heapProfiler = m_vm->heapProfiler();
669     if (UNLIKELY(heapProfiler))
670         return heapProfiler->activeSnapshotBuilder();
671     return false;
672 }
673
674 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
675     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
676         : m_builder(builder)
677     {
678     }
679
680     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
681     {
682         if (kind == HeapCell::JSCell) {
683             JSCell* cell = static_cast<JSCell*>(heapCell);
684             cell->methodTable()->heapSnapshot(cell, m_builder);
685         }
686         return IterationStatus::Continue;
687     }
688
689     HeapSnapshotBuilder& m_builder;
690 };
691
692 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
693 {
694     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
695         HeapIterationScope heapIterationScope(*this);
696         GatherHeapSnapshotData functor(*builder);
697         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
698     }
699 }
700
701 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
702     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
703         : m_snapshot(snapshot)
704     {
705     }
706
707     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
708     {
709         if (kind == HeapCell::JSCell)
710             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
711         return IterationStatus::Continue;
712     }
713
714     HeapSnapshot& m_snapshot;
715 };
716
717 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
718 {
719     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
720         HeapIterationScope heapIterationScope(*this);
721         RemoveDeadHeapSnapshotNodes functor(*snapshot);
722         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
723         snapshot->shrinkToFit();
724     }
725 }
726
727 void Heap::updateObjectCounts()
728 {
729     if (m_collectionScope == CollectionScope::Full)
730         m_totalBytesVisited = 0;
731
732     m_totalBytesVisitedThisCycle = bytesVisited();
733     
734     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
735 }
736
737 void Heap::endMarking()
738 {
739     forEachSlotVisitor(
740         [&] (SlotVisitor& visitor) {
741             visitor.reset();
742         });
743
744     assertMarkStacksEmpty();
745     m_weakReferenceHarvesters.removeAll();
746
747     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
748     
749     m_objectSpace.endMarking();
750     setMutatorShouldBeFenced(Options::forceFencedBarrier());
751 }
752
753 size_t Heap::objectCount()
754 {
755     return m_objectSpace.objectCount();
756 }
757
758 size_t Heap::extraMemorySize()
759 {
760     // FIXME: Change this to use SaturatedArithmetic when available.
761     // https://bugs.webkit.org/show_bug.cgi?id=170411
762     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
763     checkedTotal += m_deprecatedExtraMemorySize;
764     checkedTotal += m_arrayBuffers.size();
765     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
766
767     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
768     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
769 }
770
771 size_t Heap::size()
772 {
773     return m_objectSpace.size() + extraMemorySize();
774 }
775
776 size_t Heap::capacity()
777 {
778     return m_objectSpace.capacity() + extraMemorySize();
779 }
780
781 size_t Heap::protectedGlobalObjectCount()
782 {
783     size_t result = 0;
784     forEachProtectedCell(
785         [&] (JSCell* cell) {
786             if (cell->isObject() && asObject(cell)->isGlobalObject())
787                 result++;
788         });
789     return result;
790 }
791
792 size_t Heap::globalObjectCount()
793 {
794     HeapIterationScope iterationScope(*this);
795     size_t result = 0;
796     m_objectSpace.forEachLiveCell(
797         iterationScope,
798         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
799             if (kind != HeapCell::JSCell)
800                 return IterationStatus::Continue;
801             JSCell* cell = static_cast<JSCell*>(heapCell);
802             if (cell->isObject() && asObject(cell)->isGlobalObject())
803                 result++;
804             return IterationStatus::Continue;
805         });
806     return result;
807 }
808
809 size_t Heap::protectedObjectCount()
810 {
811     size_t result = 0;
812     forEachProtectedCell(
813         [&] (JSCell*) {
814             result++;
815         });
816     return result;
817 }
818
819 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
820 {
821     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
822     forEachProtectedCell(
823         [&] (JSCell* cell) {
824             recordType(*vm(), *result, cell);
825         });
826     return result;
827 }
828
829 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
830 {
831     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
832     HeapIterationScope iterationScope(*this);
833     m_objectSpace.forEachLiveCell(
834         iterationScope,
835         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
836             if (kind == HeapCell::JSCell)
837                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
838             return IterationStatus::Continue;
839         });
840     return result;
841 }
842
843 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
844 {
845     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
846         return;
847     
848     PreventCollectionScope preventCollectionScope(*this);
849     
850     // If JavaScript is running, it's not safe to delete all JavaScript code, since
851     // we'll end up returning to deleted code.
852     RELEASE_ASSERT(!m_vm->entryScope);
853     RELEASE_ASSERT(!m_collectionScope);
854
855     completeAllJITPlans();
856
857     for (ExecutableBase* executable : m_executables)
858         executable->clearCode();
859
860 #if ENABLE(WEBASSEMBLY)
861     {
862         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
863         // have no idea that we cleared the code from all of the Executables in the
864         // VM. This could leave Wasm in an inconsistent state where it has an IC that
865         // points into a CodeBlock that could be dead. The IC will still succeed because
866         // it uses a callee check, but then it will call into dead code.
867         HeapIterationScope heapIterationScope(*this);
868         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
869             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
870             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
871             codeBlock->clearJSCallICs(*m_vm);
872         });
873     }
874 #endif
875 }
876
877 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
878 {
879     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
880         return;
881     
882     PreventCollectionScope preventCollectionScope(*this);
883
884     RELEASE_ASSERT(!m_collectionScope);
885     
886     for (ExecutableBase* current : m_executables) {
887         if (!current->isFunctionExecutable())
888             continue;
889         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
890     }
891 }
892
893 void Heap::clearUnmarkedExecutables()
894 {
895     for (unsigned i = m_executables.size(); i--;) {
896         ExecutableBase* current = m_executables[i];
897         if (isMarked(current))
898             continue;
899
900         // Eagerly dereference the Executable's JITCode in order to run watchpoint
901         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
902         current->clearCode();
903         std::swap(m_executables[i], m_executables.last());
904         m_executables.removeLast();
905     }
906
907     m_executables.shrinkToFit();
908 }
909
910 void Heap::deleteUnmarkedCompiledCode()
911 {
912     clearUnmarkedExecutables();
913     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
914     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
915 }
916
917 void Heap::addToRememberedSet(const JSCell* constCell)
918 {
919     JSCell* cell = const_cast<JSCell*>(constCell);
920     ASSERT(cell);
921     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
922     m_barriersExecuted++;
923     if (m_mutatorShouldBeFenced) {
924         WTF::loadLoadFence();
925         if (!isMarked(cell)) {
926             // During a full collection, a store into an unmarked object that had survived past
927             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
928             // object gets marked at some time after this then it will go down the normal marking
929             // path. So, we don't have to remember this object. We could return here. But we go
930             // further and attempt to re-white the object.
931             
932             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
933             
934             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
935                 // Now we protect against this race:
936                 //
937                 //     1) Object starts out black + unmarked.
938                 //     --> We do isMarked here.
939                 //     2) Object is marked and greyed.
940                 //     3) Object is scanned and blacked.
941                 //     --> We do atomicCompareExchangeCellStateStrong here.
942                 //
943                 // In this case we would have made the object white again, even though it should
944                 // be black. This check lets us correct our mistake. This relies on the fact that
945                 // isMarked converges monotonically to true.
946                 if (isMarked(cell)) {
947                     // It's difficult to work out whether the object should be grey or black at
948                     // this point. We say black conservatively.
949                     cell->setCellState(CellState::PossiblyBlack);
950                 }
951                 
952                 // Either way, we can return. Most likely, the object was not marked, and so the
953                 // object is now labeled white. This means that future barrier executions will not
954                 // fire. In the unlikely event that the object had become marked, we can still
955                 // return anyway, since we proved that the object was not marked at the time that
956                 // we executed this slow path.
957             }
958             
959             return;
960         }
961     } else
962         ASSERT(Heap::isMarked(cell));
963     // It could be that the object was *just* marked. This means that the collector may set the
964     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
965     // race with the collector here. If we win then this is accurate because the object _will_
966     // get scanned again. If we lose then someone else will barrier the object again. That would
967     // be unfortunate but not the end of the world.
968     cell->setCellState(CellState::PossiblyGrey);
969     m_mutatorMarkStack->append(cell);
970 }
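
// A compressed view of the race that the re-whitening above defends against,
// with time flowing downward:
//
//     mutator (this slow path)                    collector
//     ------------------------                    ---------
//     observes isMarked(cell) == false
//                                                 marks cell, makes it grey
//                                                 scans cell, makes it PossiblyBlack
//     CAS PossiblyBlack -> DefinitelyWhite succeeds (wrongly whitens a live object)
//     observes isMarked(cell) == true -> setCellState(PossiblyBlack)
//
// The correction is sound because isMarked() converges monotonically to true
// within a cycle, so seeing it true after the CAS proves the collector already
// claimed the object.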
971
972 void Heap::sweepSynchronously()
973 {
974     double before = 0;
975     if (Options::logGC()) {
976         dataLog("Full sweep: ", capacity() / 1024, "kb ");
977         before = currentTimeMS();
978     }
979     m_objectSpace.sweep();
980     m_objectSpace.shrink();
981     if (Options::logGC()) {
982         double after = currentTimeMS();
983         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
984     }
985 }
986
987 void Heap::collect(Synchronousness synchronousness, GCRequest request)
988 {
989     switch (synchronousness) {
990     case Async:
991         collectAsync(request);
992         return;
993     case Sync:
994         collectSync(request);
995         return;
996     }
997     RELEASE_ASSERT_NOT_REACHED();
998 }
999
1000 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1001 {
1002     switch (synchronousness) {
1003     case Async: {
1004         collectAsync(request);
1005         stopIfNecessary();
1006         return;
1007     }
1008         
1009     case Sync: {
1010         collectSync(request);
1011         
1012         DeferGCForAWhile deferGC(*this);
1013         if (UNLIKELY(Options::useImmortalObjects()))
1014             sweeper().stopSweeping();
1015         
1016         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1017         if (!alreadySweptInCollectSync) {
1018             if (Options::logGC())
1019                 dataLog("[GC<", RawPointer(this), ">: ");
1020             sweepSynchronously();
1021             if (Options::logGC())
1022                 dataLog("]\n");
1023         }
1024         m_objectSpace.assertNoUnswept();
1025         
1026         sweepAllLogicallyEmptyWeakBlocks();
1027         return;
1028     } }
1029     RELEASE_ASSERT_NOT_REACHED();
1030 }
1031
1032 void Heap::collectAsync(GCRequest request)
1033 {
1034     if (!m_isSafeToCollect)
1035         return;
1036
1037     bool alreadyRequested = false;
1038     {
1039         LockHolder locker(*m_threadLock);
1040         for (const GCRequest& previousRequest : m_requests) {
1041             if (request.subsumedBy(previousRequest)) {
1042                 alreadyRequested = true;
1043                 break;
1044             }
1045         }
1046     }
1047     if (alreadyRequested)
1048         return;
1049
1050     requestCollection(request);
1051 }
1052
1053 void Heap::collectSync(GCRequest request)
1054 {
1055     if (!m_isSafeToCollect)
1056         return;
1057     
1058     waitForCollection(requestCollection(request));
1059 }
1060
1061 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1062 {
1063     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1064     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1065     
1066     if (false)
1067         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1068     
1069     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1070 }
1071
1072 void Heap::collectInCollectorThread()
1073 {
1074     for (;;) {
1075         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1076         switch (result) {
1077         case RunCurrentPhaseResult::Finished:
1078             return;
1079         case RunCurrentPhaseResult::Continue:
1080             break;
1081         case RunCurrentPhaseResult::NeedCurrentThreadState:
1082             RELEASE_ASSERT_NOT_REACHED();
1083             break;
1084         }
1085     }
1086 }
1087
1088 void Heap::checkConn(GCConductor conn)
1089 {
1090     switch (conn) {
1091     case GCConductor::Mutator:
1092         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1093         return;
1094     case GCConductor::Collector:
1095         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1096         return;
1097     }
1098     RELEASE_ASSERT_NOT_REACHED();
1099 }
1100
1101 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1102 {
1103     checkConn(conn);
1104     m_currentThreadState = currentThreadState;
1105     m_currentThread = &WTF::Thread::current();
1106     
1107     if (conn == GCConductor::Mutator)
1108         sanitizeStackForVM(vm());
1109     
1110     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1111     if (!finishChangingPhase(conn)) {
1112         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1113         // this, but it's probably not the end of the world if it did happen.
1114         if (false)
1115             dataLog("Conn bounce-back.\n");
1116         return RunCurrentPhaseResult::Finished;
1117     }
1118     
1119     bool result = false;
1120     switch (m_currentPhase) {
1121     case CollectorPhase::NotRunning:
1122         result = runNotRunningPhase(conn);
1123         break;
1124         
1125     case CollectorPhase::Begin:
1126         result = runBeginPhase(conn);
1127         break;
1128         
1129     case CollectorPhase::Fixpoint:
1130         if (!currentThreadState && conn == GCConductor::Mutator)
1131             return RunCurrentPhaseResult::NeedCurrentThreadState;
1132         
1133         result = runFixpointPhase(conn);
1134         break;
1135         
1136     case CollectorPhase::Concurrent:
1137         result = runConcurrentPhase(conn);
1138         break;
1139         
1140     case CollectorPhase::Reloop:
1141         result = runReloopPhase(conn);
1142         break;
1143         
1144     case CollectorPhase::End:
1145         result = runEndPhase(conn);
1146         break;
1147     }
1148
1149     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1150 }
1151
1152 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1153 {
1154     // Check m_requests since the mutator calls this to poll what's going on.
1155     {
1156         auto locker = holdLock(*m_threadLock);
1157         if (m_requests.isEmpty())
1158             return false;
1159     }
1160     
1161     return changePhase(conn, CollectorPhase::Begin);
1162 }
1163
1164 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1165 {
1166     m_currentGCStartTime = MonotonicTime::now();
1167     
1168     {
1169         LockHolder locker(*m_threadLock);
1170         RELEASE_ASSERT(!m_requests.isEmpty());
1171         m_currentRequest = m_requests.first();
1172     }
1173         
1174     if (Options::logGC())
1175         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1176
1177     m_beforeGC = MonotonicTime::now();
1178
1179     if (m_collectionScope) {
1180         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1181         RELEASE_ASSERT_NOT_REACHED();
1182     }
1183     
1184     willStartCollection();
1185         
1186     if (UNLIKELY(m_verifier)) {
1187         // Verify that live objects from the last GC cycle haven't been corrupted by
1188         // mutators before we begin this new GC cycle.
1189         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1190             
1191         m_verifier->startGC();
1192         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1193     }
1194         
1195     prepareForMarking();
1196         
1197     if (m_collectionScope == CollectionScope::Full) {
1198         m_opaqueRoots.clear();
1199         m_collectorSlotVisitor->clearMarkStacks();
1200         m_mutatorMarkStack->clear();
1201     }
1202
1203     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1204
1205     beginMarking();
1206
1207     forEachSlotVisitor(
1208         [&] (SlotVisitor& visitor) {
1209             visitor.didStartMarking();
1210         });
1211
1212     m_parallelMarkersShouldExit = false;
1213
1214     m_helperClient.setFunction(
1215         [this] () {
1216             SlotVisitor* slotVisitor;
1217             {
1218                 LockHolder locker(m_parallelSlotVisitorLock);
1219                 if (m_availableParallelSlotVisitors.isEmpty()) {
1220                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1221                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1222                     
1223                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1224                         newVisitor->optimizeForStoppedMutator();
1225                     
1226                     newVisitor->didStartMarking();
1227                     
1228                     slotVisitor = newVisitor.get();
1229                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1230                 } else
1231                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1232             }
1233
1234             WTF::registerGCThread(GCThreadType::Helper);
1235
1236             {
1237                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1238                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1239             }
1240
1241             {
1242                 LockHolder locker(m_parallelSlotVisitorLock);
1243                 m_availableParallelSlotVisitors.append(slotVisitor);
1244             }
1245         });
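
    // The helpers installed above are pooled: each one either reuses a visitor
    // from m_availableParallelSlotVisitors or lazily creates a new SlotVisitor
    // named "P1", "P2", ..., so repeated collections don't reallocate per-thread
    // marking state. Helpers drain the shared mark stacks in SlaveDrain mode
    // until m_parallelMarkersShouldExit is set in runEndPhase().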
1246
1247     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1248
1249     m_constraintSet->didStartMarking();
1250     
1251     m_scheduler->beginCollection();
1252     if (Options::logGC())
1253         m_scheduler->log();
1254     
1255     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1256     // checks because bootstrap would have put things into the visitor. So, we should fall
1257     // through to draining.
1258     
1259     if (!slotVisitor.didReachTermination()) {
1260         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1261         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1262         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1263         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1264         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1265         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1266         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1267         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1268         RELEASE_ASSERT_NOT_REACHED();
1269     }
1270         
1271     return changePhase(conn, CollectorPhase::Fixpoint);
1272 }
1273
1274 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1275 {
1276     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1277     
1278     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1279     
1280     if (Options::logGC()) {
1281         HashMap<const char*, size_t> visitMap;
1282         forEachSlotVisitor(
1283             [&] (SlotVisitor& slotVisitor) {
1284                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1285             });
1286         
1287         auto perVisitorDump = sortedMapDump(
1288             visitMap,
1289             [] (const char* a, const char* b) -> bool {
1290                 return strcmp(a, b) < 0;
1291             },
1292             ":", " ");
1293         
1294         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1295     }
1296         
1297     if (slotVisitor.didReachTermination()) {
1298         m_opaqueRoots.deleteOldTables();
1299         
1300         m_scheduler->didReachTermination();
1301         
1302         assertMarkStacksEmpty();
1303             
1304         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1305         // we don't have to execute root constraints again unless the mutator did run. At a
1306         // minimum, we could use this for work estimates - but it's probably more than just an
1307         // estimate.
1308         // https://bugs.webkit.org/show_bug.cgi?id=166828
1309             
1310         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1311         // add their own using Heap::addMarkingConstraint().
1312         bool converged = m_constraintSet->executeConvergence(slotVisitor);
1313         
1314         // FIXME: The slotVisitor.isEmpty() check is most likely not needed.
1315         // https://bugs.webkit.org/show_bug.cgi?id=180310
1316         if (converged && slotVisitor.isEmpty()) {
1317             assertMarkStacksEmpty();
1318             return changePhase(conn, CollectorPhase::End);
1319         }
1320             
1321         m_scheduler->didExecuteConstraints();
1322     }
1323         
1324     if (Options::logGC())
1325         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1326         
1327     {
1328         ParallelModeEnabler enabler(slotVisitor);
1329         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1330     }
1331         
1332     m_scheduler->synchronousDrainingDidStall();
1333
1334     // This is kinda tricky. The termination check looks at:
1335     //
1336     // - Whether the marking threads are active. If they are not, this means that the marking threads'
1337     //   SlotVisitors are empty.
1338     // - Whether the collector's slot visitor is empty.
1339     // - Whether the shared mark stacks are empty.
1340     //
1341     // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC
1342     // work increment, so it must be empty now.
1343     if (slotVisitor.didReachTermination())
1344         return true; // This is like relooping to the top of runFixpointPhase().
1345         
1346     if (!m_scheduler->shouldResume())
1347         return true;
1348
1349     m_scheduler->willResume();
1350         
1351     if (Options::logGC()) {
1352         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1353         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1354     }
1355
1356     // Forgive the mutator for its past failures to keep up.
1357     // FIXME: Figure out if moving this to different places results in perf changes.
1358     m_incrementBalance = 0;
1359         
1360     return changePhase(conn, CollectorPhase::Concurrent);
1361 }
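
// The fixpoint above is, in essence (scheduler pauses, logging, and the handoff
// to the concurrent phase elided):
//
//     for (;;) {
//         if (slotVisitor.didReachTermination()) {
//             bool converged = m_constraintSet->executeConvergence(slotVisitor);
//             if (converged && slotVisitor.isEmpty())
//                 return changePhase(conn, CollectorPhase::End);
//         }
//         slotVisitor.drainInParallel(...);
//     }
//
// Constraint execution is what (re)discovers roots - see Heap::addCoreConstraints()
// and Heap::addMarkingConstraint() - so marking terminates only once a full
// constraint pass produces no new work and every mark stack is empty.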
1362
1363 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1364 {
1365     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1366
1367     switch (conn) {
1368     case GCConductor::Mutator: {
1369         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1370         // stopIfNecessary(), i.e. on every allocation slow path. When that happens we check whether
1371         // it's time to stop and do some work.
1372         if (slotVisitor.didReachTermination()
1373             || m_scheduler->shouldStop())
1374             return changePhase(conn, CollectorPhase::Reloop);
1375         
1376         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1377         // everything. This is super cheap if the SlotVisitor is already empty.
1378         slotVisitor.donateAll();
1379         return false;
1380     }
1381     case GCConductor::Collector: {
1382         {
1383             ParallelModeEnabler enabler(slotVisitor);
1384             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1385         }
1386         return changePhase(conn, CollectorPhase::Reloop);
1387     } }
1388     
1389     RELEASE_ASSERT_NOT_REACHED();
1390     return false;
1391 }
1392
1393 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1394 {
1395     if (Options::logGC())
1396         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1397     
1398     m_scheduler->didStop();
1399     
1400     if (Options::logGC())
1401         m_scheduler->log();
1402     
1403     return changePhase(conn, CollectorPhase::Fixpoint);
1404 }
1405
1406 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1407 {
1408     m_scheduler->endCollection();
1409         
1410     {
1411         auto locker = holdLock(m_markingMutex);
1412         m_parallelMarkersShouldExit = true;
1413         m_markingConditionVariable.notifyAll();
1414     }
1415     m_helperClient.finish();
1416     
1417     iterateExecutingAndCompilingCodeBlocks(
1418         [&] (CodeBlock* codeBlock) {
1419             writeBarrier(codeBlock);
1420         });
1421         
1422     updateObjectCounts();
1423     endMarking();
1424         
1425     if (UNLIKELY(m_verifier)) {
1426         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1427         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1428     }
1429         
1430     if (vm()->typeProfiler())
1431         vm()->typeProfiler()->invalidateTypeSetCache();
1432         
1433     reapWeakHandles();
1434     pruneStaleEntriesFromWeakGCMaps();
1435     sweepArrayBuffers();
1436     snapshotUnswept();
1437     finalizeUnconditionalFinalizers();
1438     removeDeadCompilerWorklistEntries();
1439     notifyIncrementalSweeper();
1440     
1441     m_codeBlocks->iterateCurrentlyExecuting(
1442         [&] (CodeBlock* codeBlock) {
1443             writeBarrier(codeBlock);
1444         });
1445     m_codeBlocks->clearCurrentlyExecuting();
1446         
1447     m_objectSpace.prepareForAllocation();
1448     updateAllocationLimits();
1449
1450     if (UNLIKELY(m_verifier)) {
1451         m_verifier->trimDeadCells();
1452         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1453     }
1454
1455     didFinishCollection();
1456     
1457     if (m_currentRequest.didFinishEndPhase)
1458         m_currentRequest.didFinishEndPhase->run();
1459     
1460     if (false) {
1461         dataLog("Heap state after GC:\n");
1462         m_objectSpace.dumpBits();
1463     }
1464     
1465     if (Options::logGC()) {
1466         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1467         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1468     }
1469     
1470     {
1471         auto locker = holdLock(*m_threadLock);
1472         m_requests.removeFirst();
1473         m_lastServedTicket++;
1474         clearMutatorWaiting();
1475     }
1476     ParkingLot::unparkAll(&m_worldState);
1477
1478     if (false)
1479         dataLog("GC END!\n");
1480
1481     setNeedFinalize();
1482
1483     m_lastGCStartTime = m_currentGCStartTime;
1484     m_lastGCEndTime = MonotonicTime::now();
1485         
1486     return changePhase(conn, CollectorPhase::NotRunning);
1487 }
1488
1489 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1490 {
1491     checkConn(conn);
1492
1493     m_nextPhase = nextPhase;
1494
1495     return finishChangingPhase(conn);
1496 }
1497
1498 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1499 {
1500     checkConn(conn);
1501     
1502     if (m_nextPhase == m_currentPhase)
1503         return true;
1504
1505     if (false)
1506         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1507     
1508     m_phaseVersion++;
1509     
1510     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1511     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1512     
1513     if (suspendedBefore != suspendedAfter) {
1514         if (suspendedBefore) {
1515             RELEASE_ASSERT(!suspendedAfter);
1516             
1517             resumeThePeriphery();
1518             if (conn == GCConductor::Collector)
1519                 resumeTheMutator();
1520             else
1521                 handleNeedFinalize();
1522         } else {
1523             RELEASE_ASSERT(!suspendedBefore);
1524             RELEASE_ASSERT(suspendedAfter);
1525             
1526             if (conn == GCConductor::Collector) {
1527                 waitWhileNeedFinalize();
1528                 if (!stopTheMutator()) {
1529                     if (false)
1530                         dataLog("Returning false.\n");
1531                     return false;
1532                 }
1533             } else {
1534                 sanitizeStackForVM(m_vm);
1535                 handleNeedFinalize();
1536             }
1537             stopThePeriphery(conn);
1538         }
1539     }
1540     
1541     m_currentPhase = m_nextPhase;
1542     return true;
1543 }
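
// Note that the world is only stopped or resumed when a transition crosses the
// worldShouldBeSuspended() boundary; moving between two suspended phases, or two
// concurrent ones, leaves the mutator alone. Who performs the stop also depends
// on who has the conn: the collector must first win stopTheMutator(), while a
// mutator-driven transition sanitizes its own stack, handles any pending
// finalization, and then stops the periphery directly.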
1544
1545 void Heap::stopThePeriphery(GCConductor conn)
1546 {
1547     if (m_worldIsStopped) {
1548         dataLog("FATAL: world already stopped.\n");
1549         RELEASE_ASSERT_NOT_REACHED();
1550     }
1551     
1552     if (m_mutatorDidRun)
1553         m_mutatorExecutionVersion++;
1554     
1555     m_mutatorDidRun = false;
1556
1557     suspendCompilerThreads();
1558     m_worldIsStopped = true;
1559
1560     forEachSlotVisitor(
1561         [&] (SlotVisitor& slotVisitor) {
1562             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1563         });
1564
1565 #if ENABLE(JIT)
1566     {
1567         DeferGCForAWhile awhile(*this);
1568         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1569             && conn == GCConductor::Collector)
1570             setGCDidJIT();
1571     }
1572 #else
1573     UNUSED_PARAM(conn);
1574 #endif // ENABLE(JIT)
1575     
1576     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1577     
1578     m_structureIDTable.flushOldTables();
1579     m_objectSpace.stopAllocating();
1580     
1581     m_stopTime = MonotonicTime::now();
1582 }
1583
1584 NEVER_INLINE void Heap::resumeThePeriphery()
1585 {
1586     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1587     // collection cycle or this is just a concurrent phase within a collection cycle:
1588     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1589     //   last active block.
1590     // - During collection cycle: it reinstates the last active block.
1591     m_objectSpace.resumeAllocating();
1592     
1593     m_barriersExecuted = 0;
1594     
1595     if (!m_worldIsStopped) {
1596         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1597         RELEASE_ASSERT_NOT_REACHED();
1598     }
1599     m_worldIsStopped = false;
1600     
1601     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1602     // become available. We basically want a lockAny() method that will lock whatever lock is available
1603     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1604     // queues at once, which is totally achievable - it would just require memory allocation, which is
1605     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1606     // with a DLG-style handshake mechanism, but that seems not as general.
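    // A rough sketch of the lockAny() shape the FIXME above has in mind (hypothetical API; no
    // such helper exists in WTF today):
    //
    //     // Parks on every lock's queue at once and returns the index of the lock acquired.
    //     unsigned lockAny(const Vector<Lock*>& locks);
    //
    // With such a helper, the loop below could simply lock whichever rightToRun becomes
    // available next, update that visitor, and repeat until the vector is empty.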
1607     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1608
1609     forEachSlotVisitor(
1610         [&] (SlotVisitor& slotVisitor) {
1611             slotVisitorsToUpdate.append(&slotVisitor);
1612         });
1613     
1614     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1615         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1616             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1617             bool remove = false;
1618             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1619                 remove = true;
1620             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1621                 slotVisitor.updateMutatorIsStopped(locker);
1622                 remove = true;
1623             }
1624             if (remove) {
1625                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1626                 slotVisitorsToUpdate.takeLast();
1627             }
1628         }
1629         WTF::Thread::yield();
1630     }
1631     
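    // Any visitors we could not update opportunistically above are updated here the slow way,
    // by blocking on their rightToRun locks.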
1632     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1633         slotVisitor->updateMutatorIsStopped();
1634     
1635     resumeCompilerThreads();
1636 }
1637
1638 bool Heap::stopTheMutator()
1639 {
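    // The loop below handles four cases: the world is already stopped (return true); the mutator
    // holds the conn, so we cannot stop it (return false); the mutator has no heap access, so we
    // can set the stopped bit immediately; otherwise we hand the conn to the mutator and let it
    // drive the collection from its own thread (see collectInMutatorThread()).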
1640     for (;;) {
1641         unsigned oldState = m_worldState.load();
1642         if (oldState & stoppedBit) {
1643             RELEASE_ASSERT(!(oldState & hasAccessBit));
1644             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1645             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1646             return true;
1647         }
1648         
1649         if (oldState & mutatorHasConnBit) {
1650             RELEASE_ASSERT(!(oldState & hasAccessBit));
1651             RELEASE_ASSERT(!(oldState & stoppedBit));
1652             return false;
1653         }
1654
1655         if (!(oldState & hasAccessBit)) {
1656             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1657             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1658             // We can stop the world instantly.
1659             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1660                 return true;
1661             continue;
1662         }
1663         
1664         // Transfer the conn to the mutator and bail.
1665         RELEASE_ASSERT(oldState & hasAccessBit);
1666         RELEASE_ASSERT(!(oldState & stoppedBit));
1667         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1668         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1669             if (false)
1670                 dataLog("Handed off the conn.\n");
1671             m_stopIfNecessaryTimer->scheduleSoon();
1672             ParkingLot::unparkAll(&m_worldState);
1673             return false;
1674         }
1675     }
1676 }
1677
1678 NEVER_INLINE void Heap::resumeTheMutator()
1679 {
1680     if (false)
1681         dataLog("Resuming the mutator.\n");
1682     for (;;) {
1683         unsigned oldState = m_worldState.load();
1684         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1685             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1686             RELEASE_ASSERT_NOT_REACHED();
1687         }
1688         if (oldState & mutatorHasConnBit) {
1689             dataLog("Fatal: mutator has the conn.\n");
1690             RELEASE_ASSERT_NOT_REACHED();
1691         }
1692         
1693         if (!(oldState & stoppedBit)) {
1694             if (false)
1695                 dataLog("Returning because not stopped.\n");
1696             return;
1697         }
1698         
1699         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1700             if (false)
1701                 dataLog("CASing and returning.\n");
1702             ParkingLot::unparkAll(&m_worldState);
1703             return;
1704         }
1705     }
1706 }
1707
1708 void Heap::stopIfNecessarySlow()
1709 {
1710     while (stopIfNecessarySlow(m_worldState.load())) { }
1711     
1712     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1713     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1714     
1715     handleGCDidJIT();
1716     handleNeedFinalize();
1717     m_mutatorDidRun = true;
1718 }
1719
1720 bool Heap::stopIfNecessarySlow(unsigned oldState)
1721 {
1722     RELEASE_ASSERT(oldState & hasAccessBit);
1723     RELEASE_ASSERT(!(oldState & stoppedBit));
1724     
1725     // It's possible for us to wake up with finalization already requested but the world not yet
1726     // resumed. If that happens, we can't run finalization yet.
1727     if (handleNeedFinalize(oldState))
1728         return true;
1729
1730     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1731     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1732     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1733     // and there would be some other bit indicating whether we were in some GC phase other than the
1734     // NotRunning or Concurrent ones.
1735     if (oldState & mutatorHasConnBit)
1736         collectInMutatorThread();
1737     
1738     return false;
1739 }
1740
1741 NEVER_INLINE void Heap::collectInMutatorThread()
1742 {
1743     CollectingScope collectingScope(*this);
1744     for (;;) {
1745         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1746         switch (result) {
1747         case RunCurrentPhaseResult::Finished:
1748             return;
1749         case RunCurrentPhaseResult::Continue:
1750             break;
1751         case RunCurrentPhaseResult::NeedCurrentThreadState:
1752             sanitizeStackForVM(m_vm);
1753             auto lambda = [&] (CurrentThreadState& state) {
1754                 for (;;) {
1755                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1756                     switch (result) {
1757                     case RunCurrentPhaseResult::Finished:
1758                         return;
1759                     case RunCurrentPhaseResult::Continue:
1760                         break;
1761                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1762                         RELEASE_ASSERT_NOT_REACHED();
1763                         break;
1764                     }
1765                 }
1766             };
1767             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1768             return;
1769         }
1770     }
1771 }
1772
1773 template<typename Func>
1774 void Heap::waitForCollector(const Func& func)
1775 {
1776     for (;;) {
1777         bool done;
1778         {
1779             LockHolder locker(*m_threadLock);
1780             done = func(locker);
1781             if (!done) {
1782                 setMutatorWaiting();
1783                 
1784                 // At this point, the collector knows that we intend to wait, and it will clear the
1785                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit prevents
1786                 // us from parking unless there is also a stop-the-world request, and unparking after
1787                 // clearing means that if the clearing happens after we park, we will still be unparked.
1788             }
1789         }
1790         
1791         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1792         unsigned oldState = m_worldState.load();
1793         if (stopIfNecessarySlow(oldState))
1794             continue;
1795         
1796         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1797         // do the collection.
1798         relinquishConn();
1799         
1800         if (done) {
1801             clearMutatorWaiting(); // Clean up just in case.
1802             return;
1803         }
1804         
1805         // If mutatorWaitingBit is still set then we want to wait.
1806         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1807     }
1808 }
1809
1810 void Heap::acquireAccessSlow()
1811 {
1812     for (;;) {
1813         unsigned oldState = m_worldState.load();
1814         RELEASE_ASSERT(!(oldState & hasAccessBit));
1815         
1816         if (oldState & stoppedBit) {
1817             if (verboseStop) {
1818                 dataLog("Stopping in acquireAccess!\n");
1819                 WTFReportBacktrace();
1820             }
1821             // Wait until we're not stopped anymore.
1822             ParkingLot::compareAndPark(&m_worldState, oldState);
1823             continue;
1824         }
1825         
1826         RELEASE_ASSERT(!(oldState & stoppedBit));
1827         unsigned newState = oldState | hasAccessBit;
1828         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1829             handleGCDidJIT();
1830             handleNeedFinalize();
1831             m_mutatorDidRun = true;
1832             stopIfNecessary();
1833             return;
1834         }
1835     }
1836 }
1837
1838 void Heap::releaseAccessSlow()
1839 {
1840     for (;;) {
1841         unsigned oldState = m_worldState.load();
1842         if (!(oldState & hasAccessBit)) {
1843             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1844             RELEASE_ASSERT_NOT_REACHED();
1845         }
1846         if (oldState & stoppedBit) {
1847             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1848             RELEASE_ASSERT_NOT_REACHED();
1849         }
1850         
1851         if (handleNeedFinalize(oldState))
1852             continue;
1853         
1854         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1855         
1856         if ((oldState & mutatorHasConnBit)
1857             && m_nextPhase != m_currentPhase) {
1858             // This means that the collector thread had given us the conn so that we would do something
1859             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1860             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1861             // sure to have work to do.
1862             newState |= stoppedBit;
1863         }
1864
1865         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1866             if (oldState & mutatorHasConnBit)
1867                 finishRelinquishingConn();
1868             return;
1869         }
1870     }
1871 }
1872
1873 bool Heap::relinquishConn(unsigned oldState)
1874 {
1875     RELEASE_ASSERT(oldState & hasAccessBit);
1876     RELEASE_ASSERT(!(oldState & stoppedBit));
1877     
1878     if (!(oldState & mutatorHasConnBit))
1879         return false; // Done.
1880     
1881     if (m_threadShouldStop)
1882         return false;
1883     
1884     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1885         return true; // Loop around.
1886     
1887     finishRelinquishingConn();
1888     return true;
1889 }
1890
1891 void Heap::finishRelinquishingConn()
1892 {
1893     if (false)
1894         dataLog("Relinquished the conn.\n");
1895     
1896     sanitizeStackForVM(m_vm);
1897     
1898     auto locker = holdLock(*m_threadLock);
1899     if (!m_requests.isEmpty())
1900         m_threadCondition->notifyOne(locker);
1901     ParkingLot::unparkAll(&m_worldState);
1902 }
1903
1904 void Heap::relinquishConn()
1905 {
1906     while (relinquishConn(m_worldState.load())) { }
1907 }
1908
1909 bool Heap::handleGCDidJIT(unsigned oldState)
1910 {
1911     RELEASE_ASSERT(oldState & hasAccessBit);
1912     if (!(oldState & gcDidJITBit))
1913         return false;
1914     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1915         WTF::crossModifyingCodeFence();
1916         return true;
1917     }
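    // The CAS failed, so another thread changed the state under us; return true so the caller
    // reloads the state and tries again.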
1918     return true;
1919 }
1920
1921 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1922 {
1923     RELEASE_ASSERT(oldState & hasAccessBit);
1924     RELEASE_ASSERT(!(oldState & stoppedBit));
1925     
1926     if (!(oldState & needFinalizeBit))
1927         return false;
1928     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1929         finalize();
1930         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1931         // which case they would be waiting for us to release heap access.
1932         ParkingLot::unparkAll(&m_worldState);
1933         return true;
1934     }
1935     return true;
1936 }
1937
1938 void Heap::handleGCDidJIT()
1939 {
1940     while (handleGCDidJIT(m_worldState.load())) { }
1941 }
1942
1943 void Heap::handleNeedFinalize()
1944 {
1945     while (handleNeedFinalize(m_worldState.load())) { }
1946 }
1947
1948 void Heap::setGCDidJIT()
1949 {
1950     m_worldState.transaction(
1951         [&] (unsigned& state) -> bool {
1952             RELEASE_ASSERT(state & stoppedBit);
1953             state |= gcDidJITBit;
1954             return true;
1955         });
1956 }
1957
1958 void Heap::setNeedFinalize()
1959 {
1960     m_worldState.exchangeOr(needFinalizeBit);
1961     ParkingLot::unparkAll(&m_worldState);
1962     m_stopIfNecessaryTimer->scheduleSoon();
1963 }
1964
1965 void Heap::waitWhileNeedFinalize()
1966 {
1967     for (;;) {
1968         unsigned oldState = m_worldState.load();
1969         if (!(oldState & needFinalizeBit)) {
1970             // This means that either there was no finalize request or the main thread will finalize
1971             // with heap access, so a subsequent call to stopTheWorld() will return only when
1972             // finalize finishes.
1973             return;
1974         }
1975         ParkingLot::compareAndPark(&m_worldState, oldState);
1976     }
1977 }
1978
1979 void Heap::setMutatorWaiting()
1980 {
1981     m_worldState.exchangeOr(mutatorWaitingBit);
1982 }
1983
1984 void Heap::clearMutatorWaiting()
1985 {
1986     m_worldState.exchangeAnd(~mutatorWaitingBit);
1987 }
1988
1989 void Heap::notifyThreadStopping(const AbstractLocker&)
1990 {
1991     m_threadIsStopping = true;
1992     clearMutatorWaiting();
1993     ParkingLot::unparkAll(&m_worldState);
1994 }
1995
1996 void Heap::finalize()
1997 {
1998     MonotonicTime before;
1999     if (Options::logGC()) {
2000         before = MonotonicTime::now();
2001         dataLog("[GC<", RawPointer(this), ">: finalize ");
2002     }
2003     
2004     {
2005         SweepingScope sweepingScope(*this);
2006         deleteUnmarkedCompiledCode();
2007         deleteSourceProviderCaches();
2008         sweepInFinalize();
2009     }
2010     
2011     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2012         cache->clear();
2013     
2014     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2015         callback.run(*vm());
2016     
2017     if (Options::sweepSynchronously())
2018         sweepSynchronously();
2019
2020     if (Options::logGC()) {
2021         MonotonicTime after = MonotonicTime::now();
2022         dataLog((after - before).milliseconds(), "ms]\n");
2023     }
2024 }
2025
2026 Heap::Ticket Heap::requestCollection(GCRequest request)
2027 {
2028     stopIfNecessary();
2029     
2030     ASSERT(vm()->currentThreadIsHoldingAPILock());
2031     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2032     
2033     LockHolder locker(*m_threadLock);
2034     // We may be able to steal the conn. That only works if the collector is definitely not running
2035     // right now. This is an optimization that prevents the collector thread from ever starting in most
2036     // cases.
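    // "Definitely not running" concretely means: the collector thread has served every ticket it
    // has been granted and the current phase is NotRunning.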
2037     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2038     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2039         if (false)
2040             dataLog("Taking the conn.\n");
2041         m_worldState.exchangeOr(mutatorHasConnBit);
2042     }
2043     
2044     m_requests.append(request);
2045     m_lastGrantedTicket++;
2046     if (!(m_worldState.load() & mutatorHasConnBit))
2047         m_threadCondition->notifyOne(locker);
2048     return m_lastGrantedTicket;
2049 }
2050
2051 void Heap::waitForCollection(Ticket ticket)
2052 {
2053     waitForCollector(
2054         [&] (const AbstractLocker&) -> bool {
2055             return m_lastServedTicket >= ticket;
2056         });
2057 }
2058
2059 void Heap::sweepInFinalize()
2060 {
2061     m_objectSpace.sweepLargeAllocations();
2062     
2063     auto sweepBlock = [&] (MarkedBlock::Handle* handle) {
2064         handle->sweep(nullptr);
2065     };
2066     
2067     vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock);
2068 }
2069
2070 void Heap::suspendCompilerThreads()
2071 {
2072 #if ENABLE(DFG_JIT)
2073     // We ensure that the worklists exist so that it's not possible for the mutator to start a new
2074     // worklist after we have suspended the ones it had started before. That's not very expensive
2075     // since the worklists use AutomaticThreads anyway.
2076     for (unsigned i = DFG::numberOfWorklists(); i--;)
2077         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2078 #endif
2079 }
2080
2081 void Heap::willStartCollection()
2082 {
2083     if (Options::logGC())
2084         dataLog("=> ");
2085     
2086     if (shouldDoFullCollection()) {
2087         m_collectionScope = CollectionScope::Full;
2088         m_shouldDoFullCollection = false;
2089         if (Options::logGC())
2090             dataLog("FullCollection, ");
2091         if (false)
2092             dataLog("Full collection!\n");
2093     } else {
2094         m_collectionScope = CollectionScope::Eden;
2095         if (Options::logGC())
2096             dataLog("EdenCollection, ");
2097         if (false)
2098             dataLog("Eden collection!\n");
2099     }
2100     if (m_collectionScope == CollectionScope::Full) {
2101         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2102         m_extraMemorySize = 0;
2103         m_deprecatedExtraMemorySize = 0;
2104 #if ENABLE(RESOURCE_USAGE)
2105         m_externalMemorySize = 0;
2106 #endif
2107
2108         if (m_fullActivityCallback)
2109             m_fullActivityCallback->willCollect();
2110     } else {
2111         ASSERT(m_collectionScope == CollectionScope::Eden);
2112         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2113     }
2114
2115     if (m_edenActivityCallback)
2116         m_edenActivityCallback->willCollect();
2117
2118     for (auto* observer : m_observers)
2119         observer->willGarbageCollect();
2120 }
2121
2122 void Heap::prepareForMarking()
2123 {
2124     m_objectSpace.prepareForMarking();
2125 }
2126
2127 void Heap::reapWeakHandles()
2128 {
2129     m_objectSpace.reapWeakSets();
2130 }
2131
2132 void Heap::pruneStaleEntriesFromWeakGCMaps()
2133 {
2134     if (m_collectionScope != CollectionScope::Full)
2135         return;
2136     for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2137         weakGCMap->pruneStaleEntries();
2138 }
2139
2140 void Heap::sweepArrayBuffers()
2141 {
2142     m_arrayBuffers.sweep();
2143 }
2144
2145 void Heap::snapshotUnswept()
2146 {
2147     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2148     m_objectSpace.snapshotUnswept();
2149 }
2150
2151 void Heap::deleteSourceProviderCaches()
2152 {
2153     if (*m_lastCollectionScope == CollectionScope::Full)
2154         m_vm->clearSourceProviderCaches();
2155 }
2156
2157 void Heap::notifyIncrementalSweeper()
2158 {
2159     if (m_collectionScope == CollectionScope::Full) {
2160         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2161             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2162     }
2163
2164     m_sweeper->startSweeping();
2165 }
2166
2167 void Heap::updateAllocationLimits()
2168 {
2169     static const bool verbose = false;
2170     
2171     if (verbose) {
2172         dataLog("\n");
2173         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2174     }
2175     
2176     // Calculate our current heap size threshold for the purpose of figuring out when we should
2177     // run another collection. This isn't the same as either size() or capacity(), though it should
2178     // be somewhere between the two. The key is to match the size calculations involved in calls
2179     // to didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2180     // fragmentation, we may have size() much smaller than capacity().
2181     size_t currentHeapSize = 0;
2182
2183     // For marked space, we use the total number of bytes visited. This matches the logic for
2184     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2185     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2186     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2187     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2188     currentHeapSize += m_totalBytesVisited;
2189     if (verbose)
2190         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2191
2192     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2193     // extra memory reporting.
2194     currentHeapSize += extraMemorySize();
2195     if (!ASSERT_DISABLED) {
2196         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2197         checkedCurrentHeapSize += extraMemorySize();
2198         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2199     }
2200
2201     if (verbose)
2202         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2203     
2204     if (m_collectionScope == CollectionScope::Full) {
2205         // To avoid pathological GC churn in very small and very large heaps, we set
2206         // the new allocation limit based on the current size of the heap, with a
2207         // fixed minimum.
2208         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2209         if (verbose)
2210             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2211         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2212         if (verbose)
2213             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2214         m_sizeAfterLastFullCollect = currentHeapSize;
2215         if (verbose)
2216             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2217         m_bytesAbandonedSinceLastFullCollect = 0;
2218         if (verbose)
2219             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2220     } else {
2221         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2222         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2223         // But we are sloppy, so we have to defend against overflow here.
2224         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2225         if (verbose)
2226             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2227         m_sizeAfterLastEdenCollect = currentHeapSize;
2228         if (verbose)
2229             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2230         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2231         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2232         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2233             m_shouldDoFullCollection = true;
2234         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2235         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2236         if (verbose)
2237             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2238         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2239         if (verbose)
2240             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
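        // Worked example with made-up numbers: say m_maxHeapSize was 32MB, the heap measured
        // 20MB after the last collection, and it measures 24MB now. The first computation above
        // gives m_maxEdenSize = 32 - 24 = 8MB, and 8/32 = 0.25 < 1/3, so we schedule a full
        // collection. Growing m_maxHeapSize by 24 - 20 = 4MB brings it to 36MB, which puts
        // m_maxEdenSize back at 36 - 24 = 12MB: the same nursery size we had right after the
        // last collection, which is what "the nursery size is fixed" means above.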
2241         if (m_fullActivityCallback) {
2242             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2243             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2244         }
2245     }
2246
2247 #if PLATFORM(IOS)
2248     // Get critical memory threshold for next cycle.
2249     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2250 #endif
2251
2252     m_sizeAfterLastCollect = currentHeapSize;
2253     if (verbose)
2254         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2255     m_bytesAllocatedThisCycle = 0;
2256
2257     if (Options::logGC())
2258         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2259 }
2260
2261 void Heap::didFinishCollection()
2262 {
2263     m_afterGC = MonotonicTime::now();
2264     CollectionScope scope = *m_collectionScope;
2265     if (scope == CollectionScope::Full)
2266         m_lastFullGCLength = m_afterGC - m_beforeGC;
2267     else
2268         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2269
2270 #if ENABLE(RESOURCE_USAGE)
2271     ASSERT(externalMemorySize() <= extraMemorySize());
2272 #endif
2273
2274     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2275         gatherExtraHeapSnapshotData(*heapProfiler);
2276         removeDeadHeapSnapshotNodes(*heapProfiler);
2277     }
2278
2279     if (UNLIKELY(m_verifier))
2280         m_verifier->endGC();
2281
2282     RELEASE_ASSERT(m_collectionScope);
2283     m_lastCollectionScope = m_collectionScope;
2284     m_collectionScope = std::nullopt;
2285
2286     for (auto* observer : m_observers)
2287         observer->didGarbageCollect(scope);
2288 }
2289
2290 void Heap::resumeCompilerThreads()
2291 {
2292 #if ENABLE(DFG_JIT)
2293     for (unsigned i = DFG::numberOfWorklists(); i--;)
2294         DFG::existingWorklistForIndex(i).resumeAllThreads();
2295 #endif
2296 }
2297
2298 GCActivityCallback* Heap::fullActivityCallback()
2299 {
2300     return m_fullActivityCallback.get();
2301 }
2302
2303 GCActivityCallback* Heap::edenActivityCallback()
2304 {
2305     return m_edenActivityCallback.get();
2306 }
2307
2308 IncrementalSweeper& Heap::sweeper()
2309 {
2310     return *m_sweeper;
2311 }
2312
2313 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2314 {
2315     if (m_fullActivityCallback)
2316         m_fullActivityCallback->setEnabled(enable);
2317     if (m_edenActivityCallback)
2318         m_edenActivityCallback->setEnabled(enable);
2319 }
2320
2321 void Heap::didAllocate(size_t bytes)
2322 {
2323     if (m_edenActivityCallback)
2324         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2325     m_bytesAllocatedThisCycle += bytes;
2326     performIncrement(bytes);
2327 }
2328
2329 bool Heap::isValidAllocation(size_t)
2330 {
2331     if (!isValidThreadState(m_vm))
2332         return false;
2333
2334     if (isCurrentThreadBusy())
2335         return false;
2336     
2337     return true;
2338 }
2339
2340 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2341 {
2342     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2343 }
2344
2345 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2346 {
2347     HandleSlot slot = handle.slot();
2348     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2349     finalizer(slot->asCell());
2350     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2351 }
2352
2353 void Heap::addExecutable(ExecutableBase* executable)
2354 {
2355     m_executables.append(executable);
2356 }
2357
2358 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2359 {
2360     if (!m_fullActivityCallback) {
2361         collectNow(synchronousness, CollectionScope::Full);
2362         return;
2363     }
2364
2365     if (m_fullActivityCallback->didGCRecently()) {
2366         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2367         reportAbandonedObjectGraph();
2368         return;
2369     }
2370
2371     m_fullActivityCallback->setDidGCRecently();
2372     collectNow(synchronousness, CollectionScope::Full);
2373 }
2374
2375 bool Heap::shouldDoFullCollection()
2376 {
2377     if (!Options::useGenerationalGC())
2378         return true;
2379
2380     if (!m_currentRequest.scope)
2381         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2382     return *m_currentRequest.scope == CollectionScope::Full;
2383 }
2384
2385 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2386 {
2387     m_logicallyEmptyWeakBlocks.append(block);
2388 }
2389
2390 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2391 {
2392     if (m_logicallyEmptyWeakBlocks.isEmpty())
2393         return;
2394
2395     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2396     while (sweepNextLogicallyEmptyWeakBlock()) { }
2397 }
2398
2399 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2400 {
2401     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2402         return false;
2403
2404     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2405
2406     block->sweep();
2407     if (block->isEmpty()) {
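        // Swap-remove: move the last block into this slot so removal is O(1). The index is not
        // advanced in this branch, so the block we moved in will be swept on a later call.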
2408         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2409         m_logicallyEmptyWeakBlocks.removeLast();
2410         WeakBlock::destroy(*this, block);
2411     } else
2412         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2413
2414     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2415         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2416         return false;
2417     }
2418
2419     return true;
2420 }
2421
2422 size_t Heap::visitCount()
2423 {
2424     size_t result = 0;
2425     forEachSlotVisitor(
2426         [&] (SlotVisitor& visitor) {
2427             result += visitor.visitCount();
2428         });
2429     return result;
2430 }
2431
2432 size_t Heap::bytesVisited()
2433 {
2434     size_t result = 0;
2435     forEachSlotVisitor(
2436         [&] (SlotVisitor& visitor) {
2437             result += visitor.bytesVisited();
2438         });
2439     return result;
2440 }
2441
2442 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2443 {
2444     // We don't know the full set of CodeBlocks until compilation has terminated.
2445     completeAllJITPlans();
2446
2447     return m_codeBlocks->iterate(func);
2448 }
2449
2450 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2451 {
2452     return m_codeBlocks->iterate(locker, func);
2453 }
2454
2455 void Heap::writeBarrierSlowPath(const JSCell* from)
2456 {
2457     if (UNLIKELY(mutatorShouldBeFenced())) {
2458         // In this case, the barrierThreshold is the tautological threshold, so from might not
2459         // actually be black. But we can't know for sure until we fire off a fence.
2460         WTF::storeLoadFence();
2461         if (from->cellState() != CellState::PossiblyBlack)
2462             return;
2463     }
2464     
2465     addToRememberedSet(from);
2466 }
2467
2468 bool Heap::isCurrentThreadBusy()
2469 {
2470     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2471 }
2472
2473 void Heap::reportExtraMemoryVisited(size_t size)
2474 {
2475     size_t* counter = &m_extraMemorySize;
2476     
2477     for (;;) {
2478         size_t oldSize = *counter;
2479         // FIXME: Change this to use SaturatedArithmetic when available.
2480         // https://bugs.webkit.org/show_bug.cgi?id=170411
2481         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2482         checkedNewSize += size;
2483         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
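        // That is, saturate at SIZE_MAX instead of wrapping. If the weak CAS below fails because
        // another thread updated the counter, the enclosing loop redoes the whole computation.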
2484         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2485             return;
2486     }
2487 }
2488
2489 #if ENABLE(RESOURCE_USAGE)
2490 void Heap::reportExternalMemoryVisited(size_t size)
2491 {
2492     size_t* counter = &m_externalMemorySize;
2493
2494     for (;;) {
2495         size_t oldSize = *counter;
2496         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2497             return;
2498     }
2499 }
2500 #endif
2501
2502 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2503 {
2504     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2505
2506     if (!m_isSafeToCollect)
2507         return;
2508     switch (mutatorState()) {
2509     case MutatorState::Running:
2510     case MutatorState::Allocating:
2511         break;
2512     case MutatorState::Sweeping:
2513     case MutatorState::Collecting:
2514         return;
2515     }
2516     if (!Options::useGC())
2517         return;
2518     
2519     if (mayNeedToStop()) {
2520         if (deferralContext)
2521             deferralContext->m_shouldGC = true;
2522         else if (isDeferred())
2523             m_didDeferGCWork = true;
2524         else
2525             stopIfNecessary();
2526     }
2527     
2528     if (UNLIKELY(Options::gcMaxHeapSize())) {
2529         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2530             return;
2531     } else {
2532         size_t bytesAllowedThisCycle = m_maxEdenSize;
2533
2534 #if PLATFORM(IOS)
2535         if (overCriticalMemoryThreshold())
2536             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2537 #endif
2538
2539         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2540             return;
2541     }
2542
2543     if (deferralContext)
2544         deferralContext->m_shouldGC = true;
2545     else if (isDeferred())
2546         m_didDeferGCWork = true;
2547     else {
2548         collectAsync();
2549         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2550     }
2551 }
2552
2553 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2554 {
2555     // Can't do anything if we're still deferred.
2556     if (m_deferralDepth)
2557         return;
2558     
2559     ASSERT(!isDeferred());
2560     
2561     m_didDeferGCWork = false;
2562     // FIXME: Bring back something like the DeferGCProbability mode.
2563     // https://bugs.webkit.org/show_bug.cgi?id=166627
2564     collectIfNecessaryOrDefer();
2565 }
2566
2567 void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2568 {
2569     m_weakGCMaps.add(weakGCMap);
2570 }
2571
2572 void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2573 {
2574     m_weakGCMaps.remove(weakGCMap);
2575 }
2576
2577 void Heap::didAllocateBlock(size_t capacity)
2578 {
2579 #if ENABLE(RESOURCE_USAGE)
2580     m_blockBytesAllocated += capacity;
2581 #else
2582     UNUSED_PARAM(capacity);
2583 #endif
2584 }
2585
2586 void Heap::didFreeBlock(size_t capacity)
2587 {
2588 #if ENABLE(RESOURCE_USAGE)
2589     m_blockBytesAllocated -= capacity;
2590 #else
2591     UNUSED_PARAM(capacity);
2592 #endif
2593 }
2594
2595 void Heap::addCoreConstraints()
2596 {
2597     m_constraintSet->add(
2598         "Cs", "Conservative Scan",
2599         [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable {
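            // m_phaseVersion is bumped every time the collector changes phase (see
            // finishChangingPhase()). If it has not changed since this constraint last ran, the
            // world cannot have been resumed in between, so rescanning the stacks and scratch
            // buffers would not find anything new.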
2600             bool shouldNotProduceWork = lastVersion == m_phaseVersion;
2601             if (shouldNotProduceWork)
2602                 return;
2603             
2604             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2605             m_objectSpace.prepareForConservativeScan();
2606             ConservativeRoots conservativeRoots(*this);
2607             SuperSamplerScope superSamplerScope(false);
2608             gatherStackRoots(conservativeRoots);
2609             gatherJSStackRoots(conservativeRoots);
2610             gatherScratchBufferRoots(conservativeRoots);
2611             slotVisitor.append(conservativeRoots);
2612             
2613             lastVersion = m_phaseVersion;
2614         },
2615         ConstraintVolatility::GreyedByExecution);
2616     
2617     m_constraintSet->add(
2618         "Msr", "Misc Small Roots",
2619         [this] (SlotVisitor& slotVisitor) {
2620 #if JSC_OBJC_API_ENABLED
2621             scanExternalRememberedSet(*m_vm, slotVisitor);
2622 #endif
2623
2624             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2625                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2626             
2627             for (auto& pair : m_protectedValues)
2628                 slotVisitor.appendUnbarriered(pair.key);
2629             
2630             if (m_markListSet && m_markListSet->size())
2631                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2632             
2633             slotVisitor.appendUnbarriered(m_vm->exception());
2634             slotVisitor.appendUnbarriered(m_vm->lastException());
2635         },
2636         ConstraintVolatility::GreyedByExecution);
2637     
2638     m_constraintSet->add(
2639         "Sh", "Strong Handles",
2640         [this] (SlotVisitor& slotVisitor) {
2641             m_handleSet.visitStrongHandles(slotVisitor);
2642             m_handleStack.visit(slotVisitor);
2643         },
2644         ConstraintVolatility::GreyedByExecution);
2645     
2646     m_constraintSet->add(
2647         "D", "Debugger",
2648         [this] (SlotVisitor& slotVisitor) {
2649 #if ENABLE(SAMPLING_PROFILER)
2650             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2651                 LockHolder locker(samplingProfiler->getLock());
2652                 samplingProfiler->processUnverifiedStackTraces();
2653                 samplingProfiler->visit(slotVisitor);
2654                 if (Options::logGC() == GCLogging::Verbose)
2655                     dataLog("Sampling Profiler data:\n", slotVisitor);
2656             }
2657 #endif // ENABLE(SAMPLING_PROFILER)
2658             
2659             if (m_vm->typeProfiler())
2660                 m_vm->typeProfilerLog()->visit(slotVisitor);
2661             
2662             m_vm->shadowChicken().visitChildren(slotVisitor);
2663         },
2664         ConstraintVolatility::GreyedByExecution);
2665     
2666     m_constraintSet->add(
2667         "Jsr", "JIT Stub Routines",
2668         [this] (SlotVisitor& slotVisitor) {
2669             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2670         },
2671         ConstraintVolatility::GreyedByExecution);
2672     
2673     m_constraintSet->add(
2674         "Ws", "Weak Sets",
2675         [this] (SlotVisitor& slotVisitor) {
2676             m_objectSpace.visitWeakSets(slotVisitor);
2677         },
2678         ConstraintVolatility::GreyedByMarking);
2679     
2680     m_constraintSet->add(
2681         "Wrh", "Weak Reference Harvesters",
2682         [this] (SlotVisitor& slotVisitor) {
2683             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2684                 current->visitWeakReferences(slotVisitor);
2685         },
2686         ConstraintVolatility::GreyedByMarking);
2687     
2688 #if ENABLE(DFG_JIT)
2689     m_constraintSet->add(
2690         "Dw", "DFG Worklists",
2691         [this] (SlotVisitor& slotVisitor) {
2692             for (unsigned i = DFG::numberOfWorklists(); i--;)
2693                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2694             
2695             // FIXME: This is almost certainly unnecessary.
2696             // https://bugs.webkit.org/show_bug.cgi?id=166829
2697             DFG::iterateCodeBlocksForGC(
2698                 *m_vm,
2699                 [&] (CodeBlock* codeBlock) {
2700                     slotVisitor.appendUnbarriered(codeBlock);
2701                 });
2702             
2703             if (Options::logGC() == GCLogging::Verbose)
2704                 dataLog("DFG Worklists:\n", slotVisitor);
2705         },
2706         ConstraintVolatility::GreyedByMarking);
2707 #endif
2708     
2709     m_constraintSet->add(
2710         "Cb", "CodeBlocks",
2711         [this] (SlotVisitor& slotVisitor) {
2712             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2713                 [&] (CodeBlock* codeBlock) {
2714                     // Visit the CodeBlock as a constraint only if it's black.
2715                     if (Heap::isMarked(codeBlock)
2716                         && codeBlock->cellState() == CellState::PossiblyBlack)
2717                         slotVisitor.visitAsConstraint(codeBlock);
2718                 });
2719         },
2720         ConstraintVolatility::SeldomGreyed);
2721     
2722     m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this));
2723 }
2724
2725 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2726 {
2727     PreventCollectionScope preventCollectionScope(*this);
2728     m_constraintSet->add(WTFMove(constraint));
2729 }
2730
2731 void Heap::notifyIsSafeToCollect()
2732 {
2733     MonotonicTime before;
2734     if (Options::logGC()) {
2735         before = MonotonicTime::now();
2736         dataLog("[GC<", RawPointer(this), ">: starting ");
2737     }
2738     
2739     addCoreConstraints();
2740     
2741     m_isSafeToCollect = true;
2742     
2743     if (Options::collectContinuously()) {
2744         m_collectContinuouslyThread = WTF::Thread::create(
2745             "JSC DEBUG Continuous GC",
2746             [this] () {
2747                 MonotonicTime initialTime = MonotonicTime::now();
2748                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2749                 while (!m_shouldStopCollectingContinuously) {
2750                     {
2751                         LockHolder locker(*m_threadLock);
2752                         if (m_requests.isEmpty()) {
2753                             m_requests.append(std::nullopt);
2754                             m_lastGrantedTicket++;
2755                             m_threadCondition->notifyOne(locker);
2756                         }
2757                     }
2758                     
2759                     {
2760                         LockHolder locker(m_collectContinuouslyLock);
2761                         Seconds elapsed = MonotonicTime::now() - initialTime;
2762                         Seconds elapsedInPeriod = elapsed % period;
2763                         MonotonicTime timeToWakeUp =
2764                             initialTime + elapsed - elapsedInPeriod + period;
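                        // That is, wake up at the next multiple of the period after initialTime, so
                        // the debug collection cadence stays fixed even if a cycle overruns.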
2765                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2766                             m_collectContinuouslyCondition.waitUntil(
2767                                 m_collectContinuouslyLock, timeToWakeUp);
2768                         }
2769                     }
2770                 }
2771             });
2772     }
2773     
2774     if (Options::logGC())
2775         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2776 }
2777
2778 void Heap::preventCollection()
2779 {
2780     if (!m_isSafeToCollect)
2781         return;
2782     
2783     // This prevents the collectContinuously thread from starting a collection.
2784     m_collectContinuouslyLock.lock();
2785     
2786     // Wait for all collections to finish.
2787     waitForCollector(
2788         [&] (const AbstractLocker&) -> bool {
2789             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2790             return m_lastServedTicket == m_lastGrantedTicket;
2791         });
2792     
2793     // Now a collection can only start if this thread starts it.
2794     RELEASE_ASSERT(!m_collectionScope);
2795 }
2796
2797 void Heap::allowCollection()
2798 {
2799     if (!m_isSafeToCollect)
2800         return;
2801     
2802     m_collectContinuouslyLock.unlock();
2803 }
2804
2805 void Heap::setMutatorShouldBeFenced(bool value)
2806 {
2807     m_mutatorShouldBeFenced = value;
2808     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2809 }
2810
2811 void Heap::performIncrement(size_t bytes)
2812 {
2813     if (!m_objectSpace.isMarking())
2814         return;
2815
2816     m_incrementBalance += bytes * Options::gcIncrementScale();
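    // Worked example with made-up option values: with gcIncrementScale = 2.0 and
    // gcIncrementBytes = 10000, allocating 6000 bytes adds 12000 to the balance. That clears
    // the threshold below, so we ask the mutator's SlotVisitor to drain up to 12000 bytes of
    // marking work (capped at gcIncrementMaxBytes), then subtract whatever was actually visited.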
2817
2818     // Save ourselves from crazy. Since this is an optimization, it's OK to go back to any consistent
2819     // state when the double goes wild.
2820     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2821         m_incrementBalance = 0;
2822     
2823     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2824         return;
2825
2826     double targetBytes = m_incrementBalance;
2827     if (targetBytes <= 0)
2828         return;
2829     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2830
2831     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2832     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2833     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2834     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2835     m_incrementBalance -= bytesVisited;
2836 }
2837
2838 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2839 {
2840     m_heapFinalizerCallbacks.append(callback);
2841 }
2842
2843 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2844 {
2845     m_heapFinalizerCallbacks.removeFirst(callback);
2846 }
2847
2848 void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2849 {
2850     auto locker = holdLock(m_markingMutex);
2851     m_bonusVisitorTask = task;
2852     m_markingConditionVariable.notifyAll();
2853 }
2854
2855 void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2856 {
2857     unsigned initialRefCount = task->refCount();
2858     setBonusVisitorTask(task);
2859     task->run(*m_collectorSlotVisitor);
2860     setBonusVisitorTask(nullptr);
2861     // The constraint solver expects the return of this function to imply that the task has
2862     // terminated on all threads. The wait below ensures that property.
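    // Marking threads hold a ref on the bonus task while they run it, so once refCount() drops
    // back to its initial value no thread can still be inside task->run().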
2863     {
2864         auto locker = holdLock(m_markingMutex);
2865         while (task->refCount() > initialRefCount)
2866             m_markingConditionVariable.wait(m_markingMutex);
2867     }
2868 }
2869
2870 } // namespace JSC