Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in...
[WebKit.git] / Source / JavaScriptCore / heap / Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "JSWebAssemblyCodeBlock.h"
51 #include "MachineStackMarker.h"
52 #include "MarkedAllocatorInlines.h"
53 #include "MarkedSpaceInlines.h"
54 #include "MarkingConstraintSet.h"
55 #include "PreventCollectionScope.h"
56 #include "SamplingProfiler.h"
57 #include "ShadowChicken.h"
58 #include "SpaceTimeMutatorScheduler.h"
59 #include "SubspaceInlines.h"
60 #include "SuperSampler.h"
61 #include "StochasticSpaceTimeMutatorScheduler.h"
62 #include "StopIfNecessaryTimer.h"
63 #include "SweepingScope.h"
64 #include "SynchronousStopTheWorldMutatorScheduler.h"
65 #include "TypeProfiler.h"
66 #include "TypeProfilerLog.h"
67 #include "UnlinkedCodeBlock.h"
68 #include "VM.h"
69 #include "WasmMemory.h"
70 #include "WeakSetInlines.h"
71 #include <algorithm>
72 #if PLATFORM(IOS)
73 #include <bmalloc/bmalloc.h>
74 #endif
75 #include <wtf/CurrentTime.h>
76 #include <wtf/ListDump.h>
77 #include <wtf/MainThread.h>
78 #include <wtf/ParallelVectorIterator.h>
79 #include <wtf/ProcessID.h>
80 #include <wtf/RAMSize.h>
81 #include <wtf/SimpleStats.h>
82 #include <wtf/Threading.h>
83
84 #if USE(FOUNDATION)
85 #if __has_include(<objc/objc-internal.h>)
86 #include <objc/objc-internal.h>
87 #else
88 extern "C" void* objc_autoreleasePoolPush(void);
89 extern "C" void objc_autoreleasePoolPop(void *context);
90 #endif
91 #endif // USE(FOUNDATION)
92
93 using namespace std;
94
95 namespace JSC {
96
97 namespace {
98
99 bool verboseStop = false;
100
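// Running maximum of the GC pause times observed in this process; it is only
// consulted by the Options::logGC() output later in this file.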
101 double maxPauseMS(double thisPauseMS)
102 {
103     static double maxPauseMS;
104     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
105     return maxPauseMS;
106 }
107
108 size_t minHeapSize(HeapType heapType, size_t ramSize)
109 {
110     if (heapType == LargeHeap) {
111         double result = min(
112             static_cast<double>(Options::largeHeapSize()),
113             ramSize * Options::smallHeapRAMFraction());
114         return static_cast<size_t>(result);
115     }
116     return Options::smallHeapSize();
117 }
118
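// Chooses the growth factor for the next heap size based on how much memory is
// already in use: on iOS the comparison uses the process's bmalloc footprint,
// elsewhere it uses the current heap size. As an illustrative walk-through
// (hypothetical numbers, not the actual Options defaults): with
// smallHeapRAMFraction = 0.25 and mediumHeapRAMFraction = 0.5 on a 1 GB device,
// usage below 256 MB grows by the small factor, below 512 MB by the medium
// factor, and anything larger by the large factor.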
119 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
120 {
121 #if PLATFORM(IOS)
122     size_t memoryFootprint = bmalloc::api::memoryFootprint();
123     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
124         return Options::smallHeapGrowthFactor() * heapSize;
125     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
126         return Options::mediumHeapGrowthFactor() * heapSize;
127 #else
128     if (heapSize < ramSize * Options::smallHeapRAMFraction())
129         return Options::smallHeapGrowthFactor() * heapSize;
130     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
131         return Options::mediumHeapGrowthFactor() * heapSize;
132 #endif
133     return Options::largeHeapGrowthFactor() * heapSize;
134 }
135
136 bool isValidSharedInstanceThreadState(VM* vm)
137 {
138     return vm->currentThreadIsHoldingAPILock();
139 }
140
141 bool isValidThreadState(VM* vm)
142 {
143     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
144         return false;
145
146     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
147         return false;
148
149     return true;
150 }
151
152 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
153 {
154     const char* typeName = "[unknown]";
155     const ClassInfo* info = cell->classInfo(vm);
156     if (info && info->className)
157         typeName = info->className;
158     set.add(typeName);
159 }
160
161 bool measurePhaseTiming()
162 {
163     return false;
164 }
165
166 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
167 {
168     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
169     static std::once_flag once;
170     std::call_once(
171         once,
172         [] {
173             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
174         });
175     return *result;
176 }
177
178 SimpleStats& timingStats(const char* name, CollectionScope scope)
179 {
180     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
181 }
182
183 class TimingScope {
184 public:
185     TimingScope(std::optional<CollectionScope> scope, const char* name)
186         : m_scope(scope)
187         , m_name(name)
188     {
189         if (measurePhaseTiming())
190             m_before = monotonicallyIncreasingTimeMS();
191     }
192     
193     TimingScope(Heap& heap, const char* name)
194         : TimingScope(heap.collectionScope(), name)
195     {
196     }
197     
198     void setScope(std::optional<CollectionScope> scope)
199     {
200         m_scope = scope;
201     }
202     
203     void setScope(Heap& heap)
204     {
205         setScope(heap.collectionScope());
206     }
207     
208     ~TimingScope()
209     {
210         if (measurePhaseTiming()) {
211             double after = monotonicallyIncreasingTimeMS();
212             double timing = after - m_before;
213             SimpleStats& stats = timingStats(m_name, *m_scope);
214             stats.add(timing);
215             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
216         }
217     }
218 private:
219     std::optional<CollectionScope> m_scope;
220     double m_before;
221     const char* m_name;
222 };
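// Typical usage, as in Heap::beginMarking() below:
//     TimingScope timingScope(*this, "Heap::beginMarking");
// Timings are only recorded and logged when measurePhaseTiming() returns true,
// which is currently hard-coded to false above.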
223
224 } // anonymous namespace
225
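// The dedicated collector thread. It sleeps until a collection has been
// requested and the collector (rather than the mutator) holds the conn, then
// repeatedly runs collectInCollectorThread() until told to stop.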
226 class Heap::Thread : public AutomaticThread {
227 public:
228     Thread(const AbstractLocker& locker, Heap& heap)
229         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
230         , m_heap(heap)
231     {
232     }
233     
234 protected:
235     PollResult poll(const AbstractLocker& locker) override
236     {
237         if (m_heap.m_threadShouldStop) {
238             m_heap.notifyThreadStopping(locker);
239             return PollResult::Stop;
240         }
241         if (m_heap.shouldCollectInCollectorThread(locker))
242             return PollResult::Work;
243         return PollResult::Wait;
244     }
245     
246     WorkResult work() override
247     {
248         m_heap.collectInCollectorThread();
249         return WorkResult::Continue;
250     }
251     
252     void threadDidStart() override
253     {
254         WTF::registerGCThread(GCThreadType::Main);
255     }
256
257 private:
258     Heap& m_heap;
259 };
260
261 Heap::Heap(VM* vm, HeapType heapType)
262     : m_heapType(heapType)
263     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
264     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
265     , m_sizeAfterLastCollect(0)
266     , m_sizeAfterLastFullCollect(0)
267     , m_sizeBeforeLastFullCollect(0)
268     , m_sizeAfterLastEdenCollect(0)
269     , m_sizeBeforeLastEdenCollect(0)
270     , m_bytesAllocatedThisCycle(0)
271     , m_bytesAbandonedSinceLastFullCollect(0)
272     , m_maxEdenSize(m_minBytesPerCycle)
273     , m_maxHeapSize(m_minBytesPerCycle)
274     , m_shouldDoFullCollection(false)
275     , m_totalBytesVisited(0)
276     , m_objectSpace(this)
277     , m_extraMemorySize(0)
278     , m_deprecatedExtraMemorySize(0)
279     , m_machineThreads(std::make_unique<MachineThreads>())
280     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
281     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
282     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
283     , m_raceMarkStack(std::make_unique<MarkStackArray>())
284     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
285     , m_handleSet(vm)
286     , m_codeBlocks(std::make_unique<CodeBlockSet>())
287     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
288     , m_isSafeToCollect(false)
289     , m_vm(vm)
290     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
291     // schedule the timer if we've never done a collection.
292     , m_lastFullGCLength(0.01)
293     , m_lastEdenGCLength(0.01)
294     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
295     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
296     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
297     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
298     , m_deferralDepth(0)
299 #if USE(FOUNDATION)
300     , m_delayedReleaseRecursionCount(0)
301 #endif
302     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
303     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
304     , m_helperClient(&heapHelperPool())
305     , m_threadLock(Box<Lock>::create())
306     , m_threadCondition(AutomaticThreadCondition::create())
307 {
308     m_worldState.store(0);
309     
310     if (Options::useConcurrentGC()) {
311         if (Options::useStochasticMutatorScheduler())
312             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
313         else
314             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
315     } else {
316         // We simulate turning off concurrent GC by making the scheduler say that the world
317         // should always be stopped when the collector is running.
318         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
319     }
320     
321     if (Options::verifyHeap())
322         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
323     
324     m_collectorSlotVisitor->optimizeForStoppedMutator();
325
326     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
327     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
328     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
329
330     LockHolder locker(*m_threadLock);
331     m_thread = adoptRef(new Thread(locker, *this));
332 }
333
334 Heap::~Heap()
335 {
336     forEachSlotVisitor(
337         [&] (SlotVisitor& visitor) {
338             visitor.clearMarkStacks();
339         });
340     m_mutatorMarkStack->clear();
341     m_raceMarkStack->clear();
342     
343     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
344         WeakBlock::destroy(*this, block);
345 }
346
347 bool Heap::isPagedOut(double deadline)
348 {
349     return m_objectSpace.isPagedOut(deadline);
350 }
351
352 // The VM is being destroyed and the collector will never run again.
353 // Run all pending finalizers now because we won't get another chance.
354 void Heap::lastChanceToFinalize()
355 {
356     MonotonicTime before;
357     if (Options::logGC()) {
358         before = MonotonicTime::now();
359         dataLog("[GC<", RawPointer(this), ">: shutdown ");
360     }
361     
362     RELEASE_ASSERT(!m_vm->entryScope);
363     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
364     
365     if (m_collectContinuouslyThread) {
366         {
367             LockHolder locker(m_collectContinuouslyLock);
368             m_shouldStopCollectingContinuously = true;
369             m_collectContinuouslyCondition.notifyOne();
370         }
371         m_collectContinuouslyThread->waitForCompletion();
372     }
373     
374     if (Options::logGC())
375         dataLog("1");
376     
377     // Prevent new collections from being started. This is probably not even necessary, since we're not
378     // going to call into anything that starts collections. Still, this makes the algorithm more
379     // obviously sound.
380     m_isSafeToCollect = false;
381     
382     if (Options::logGC())
383         dataLog("2");
384
385     bool isCollecting;
386     {
387         auto locker = holdLock(*m_threadLock);
388         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
389         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
390     }
391     if (isCollecting) {
392         if (Options::logGC())
393             dataLog("...]\n");
394         
395         // Wait for the current collection to finish.
396         waitForCollector(
397             [&] (const AbstractLocker&) -> bool {
398                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
399                 return m_lastServedTicket == m_lastGrantedTicket;
400             });
401         
402         if (Options::logGC())
403             dataLog("[GC<", RawPointer(this), ">: shutdown ");
404     }
405     if (Options::logGC())
406         dataLog("3");
407
408     RELEASE_ASSERT(m_requests.isEmpty());
409     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
410     
411     // Carefully bring the thread down.
412     bool stopped = false;
413     {
414         LockHolder locker(*m_threadLock);
415         stopped = m_thread->tryStop(locker);
416         m_threadShouldStop = true;
417         if (!stopped)
418             m_threadCondition->notifyOne(locker);
419     }
420
421     if (Options::logGC())
422         dataLog("4");
423     
424     if (!stopped)
425         m_thread->join();
426     
427     if (Options::logGC())
428         dataLog("5 ");
429     
430     m_arrayBuffers.lastChanceToFinalize();
431     m_codeBlocks->lastChanceToFinalize(*m_vm);
432     m_objectSpace.stopAllocating();
433     m_objectSpace.lastChanceToFinalize();
434     releaseDelayedReleasedObjects();
435
436     sweepAllLogicallyEmptyWeakBlocks();
437     
438     m_objectSpace.freeMemory();
439     
440     if (Options::logGC())
441         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
442 }
443
444 void Heap::releaseDelayedReleasedObjects()
445 {
446 #if USE(FOUNDATION)
447     // We need to guard against the case that releasing an object can create more objects due to the
448     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
449     // back here and could try to recursively release objects. We guard that with a recursive entry
450     // count. Only the initial call will release objects; recursive calls simply return and let
451     // the initial call to the function take care of any objects created during release time.
452     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
453     // and use a temp Vector for the actual releasing.
454     if (!m_delayedReleaseRecursionCount++) {
455         while (!m_delayedReleaseObjects.isEmpty()) {
456             ASSERT(m_vm->currentThreadIsHoldingAPILock());
457
458             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
459
460             {
461                 // We need to drop locks before calling out to arbitrary code.
462                 JSLock::DropAllLocks dropAllLocks(m_vm);
463
464                 void* context = objc_autoreleasePoolPush();
465                 objectsToRelease.clear();
466                 objc_autoreleasePoolPop(context);
467             }
468         }
469     }
470     m_delayedReleaseRecursionCount--;
471 #endif
472 }
473
474 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
475 {
476     didAllocate(size);
477     collectIfNecessaryOrDefer();
478 }
479
480 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
481 {
482     // FIXME: Change this to use SaturatedArithmetic when available.
483     // https://bugs.webkit.org/show_bug.cgi?id=170411
484     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
485     checkedNewSize += size;
486     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
487     reportExtraMemoryAllocatedSlowCase(size);
488 }
489
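// On iOS this compares bmalloc's percentAvailableMemoryInUse() against
// Options::criticalGCMemoryThreshold(). The query result is cached and
// refreshed at most once every 100 calls, unless the caller passes
// MemoryThresholdCallType::Direct. On other platforms this always reports false.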
490 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
491 {
492 #if PLATFORM(IOS)
493     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
494         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
495         m_precentAvailableMemoryCachedCallCount = 0;
496     }
497
498     return m_overCriticalMemoryThreshold;
499 #else
500     UNUSED_PARAM(memoryThresholdCallType);
501     return false;
502 #endif
503 }
504
505 void Heap::reportAbandonedObjectGraph()
506 {
507     // Our clients don't know exactly how much memory they
508     // are abandoning so we just guess for them.
509     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
510
511     // We want to accelerate the next collection. Because memory has just 
512     // been abandoned, the next collection has the potential to 
513     // be more profitable. Since allocation is the trigger for collection, 
514     // we hasten the next collection by pretending that we've allocated more memory. 
515     if (m_fullActivityCallback) {
516         m_fullActivityCallback->didAllocate(
517             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
518     }
519     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
520 }
521
522 void Heap::protect(JSValue k)
523 {
524     ASSERT(k);
525     ASSERT(m_vm->currentThreadIsHoldingAPILock());
526
527     if (!k.isCell())
528         return;
529
530     m_protectedValues.add(k.asCell());
531 }
532
533 bool Heap::unprotect(JSValue k)
534 {
535     ASSERT(k);
536     ASSERT(m_vm->currentThreadIsHoldingAPILock());
537
538     if (!k.isCell())
539         return false;
540
541     return m_protectedValues.remove(k.asCell());
542 }
543
544 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
545 {
546     if (m_arrayBuffers.addReference(cell, buffer)) {
547         collectIfNecessaryOrDefer();
548         didAllocate(buffer->gcSizeEstimateInBytes());
549     }
550 }
551
552 void Heap::finalizeUnconditionalFinalizers()
553 {
554     while (m_unconditionalFinalizers.hasNext()) {
555         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
556         finalizer->finalizeUnconditionally();
557     }
558 }
559
560 void Heap::willStartIterating()
561 {
562     m_objectSpace.willStartIterating();
563 }
564
565 void Heap::didFinishIterating()
566 {
567     m_objectSpace.didFinishIterating();
568 }
569
570 void Heap::completeAllJITPlans()
571 {
572 #if ENABLE(JIT)
573     JITWorklist::instance()->completeAllForVM(*m_vm);
574 #endif // ENABLE(JIT)
575     DFG::completeAllPlansForVM(*m_vm);
576 }
577
578 template<typename Func>
579 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
580 {
581     m_codeBlocks->iterateCurrentlyExecuting(func);
582     DFG::iterateCodeBlocksForGC(*m_vm, func);
583 }
584
585 template<typename Func>
586 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
587 {
588     Vector<CodeBlock*, 256> codeBlocks;
589     iterateExecutingAndCompilingCodeBlocks(
590         [&] (CodeBlock* codeBlock) {
591             codeBlocks.append(codeBlock);
592         });
593     for (CodeBlock* codeBlock : codeBlocks)
594         func(codeBlock);
595 }
596
597 void Heap::assertSharedMarkStacksEmpty()
598 {
599     bool ok = true;
600     
601     if (!m_sharedCollectorMarkStack->isEmpty()) {
602         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
603         ok = false;
604     }
605     
606     if (!m_sharedMutatorMarkStack->isEmpty()) {
607         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
608         ok = false;
609     }
610     
611     RELEASE_ASSERT(ok);
612 }
613
614 void Heap::gatherStackRoots(ConservativeRoots& roots)
615 {
616     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
617 }
618
619 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
620 {
621 #if !ENABLE(JIT)
622     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
623 #else
624     UNUSED_PARAM(roots);
625 #endif
626 }
627
628 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
629 {
630 #if ENABLE(DFG_JIT)
631     m_vm->gatherConservativeRoots(roots);
632 #else
633     UNUSED_PARAM(roots);
634 #endif
635 }
636
637 void Heap::beginMarking()
638 {
639     TimingScope timingScope(*this, "Heap::beginMarking");
640     if (m_collectionScope == CollectionScope::Full)
641         m_codeBlocks->clearMarksForFullCollection();
642     m_jitStubRoutines->clearMarks();
643     m_objectSpace.beginMarking();
644     setMutatorShouldBeFenced(true);
645 }
646
647 void Heap::removeDeadCompilerWorklistEntries()
648 {
649 #if ENABLE(DFG_JIT)
650     for (unsigned i = DFG::numberOfWorklists(); i--;)
651         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
652 #endif
653 }
654
655 bool Heap::isHeapSnapshotting() const
656 {
657     HeapProfiler* heapProfiler = m_vm->heapProfiler();
658     if (UNLIKELY(heapProfiler))
659         return heapProfiler->activeSnapshotBuilder();
660     return false;
661 }
662
663 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
664     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
665         : m_builder(builder)
666     {
667     }
668
669     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
670     {
671         if (kind == HeapCell::JSCell) {
672             JSCell* cell = static_cast<JSCell*>(heapCell);
673             cell->methodTable()->heapSnapshot(cell, m_builder);
674         }
675         return IterationStatus::Continue;
676     }
677
678     HeapSnapshotBuilder& m_builder;
679 };
680
681 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
682 {
683     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
684         HeapIterationScope heapIterationScope(*this);
685         GatherHeapSnapshotData functor(*builder);
686         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
687     }
688 }
689
690 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
691     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
692         : m_snapshot(snapshot)
693     {
694     }
695
696     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
697     {
698         if (kind == HeapCell::JSCell)
699             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
700         return IterationStatus::Continue;
701     }
702
703     HeapSnapshot& m_snapshot;
704 };
705
706 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
707 {
708     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
709         HeapIterationScope heapIterationScope(*this);
710         RemoveDeadHeapSnapshotNodes functor(*snapshot);
711         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
712         snapshot->shrinkToFit();
713     }
714 }
715
716 void Heap::updateObjectCounts()
717 {
718     if (m_collectionScope == CollectionScope::Full)
719         m_totalBytesVisited = 0;
720
721     m_totalBytesVisitedThisCycle = bytesVisited();
722     
723     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
724 }
725
726 void Heap::endMarking()
727 {
728     forEachSlotVisitor(
729         [&] (SlotVisitor& visitor) {
730             visitor.reset();
731         });
732
733     assertSharedMarkStacksEmpty();
734     m_weakReferenceHarvesters.removeAll();
735
736     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
737     
738     m_objectSpace.endMarking();
739     setMutatorShouldBeFenced(Options::forceFencedBarrier());
740 }
741
742 size_t Heap::objectCount()
743 {
744     return m_objectSpace.objectCount();
745 }
746
747 size_t Heap::extraMemorySize()
748 {
749     // FIXME: Change this to use SaturatedArithmetic when available.
750     // https://bugs.webkit.org/show_bug.cgi?id=170411
751     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
752     checkedTotal += m_deprecatedExtraMemorySize;
753     checkedTotal += m_arrayBuffers.size();
754     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
755
756     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
757     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
758 }
759
760 size_t Heap::size()
761 {
762     return m_objectSpace.size() + extraMemorySize();
763 }
764
765 size_t Heap::capacity()
766 {
767     return m_objectSpace.capacity() + extraMemorySize();
768 }
769
770 size_t Heap::protectedGlobalObjectCount()
771 {
772     size_t result = 0;
773     forEachProtectedCell(
774         [&] (JSCell* cell) {
775             if (cell->isObject() && asObject(cell)->isGlobalObject())
776                 result++;
777         });
778     return result;
779 }
780
781 size_t Heap::globalObjectCount()
782 {
783     HeapIterationScope iterationScope(*this);
784     size_t result = 0;
785     m_objectSpace.forEachLiveCell(
786         iterationScope,
787         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
788             if (kind != HeapCell::JSCell)
789                 return IterationStatus::Continue;
790             JSCell* cell = static_cast<JSCell*>(heapCell);
791             if (cell->isObject() && asObject(cell)->isGlobalObject())
792                 result++;
793             return IterationStatus::Continue;
794         });
795     return result;
796 }
797
798 size_t Heap::protectedObjectCount()
799 {
800     size_t result = 0;
801     forEachProtectedCell(
802         [&] (JSCell*) {
803             result++;
804         });
805     return result;
806 }
807
808 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
809 {
810     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
811     forEachProtectedCell(
812         [&] (JSCell* cell) {
813             recordType(*vm(), *result, cell);
814         });
815     return result;
816 }
817
818 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
819 {
820     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
821     HeapIterationScope iterationScope(*this);
822     m_objectSpace.forEachLiveCell(
823         iterationScope,
824         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
825             if (kind == HeapCell::JSCell)
826                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
827             return IterationStatus::Continue;
828         });
829     return result;
830 }
831
832 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
833 {
834     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
835         return;
836     
837     PreventCollectionScope preventCollectionScope(*this);
838     
839     // If JavaScript is running, it's not safe to delete all JavaScript code, since
840     // we'll end up returning to deleted code.
841     RELEASE_ASSERT(!m_vm->entryScope);
842     RELEASE_ASSERT(!m_collectionScope);
843
844     completeAllJITPlans();
845
846     for (ExecutableBase* executable : m_executables)
847         executable->clearCode();
848
849 #if ENABLE(WEBASSEMBLY)
850     {
851         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
852         // have no idea that we cleared the code from all of the Executables in the
853         // VM. This could leave Wasm in an inconsistent state where it has an IC that
854         // points into a CodeBlock that could be dead. The IC will still succeed because
855         // it uses a callee check, but then it will call into dead code.
856         HeapIterationScope heapIterationScope(*this);
857         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
858             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
859             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
860             codeBlock->clearJSCallICs(*m_vm);
861         });
862     }
863 #endif
864 }
865
866 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
867 {
868     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
869         return;
870     
871     PreventCollectionScope preventCollectionScope(*this);
872
873     RELEASE_ASSERT(!m_collectionScope);
874     
875     for (ExecutableBase* current : m_executables) {
876         if (!current->isFunctionExecutable())
877             continue;
878         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
879     }
880 }
881
882 void Heap::clearUnmarkedExecutables()
883 {
884     for (unsigned i = m_executables.size(); i--;) {
885         ExecutableBase* current = m_executables[i];
886         if (isMarked(current))
887             continue;
888
889         // Eagerly dereference the Executable's JITCode in order to run watchpoint
890         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
891         current->clearCode();
892         std::swap(m_executables[i], m_executables.last());
893         m_executables.removeLast();
894     }
895
896     m_executables.shrinkToFit();
897 }
898
899 void Heap::deleteUnmarkedCompiledCode()
900 {
901     clearUnmarkedExecutables();
902     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
903     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
904 }
905
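// Write-barrier slow path: the mutator stored into a cell that the collector may
// already have visited, so the cell must be appended to the mutator mark stack
// (or, during a full collection, re-whitened). The comments below describe the
// races this has to tolerate.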
906 void Heap::addToRememberedSet(const JSCell* constCell)
907 {
908     JSCell* cell = const_cast<JSCell*>(constCell);
909     ASSERT(cell);
910     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
911     m_barriersExecuted++;
912     if (m_mutatorShouldBeFenced) {
913         WTF::loadLoadFence();
914         if (!isMarkedConcurrently(cell)) {
915             // During a full collection a store into an unmarked object that had survived past
916             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
917             // object gets marked at some time after this then it will go down the normal marking
918             // path. So, we don't have to remember this object. We could return here. But we go
919             // further and attempt to re-white the object.
920             
921             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
922             
923             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
924                 // Now we protect against this race:
925                 //
926                 //     1) Object starts out black + unmarked.
927                 //     --> We do isMarkedConcurrently here.
928                 //     2) Object is marked and greyed.
929                 //     3) Object is scanned and blacked.
930                 //     --> We do atomicCompareExchangeCellStateStrong here.
931                 //
932                 // In this case we would have made the object white again, even though it should
933                 // be black. This check lets us correct our mistake. This relies on the fact that
934                 // isMarkedConcurrently converges monotonically to true.
935                 if (isMarkedConcurrently(cell)) {
936                     // It's difficult to work out whether the object should be grey or black at
937                     // this point. We say black conservatively.
938                     cell->setCellState(CellState::PossiblyBlack);
939                 }
940                 
941                 // Either way, we can return. Most likely, the object was not marked, and so the
942                 // object is now labeled white. This means that future barrier executions will not
943                 // fire. In the unlikely event that the object had become marked, we can still
944                 // return anyway, since we proved that the object was not marked at the time that
945                 // we executed this slow path.
946             }
947             
948             return;
949         }
950     } else
951         ASSERT(Heap::isMarkedConcurrently(cell));
952     // It could be that the object was *just* marked. This means that the collector may set the
953     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
954     // race with the collector here. If we win then this is accurate because the object _will_
955     // get scanned again. If we lose then someone else will barrier the object again. That would
956     // be unfortunate but not the end of the world.
957     cell->setCellState(CellState::PossiblyGrey);
958     m_mutatorMarkStack->append(cell);
959 }
960
961 void Heap::sweepSynchronously()
962 {
963     double before = 0;
964     if (Options::logGC()) {
965         dataLog("Full sweep: ", capacity() / 1024, "kb ");
966         before = currentTimeMS();
967     }
968     m_objectSpace.sweep();
969     m_objectSpace.shrink();
970     if (Options::logGC()) {
971         double after = currentTimeMS();
972         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
973     }
974 }
975
976 void Heap::collect(Synchronousness synchronousness, GCRequest request)
977 {
978     switch (synchronousness) {
979     case Async:
980         collectAsync(request);
981         return;
982     case Sync:
983         collectSync(request);
984         return;
985     }
986     RELEASE_ASSERT_NOT_REACHED();
987 }
988
989 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
990 {
991     switch (synchronousness) {
992     case Async: {
993         collectAsync(request);
994         stopIfNecessary();
995         return;
996     }
997         
998     case Sync: {
999         collectSync(request);
1000         
1001         DeferGCForAWhile deferGC(*this);
1002         if (UNLIKELY(Options::useImmortalObjects()))
1003             sweeper().stopSweeping();
1004         
1005         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1006         if (!alreadySweptInCollectSync) {
1007             if (Options::logGC())
1008                 dataLog("[GC<", RawPointer(this), ">: ");
1009             sweepSynchronously();
1010             if (Options::logGC())
1011                 dataLog("]\n");
1012         }
1013         m_objectSpace.assertNoUnswept();
1014         
1015         sweepAllLogicallyEmptyWeakBlocks();
1016         return;
1017     } }
1018     RELEASE_ASSERT_NOT_REACHED();
1019 }
1020
1021 void Heap::collectAsync(GCRequest request)
1022 {
1023     if (!m_isSafeToCollect)
1024         return;
1025
1026     bool alreadyRequested = false;
1027     {
1028         LockHolder locker(*m_threadLock);
1029         for (const GCRequest& previousRequest : m_requests) {
1030             if (request.subsumedBy(previousRequest)) {
1031                 alreadyRequested = true;
1032                 break;
1033             }
1034         }
1035     }
1036     if (alreadyRequested)
1037         return;
1038
1039     requestCollection(request);
1040 }
1041
1042 void Heap::collectSync(GCRequest request)
1043 {
1044     if (!m_isSafeToCollect)
1045         return;
1046     
1047     waitForCollection(requestCollection(request));
1048 }
1049
1050 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1051 {
1052     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1053     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1054     
1055     if (false)
1056         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1057     
1058     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1059 }
1060
1061 void Heap::collectInCollectorThread()
1062 {
1063     for (;;) {
1064         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1065         switch (result) {
1066         case RunCurrentPhaseResult::Finished:
1067             return;
1068         case RunCurrentPhaseResult::Continue:
1069             break;
1070         case RunCurrentPhaseResult::NeedCurrentThreadState:
1071             RELEASE_ASSERT_NOT_REACHED();
1072             break;
1073         }
1074     }
1075 }
1076
1077 void Heap::checkConn(GCConductor conn)
1078 {
1079     switch (conn) {
1080     case GCConductor::Mutator:
1081         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1082         return;
1083     case GCConductor::Collector:
1084         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1085         return;
1086     }
1087     RELEASE_ASSERT_NOT_REACHED();
1088 }
1089
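// Drives the collector state machine. Based on the runXxxPhase() functions
// below, the phases advance roughly as:
//     NotRunning -> Begin -> Fixpoint -> Concurrent -> Reloop -> Fixpoint -> ... -> End -> NotRunning
// with Fixpoint going straight to End once marking has converged. Whichever
// side holds the conn (the collector thread or the mutator) runs the phase.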
1090 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1091 {
1092     checkConn(conn);
1093     m_currentThreadState = currentThreadState;
1094     
1095     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1096     if (!finishChangingPhase(conn)) {
1097         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1098         // this, but it's probably not the end of the world if it did happen.
1099         if (false)
1100             dataLog("Conn bounce-back.\n");
1101         return RunCurrentPhaseResult::Finished;
1102     }
1103     
1104     bool result = false;
1105     switch (m_currentPhase) {
1106     case CollectorPhase::NotRunning:
1107         result = runNotRunningPhase(conn);
1108         break;
1109         
1110     case CollectorPhase::Begin:
1111         result = runBeginPhase(conn);
1112         break;
1113         
1114     case CollectorPhase::Fixpoint:
1115         if (!currentThreadState && conn == GCConductor::Mutator)
1116             return RunCurrentPhaseResult::NeedCurrentThreadState;
1117         
1118         result = runFixpointPhase(conn);
1119         break;
1120         
1121     case CollectorPhase::Concurrent:
1122         result = runConcurrentPhase(conn);
1123         break;
1124         
1125     case CollectorPhase::Reloop:
1126         result = runReloopPhase(conn);
1127         break;
1128         
1129     case CollectorPhase::End:
1130         result = runEndPhase(conn);
1131         break;
1132     }
1133
1134     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1135 }
1136
1137 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1138 {
1139     // Check m_requests since the mutator calls this to poll what's going on.
1140     {
1141         auto locker = holdLock(*m_threadLock);
1142         if (m_requests.isEmpty())
1143             return false;
1144     }
1145     
1146     return changePhase(conn, CollectorPhase::Begin);
1147 }
1148
1149 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1150 {
1151     m_currentGCStartTime = MonotonicTime::now();
1152     
1153     {
1154         LockHolder locker(*m_threadLock);
1155         RELEASE_ASSERT(!m_requests.isEmpty());
1156         m_currentRequest = m_requests.first();
1157     }
1158         
1159     if (Options::logGC())
1160         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1161
1162     m_beforeGC = MonotonicTime::now();
1163
1164     if (m_collectionScope) {
1165         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1166         RELEASE_ASSERT_NOT_REACHED();
1167     }
1168     
1169     willStartCollection();
1170         
1171     if (UNLIKELY(m_verifier)) {
1172         // Verify that live objects from the last GC cycle haven't been corrupted by
1173         // mutators before we begin this new GC cycle.
1174         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1175             
1176         m_verifier->startGC();
1177         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1178     }
1179         
1180     prepareForMarking();
1181         
1182     if (m_collectionScope == CollectionScope::Full) {
1183         m_opaqueRoots.clear();
1184         m_collectorSlotVisitor->clearMarkStacks();
1185         m_mutatorMarkStack->clear();
1186     }
1187
1188     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1189
1190     beginMarking();
1191
1192     forEachSlotVisitor(
1193         [&] (SlotVisitor& visitor) {
1194             visitor.didStartMarking();
1195         });
1196
1197     m_parallelMarkersShouldExit = false;
1198
1199     m_helperClient.setFunction(
1200         [this] () {
1201             SlotVisitor* slotVisitor;
1202             {
1203                 LockHolder locker(m_parallelSlotVisitorLock);
1204                 if (m_availableParallelSlotVisitors.isEmpty()) {
1205                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1206                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1207                     
1208                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1209                         newVisitor->optimizeForStoppedMutator();
1210                     
1211                     newVisitor->didStartMarking();
1212                     
1213                     slotVisitor = newVisitor.get();
1214                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1215                 } else
1216                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1217             }
1218
1219             WTF::registerGCThread(GCThreadType::Helper);
1220
1221             {
1222                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1223                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1224             }
1225
1226             {
1227                 LockHolder locker(m_parallelSlotVisitorLock);
1228                 m_availableParallelSlotVisitors.append(slotVisitor);
1229             }
1230         });
1231
1232     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1233
1234     m_constraintSet->didStartMarking();
1235     
1236     m_scheduler->beginCollection();
1237     if (Options::logGC())
1238         m_scheduler->log();
1239     
1240     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1241     // checks because bootstrap would have put things into the visitor. So, we should fall
1242     // through to draining.
1243     
1244     if (!slotVisitor.didReachTermination()) {
1245         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1246         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1247         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1248         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1249         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1250         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1251         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1252         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1253         RELEASE_ASSERT_NOT_REACHED();
1254     }
1255         
1256     return changePhase(conn, CollectorPhase::Fixpoint);
1257 }
1258
1259 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1260 {
1261     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1262     
1263     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1264     
1265     if (Options::logGC()) {
1266         HashMap<const char*, size_t> visitMap;
1267         forEachSlotVisitor(
1268             [&] (SlotVisitor& slotVisitor) {
1269                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1270             });
1271         
1272         auto perVisitorDump = sortedMapDump(
1273             visitMap,
1274             [] (const char* a, const char* b) -> bool {
1275                 return strcmp(a, b) < 0;
1276             },
1277             ":", " ");
1278         
1279         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1280     }
1281         
1282     if (slotVisitor.didReachTermination()) {
1283         m_scheduler->didReachTermination();
1284             
1285         assertSharedMarkStacksEmpty();
1286             
1287         slotVisitor.mergeIfNecessary();
1288         for (auto& parallelVisitor : m_parallelSlotVisitors)
1289             parallelVisitor->mergeIfNecessary();
1290             
1291         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1292         // we don't have to execute root constraints again unless the mutator did run. At a
1293         // minimum, we could use this for work estimates - but it's probably more than just an
1294         // estimate.
1295         // https://bugs.webkit.org/show_bug.cgi?id=166828
1296             
1297         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1298         // into play if we're executing constraints for the first time. But that will matter
1299         // when we have deep stacks or a lot of DOM stuff.
1300         // https://bugs.webkit.org/show_bug.cgi?id=166831
1301             
1302         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1303         // add their own using Heap::addMarkingConstraint().
1304         bool converged =
1305             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1306         if (converged && slotVisitor.isEmpty()) {
1307             assertSharedMarkStacksEmpty();
1308             return changePhase(conn, CollectorPhase::End);
1309         }
1310             
1311         m_scheduler->didExecuteConstraints();
1312     }
1313         
1314     if (Options::logGC())
1315         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1316         
1317     {
1318         ParallelModeEnabler enabler(slotVisitor);
1319         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1320     }
1321         
1322     m_scheduler->synchronousDrainingDidStall();
1323
1324     if (slotVisitor.didReachTermination())
1325         return true; // This is like relooping to the top of runFixpointPhase().
1326         
1327     if (!m_scheduler->shouldResume())
1328         return true;
1329
1330     m_scheduler->willResume();
1331         
1332     if (Options::logGC()) {
1333         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1334         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1335     }
1336
1337     // Forgive the mutator for its past failures to keep up.
1338     // FIXME: Figure out if moving this to different places results in perf changes.
1339     m_incrementBalance = 0;
1340         
1341     return changePhase(conn, CollectorPhase::Concurrent);
1342 }
1343
1344 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1345 {
1346     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1347
1348     switch (conn) {
1349     case GCConductor::Mutator: {
1350         // When the mutator has the conn, we poll runConcurrentPhase() every time someone says
1351         // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time
1352         // to stop and do some work.
1353         if (slotVisitor.didReachTermination()
1354             || m_scheduler->shouldStop())
1355             return changePhase(conn, CollectorPhase::Reloop);
1356         
1357         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1358         // everything. This is super cheap if the SlotVisitor is already empty.
1359         slotVisitor.donateAll();
1360         return false;
1361     }
1362     case GCConductor::Collector: {
1363         {
1364             ParallelModeEnabler enabler(slotVisitor);
1365             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1366         }
1367         return changePhase(conn, CollectorPhase::Reloop);
1368     } }
1369     
1370     RELEASE_ASSERT_NOT_REACHED();
1371     return false;
1372 }
1373
1374 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1375 {
1376     if (Options::logGC())
1377         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1378     
1379     m_scheduler->didStop();
1380     
1381     if (Options::logGC())
1382         m_scheduler->log();
1383     
1384     return changePhase(conn, CollectorPhase::Fixpoint);
1385 }
1386
1387 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1388 {
1389     m_scheduler->endCollection();
1390         
1391     {
1392         auto locker = holdLock(m_markingMutex);
1393         m_parallelMarkersShouldExit = true;
1394         m_markingConditionVariable.notifyAll();
1395     }
1396     m_helperClient.finish();
1397     
1398     iterateExecutingAndCompilingCodeBlocks(
1399         [&] (CodeBlock* codeBlock) {
1400             writeBarrier(codeBlock);
1401         });
1402         
1403     updateObjectCounts();
1404     endMarking();
1405         
1406     if (UNLIKELY(m_verifier)) {
1407         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1408         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1409     }
1410         
1411     if (vm()->typeProfiler())
1412         vm()->typeProfiler()->invalidateTypeSetCache();
1413         
1414     reapWeakHandles();
1415     pruneStaleEntriesFromWeakGCMaps();
1416     sweepArrayBuffers();
1417     snapshotUnswept();
1418     finalizeUnconditionalFinalizers();
1419     removeDeadCompilerWorklistEntries();
1420     notifyIncrementalSweeper();
1421     
1422     m_codeBlocks->iterateCurrentlyExecuting(
1423         [&] (CodeBlock* codeBlock) {
1424             writeBarrier(codeBlock);
1425         });
1426     m_codeBlocks->clearCurrentlyExecuting();
1427         
1428     m_objectSpace.prepareForAllocation();
1429     updateAllocationLimits();
1430
1431     if (UNLIKELY(m_verifier)) {
1432         m_verifier->trimDeadCells();
1433         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1434     }
1435
1436     didFinishCollection();
1437     
1438     if (m_currentRequest.didFinishEndPhase)
1439         m_currentRequest.didFinishEndPhase->run();
1440     
1441     if (false) {
1442         dataLog("Heap state after GC:\n");
1443         m_objectSpace.dumpBits();
1444     }
1445     
1446     if (Options::logGC()) {
1447         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1448         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1449     }
1450     
1451     {
1452         auto locker = holdLock(*m_threadLock);
1453         m_requests.removeFirst();
1454         m_lastServedTicket++;
1455         clearMutatorWaiting();
1456     }
1457     ParkingLot::unparkAll(&m_worldState);
1458
1459     if (false)
1460         dataLog("GC END!\n");
1461
1462     setNeedFinalize();
1463
1464     m_lastGCStartTime = m_currentGCStartTime;
1465     m_lastGCEndTime = MonotonicTime::now();
1466         
1467     return changePhase(conn, CollectorPhase::NotRunning);
1468 }
1469
1470 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1471 {
1472     checkConn(conn);
1473
1474     m_nextPhase = nextPhase;
1475
1476     return finishChangingPhase(conn);
1477 }
1478
1479 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1480 {
1481     checkConn(conn);
1482     
1483     if (m_nextPhase == m_currentPhase)
1484         return true;
1485
1486     if (false)
1487         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1488     
1489     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1490     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1491     
1492     if (suspendedBefore != suspendedAfter) {
1493         if (suspendedBefore) {
1494             RELEASE_ASSERT(!suspendedAfter);
1495             
1496             resumeThePeriphery();
1497             if (conn == GCConductor::Collector)
1498                 resumeTheMutator();
1499             else
1500                 handleNeedFinalize();
1501         } else {
1502             RELEASE_ASSERT(!suspendedBefore);
1503             RELEASE_ASSERT(suspendedAfter);
1504             
1505             if (conn == GCConductor::Collector) {
1506                 waitWhileNeedFinalize();
1507                 if (!stopTheMutator()) {
1508                     if (false)
1509                         dataLog("Returning false.\n");
1510                     return false;
1511                 }
1512             } else {
1513                 sanitizeStackForVM(m_vm);
1514                 handleNeedFinalize();
1515             }
1516             stopThePeriphery(conn);
1517         }
1518     }
1519     
1520     m_currentPhase = m_nextPhase;
1521     return true;
1522 }
1523
1524 void Heap::stopThePeriphery(GCConductor conn)
1525 {
1526     if (m_collectorBelievesThatTheWorldIsStopped) {
1527         dataLog("FATAL: world already stopped.\n");
1528         RELEASE_ASSERT_NOT_REACHED();
1529     }
1530     
1531     if (m_mutatorDidRun)
1532         m_mutatorExecutionVersion++;
1533     
1534     m_mutatorDidRun = false;
1535
1536     suspendCompilerThreads();
1537     m_collectorBelievesThatTheWorldIsStopped = true;
1538
1539     forEachSlotVisitor(
1540         [&] (SlotVisitor& slotVisitor) {
1541             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1542         });
1543
1544 #if ENABLE(JIT)
1545     {
1546         DeferGCForAWhile awhile(*this);
1547         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1548             && conn == GCConductor::Collector)
1549             setGCDidJIT();
1550     }
1551 #else
1552     UNUSED_PARAM(conn);
1553 #endif // ENABLE(JIT)
1554     
1555     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1556     
1557     m_structureIDTable.flushOldTables();
1558     m_objectSpace.stopAllocating();
1559     
1560     m_stopTime = MonotonicTime::now();
1561 }
1562
1563 NEVER_INLINE void Heap::resumeThePeriphery()
1564 {
1565     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1566     // collection cycle or this is just a concurrent phase within a collection cycle:
1567     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1568     //   last active block.
1569     // - During collection cycle: it reinstates the last active block.
1570     m_objectSpace.resumeAllocating();
1571     
1572     m_barriersExecuted = 0;
1573     
1574     if (!m_collectorBelievesThatTheWorldIsStopped) {
1575         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1576         RELEASE_ASSERT_NOT_REACHED();
1577     }
1578     m_collectorBelievesThatTheWorldIsStopped = false;
1579     
1580     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1581     // become available. We basically want a lockAny() method that will lock whatever lock is available
1582     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1583     // queues at once, which is totally achievable - it would just require memory allocation, which is
1584     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1585     // with a DLG-style handshake mechanism, but that seems not as general.
1586     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1587
1588     forEachSlotVisitor(
1589         [&] (SlotVisitor& slotVisitor) {
1590             slotVisitorsToUpdate.append(&slotVisitor);
1591         });
1592     
1593     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1594         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1595             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1596             bool remove = false;
1597             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1598                 remove = true;
1599             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1600                 slotVisitor.updateMutatorIsStopped(locker);
1601                 remove = true;
1602             }
1603             if (remove) {
1604                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1605                 slotVisitorsToUpdate.takeLast();
1606             }
1607         }
1608         WTF::Thread::yield();
1609     }
1610     
1611     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1612         slotVisitor->updateMutatorIsStopped();
1613     
1614     resumeCompilerThreads();
1615 }
1616
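// Tries to stop the mutator so that collection can proceed with the world stopped. Returns true
// once the stopped bit is set. Returns false if the mutator holds (or has just been handed) the
// conn; in that case the mutator is expected to drive the collection itself the next time it polls
// stopIfNecessary().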
1617 bool Heap::stopTheMutator()
1618 {
1619     for (;;) {
1620         unsigned oldState = m_worldState.load();
1621         if (oldState & stoppedBit) {
1622             RELEASE_ASSERT(!(oldState & hasAccessBit));
1623             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1624             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1625             return true;
1626         }
1627         
1628         if (oldState & mutatorHasConnBit) {
1629             RELEASE_ASSERT(!(oldState & hasAccessBit));
1630             RELEASE_ASSERT(!(oldState & stoppedBit));
1631             return false;
1632         }
1633
1634         if (!(oldState & hasAccessBit)) {
1635             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1636             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1637             // We can stop the world instantly.
1638             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1639                 return true;
1640             continue;
1641         }
1642         
1643         // Transfer the conn to the mutator and bail.
1644         RELEASE_ASSERT(oldState & hasAccessBit);
1645         RELEASE_ASSERT(!(oldState & stoppedBit));
1646         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1647         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1648             if (false)
1649                 dataLog("Handed off the conn.\n");
1650             m_stopIfNecessaryTimer->scheduleSoon();
1651             ParkingLot::unparkAll(&m_worldState);
1652             return false;
1653         }
1654     }
1655 }
1656
1657 NEVER_INLINE void Heap::resumeTheMutator()
1658 {
1659     if (false)
1660         dataLog("Resuming the mutator.\n");
1661     for (;;) {
1662         unsigned oldState = m_worldState.load();
1663         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1664             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1665             RELEASE_ASSERT_NOT_REACHED();
1666         }
1667         if (oldState & mutatorHasConnBit) {
1668             dataLog("Fatal: mutator has the conn.\n");
1669             RELEASE_ASSERT_NOT_REACHED();
1670         }
1671         
1672         if (!(oldState & stoppedBit)) {
1673             if (false)
1674                 dataLog("Returning because not stopped.\n");
1675             return;
1676         }
1677         
1678         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1679             if (false)
1680                 dataLog("CASing and returning.\n");
1681             ParkingLot::unparkAll(&m_worldState);
1682             return;
1683         }
1684     }
1685 }
1686
1687 void Heap::stopIfNecessarySlow()
1688 {
1689     while (stopIfNecessarySlow(m_worldState.load())) { }
1690     
1691     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1692     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1693     
1694     handleGCDidJIT();
1695     handleNeedFinalize();
1696     m_mutatorDidRun = true;
1697 }
1698
1699 bool Heap::stopIfNecessarySlow(unsigned oldState)
1700 {
1701     RELEASE_ASSERT(oldState & hasAccessBit);
1702     RELEASE_ASSERT(!(oldState & stoppedBit));
1703     
1704     // It's possible for us to wake up with finalization already requested but the world not yet
1705     // resumed. If that happens, we can't run finalization yet.
1706     if (handleNeedFinalize(oldState))
1707         return true;
1708
1709     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1710     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1711     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1712     // and there would be some other bit indicating whether we were in some GC phase other than the
1713     // NotRunning or Concurrent ones.
1714     if (oldState & mutatorHasConnBit)
1715         collectInMutatorThread();
1716     
1717     return false;
1718 }
1719
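// Runs collector phases on the mutator thread while the mutator holds the conn. If a phase reports
// that it needs the current thread's state, we capture the stack and registers via
// callWithCurrentThreadState() and keep running phases from inside that scope.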
1720 NEVER_INLINE void Heap::collectInMutatorThread()
1721 {
1722     CollectingScope collectingScope(*this);
1723     for (;;) {
1724         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1725         switch (result) {
1726         case RunCurrentPhaseResult::Finished:
1727             return;
1728         case RunCurrentPhaseResult::Continue:
1729             break;
1730         case RunCurrentPhaseResult::NeedCurrentThreadState:
1731             sanitizeStackForVM(m_vm);
1732             auto lambda = [&] (CurrentThreadState& state) {
1733                 for (;;) {
1734                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1735                     switch (result) {
1736                     case RunCurrentPhaseResult::Finished:
1737                         return;
1738                     case RunCurrentPhaseResult::Continue:
1739                         break;
1740                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1741                         RELEASE_ASSERT_NOT_REACHED();
1742                         break;
1743                     }
1744                 }
1745             };
1746             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1747             return;
1748         }
1749     }
1750 }
1751
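// Blocks the calling mutator thread until func(locker) returns true; func is evaluated with
// m_threadLock held. While waiting we still honor stop-the-world requests, relinquish the conn so
// the collector thread can make progress, and park on m_worldState with the mutatorWaitingBit set.
// waitForCollection() below uses this to wait until a granted ticket has been served.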
1752 template<typename Func>
1753 void Heap::waitForCollector(const Func& func)
1754 {
1755     for (;;) {
1756         bool done;
1757         {
1758             LockHolder locker(*m_threadLock);
1759             done = func(locker);
1760             if (!done) {
1761                 setMutatorWaiting();
1762                 
1763                 // At this point, the collector knows that we intend to wait, and it will clear the
1764                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1765                 // prevents us from parking unless the world is also being stopped. Unparking after
1766                 // clearing means that even if the clearing happens after we park, we will still unpark.
1767             }
1768         }
1769         
1770         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1771         unsigned oldState = m_worldState.load();
1772         if (stopIfNecessarySlow(oldState))
1773             continue;
1774         
1775         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1776         // do the collection.
1777         relinquishConn();
1778         
1779         if (done) {
1780             clearMutatorWaiting(); // Clean up just in case.
1781             return;
1782         }
1783         
1784         // If mutatorWaitingBit is still set then we want to wait.
1785         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1786     }
1787 }
1788
1789 void Heap::acquireAccessSlow()
1790 {
1791     for (;;) {
1792         unsigned oldState = m_worldState.load();
1793         RELEASE_ASSERT(!(oldState & hasAccessBit));
1794         
1795         if (oldState & stoppedBit) {
1796             if (verboseStop) {
1797                 dataLog("Stopping in acquireAccess!\n");
1798                 WTFReportBacktrace();
1799             }
1800             // Wait until we're not stopped anymore.
1801             ParkingLot::compareAndPark(&m_worldState, oldState);
1802             continue;
1803         }
1804         
1805         RELEASE_ASSERT(!(oldState & stoppedBit));
1806         unsigned newState = oldState | hasAccessBit;
1807         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1808             handleGCDidJIT();
1809             handleNeedFinalize();
1810             m_mutatorDidRun = true;
1811             stopIfNecessary();
1812             return;
1813         }
1814     }
1815 }
1816
1817 void Heap::releaseAccessSlow()
1818 {
1819     for (;;) {
1820         unsigned oldState = m_worldState.load();
1821         if (!(oldState & hasAccessBit)) {
1822             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1823             RELEASE_ASSERT_NOT_REACHED();
1824         }
1825         if (oldState & stoppedBit) {
1826             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1827             RELEASE_ASSERT_NOT_REACHED();
1828         }
1829         
1830         if (handleNeedFinalize(oldState))
1831             continue;
1832         
1833         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1834         
1835         if ((oldState & mutatorHasConnBit)
1836             && m_nextPhase != m_currentPhase) {
1837             // This means that the collector thread had given us the conn so that we would do something
1838             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1839             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1840             // sure to have work to do.
1841             newState |= stoppedBit;
1842         }
1843
1844         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1845             if (oldState & mutatorHasConnBit)
1846                 finishRelinquishingConn();
1847             return;
1848         }
1849     }
1850 }
1851
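// Hands the conn back to the collector thread if the mutator currently holds it. Returns false when
// there is nothing more to do; returns true when the caller should reload m_worldState and call
// this again.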
1852 bool Heap::relinquishConn(unsigned oldState)
1853 {
1854     RELEASE_ASSERT(oldState & hasAccessBit);
1855     RELEASE_ASSERT(!(oldState & stoppedBit));
1856     
1857     if (!(oldState & mutatorHasConnBit))
1858         return false; // Done.
1859     
1860     if (m_threadShouldStop)
1861         return false;
1862     
1863     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1864         return true; // Loop around.
1865     
1866     finishRelinquishingConn();
1867     return true;
1868 }
1869
1870 void Heap::finishRelinquishingConn()
1871 {
1872     if (false)
1873         dataLog("Relinquished the conn.\n");
1874     
1875     sanitizeStackForVM(m_vm);
1876     
1877     auto locker = holdLock(*m_threadLock);
1878     if (!m_requests.isEmpty())
1879         m_threadCondition->notifyOne(locker);
1880     ParkingLot::unparkAll(&m_worldState);
1881 }
1882
1883 void Heap::relinquishConn()
1884 {
1885     while (relinquishConn(m_worldState.load())) { }
1886 }
1887
1888 bool Heap::handleGCDidJIT(unsigned oldState)
1889 {
1890     RELEASE_ASSERT(oldState & hasAccessBit);
1891     if (!(oldState & gcDidJITBit))
1892         return false;
1893     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1894         WTF::crossModifyingCodeFence();
1895         return true;
1896     }
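    // Return true even if the compareExchangeWeak failed: the caller reloads the state and retries,
    // and its loop terminates once gcDidJITBit is observed to be clear.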
1897     return true;
1898 }
1899
1900 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1901 {
1902     RELEASE_ASSERT(oldState & hasAccessBit);
1903     RELEASE_ASSERT(!(oldState & stoppedBit));
1904     
1905     if (!(oldState & needFinalizeBit))
1906         return false;
1907     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1908         finalize();
1909         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1910         // which case they would be waiting for us to release heap access.
1911         ParkingLot::unparkAll(&m_worldState);
1912         return true;
1913     }
1914     return true;
1915 }
1916
1917 void Heap::handleGCDidJIT()
1918 {
1919     while (handleGCDidJIT(m_worldState.load())) { }
1920 }
1921
1922 void Heap::handleNeedFinalize()
1923 {
1924     while (handleNeedFinalize(m_worldState.load())) { }
1925 }
1926
1927 void Heap::setGCDidJIT()
1928 {
1929     m_worldState.transaction(
1930         [&] (unsigned& state) -> bool {
1931             RELEASE_ASSERT(state & stoppedBit);
1932             state |= gcDidJITBit;
1933             return true;
1934         });
1935 }
1936
1937 void Heap::setNeedFinalize()
1938 {
1939     m_worldState.exchangeOr(needFinalizeBit);
1940     ParkingLot::unparkAll(&m_worldState);
1941     m_stopIfNecessaryTimer->scheduleSoon();
1942 }
1943
1944 void Heap::waitWhileNeedFinalize()
1945 {
1946     for (;;) {
1947         unsigned oldState = m_worldState.load();
1948         if (!(oldState & needFinalizeBit)) {
1949             // This means that either there was no finalize request or the main thread will finalize
1950             // with heap access, so a subsequent call to stopTheWorld() will return only when
1951             // finalize finishes.
1952             return;
1953         }
1954         ParkingLot::compareAndPark(&m_worldState, oldState);
1955     }
1956 }
1957
1958 void Heap::setMutatorWaiting()
1959 {
1960     m_worldState.exchangeOr(mutatorWaitingBit);
1961 }
1962
1963 void Heap::clearMutatorWaiting()
1964 {
1965     m_worldState.exchangeAnd(~mutatorWaitingBit);
1966 }
1967
1968 void Heap::notifyThreadStopping(const AbstractLocker&)
1969 {
1970     m_threadIsStopping = true;
1971     clearMutatorWaiting();
1972     ParkingLot::unparkAll(&m_worldState);
1973 }
1974
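// Runs the finalization work that must happen on a thread with heap access: eagerly sweeps what has
// to be swept right away, deletes unmarked compiled code and source provider caches, clears the
// HasOwnPropertyCache, runs the registered heap finalizer callbacks, and optionally sweeps
// synchronously if Options::sweepSynchronously() is set.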
1975 void Heap::finalize()
1976 {
1977     MonotonicTime before;
1978     if (Options::logGC()) {
1979         before = MonotonicTime::now();
1980         dataLog("[GC<", RawPointer(this), ">: finalize ");
1981     }
1982     
1983     {
1984         SweepingScope sweepingScope(*this);
1985         deleteUnmarkedCompiledCode();
1986         deleteSourceProviderCaches();
1987         sweepInFinalize();
1988     }
1989     
1990     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1991         cache->clear();
1992     
1993     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
1994         callback.run(*vm());
1995     
1996     if (Options::sweepSynchronously())
1997         sweepSynchronously();
1998
1999     if (Options::logGC()) {
2000         MonotonicTime after = MonotonicTime::now();
2001         dataLog((after - before).milliseconds(), "ms]\n");
2002     }
2003 }
2004
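// Enqueues a GC request and returns a ticket that can later be passed to waitForCollection(). If no
// collection is in flight, the mutator steals the conn so the collection can run on this thread
// without ever waking the collector thread.
//
// A minimal sketch of how a caller that holds the API lock would typically drive a synchronous
// collection with this pair of calls (names as they appear in this file):
//
//     Heap::Ticket ticket = heap.requestCollection(request);
//     heap.waitForCollection(ticket);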
2005 Heap::Ticket Heap::requestCollection(GCRequest request)
2006 {
2007     stopIfNecessary();
2008     
2009     ASSERT(vm()->currentThreadIsHoldingAPILock());
2010     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
2011     
2012     LockHolder locker(*m_threadLock);
2013     // We may be able to steal the conn. That only works if the collector is definitely not running
2014     // right now. This is an optimization that, in most cases, prevents the collector thread from ever
2015     // having to start.
2016     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2017     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2018         if (false)
2019             dataLog("Taking the conn.\n");
2020         m_worldState.exchangeOr(mutatorHasConnBit);
2021     }
2022     
2023     m_requests.append(request);
2024     m_lastGrantedTicket++;
2025     if (!(m_worldState.load() & mutatorHasConnBit))
2026         m_threadCondition->notifyOne(locker);
2027     return m_lastGrantedTicket;
2028 }
2029
2030 void Heap::waitForCollection(Ticket ticket)
2031 {
2032     waitForCollector(
2033         [&] (const AbstractLocker&) -> bool {
2034             return m_lastServedTicket >= ticket;
2035         });
2036 }
2037
2038 void Heap::sweepInFinalize()
2039 {
2040     m_objectSpace.sweepLargeAllocations();
2041     
2042     auto sweepBlock = [&] (MarkedBlock::Handle* handle) {
2043         handle->sweep(nullptr);
2044     };
2045     
2046     vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock);
2047 }
2048
2049 void Heap::suspendCompilerThreads()
2050 {
2051 #if ENABLE(DFG_JIT)
2052     // We eagerly create ("ensure") all of the worklists so that it's not possible for the mutator
2053     // to start a new worklist after we have suspended the ones it had started before. That's not
2054     // very expensive since the worklists use AutomaticThreads anyway.
2055     for (unsigned i = DFG::numberOfWorklists(); i--;)
2056         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2057 #endif
2058 }
2059
2060 void Heap::willStartCollection()
2061 {
2062     if (Options::logGC())
2063         dataLog("=> ");
2064     
2065     if (shouldDoFullCollection()) {
2066         m_collectionScope = CollectionScope::Full;
2067         m_shouldDoFullCollection = false;
2068         if (Options::logGC())
2069             dataLog("FullCollection, ");
2070         if (false)
2071             dataLog("Full collection!\n");
2072     } else {
2073         m_collectionScope = CollectionScope::Eden;
2074         if (Options::logGC())
2075             dataLog("EdenCollection, ");
2076         if (false)
2077             dataLog("Eden collection!\n");
2078     }
2079     if (m_collectionScope == CollectionScope::Full) {
2080         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2081         m_extraMemorySize = 0;
2082         m_deprecatedExtraMemorySize = 0;
2083 #if ENABLE(RESOURCE_USAGE)
2084         m_externalMemorySize = 0;
2085 #endif
2086
2087         if (m_fullActivityCallback)
2088             m_fullActivityCallback->willCollect();
2089     } else {
2090         ASSERT(m_collectionScope == CollectionScope::Eden);
2091         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2092     }
2093
2094     if (m_edenActivityCallback)
2095         m_edenActivityCallback->willCollect();
2096
2097     for (auto* observer : m_observers)
2098         observer->willGarbageCollect();
2099 }
2100
2101 void Heap::prepareForMarking()
2102 {
2103     m_objectSpace.prepareForMarking();
2104 }
2105
2106 void Heap::reapWeakHandles()
2107 {
2108     m_objectSpace.reapWeakSets();
2109 }
2110
2111 void Heap::pruneStaleEntriesFromWeakGCMaps()
2112 {
2113     if (m_collectionScope != CollectionScope::Full)
2114         return;
2115     for (auto& pruneCallback : m_weakGCMaps.values())
2116         pruneCallback();
2117 }
2118
2119 void Heap::sweepArrayBuffers()
2120 {
2121     m_arrayBuffers.sweep();
2122 }
2123
2124 void Heap::snapshotUnswept()
2125 {
2126     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2127     m_objectSpace.snapshotUnswept();
2128 }
2129
2130 void Heap::deleteSourceProviderCaches()
2131 {
2132     if (*m_lastCollectionScope == CollectionScope::Full)
2133         m_vm->clearSourceProviderCaches();
2134 }
2135
2136 void Heap::notifyIncrementalSweeper()
2137 {
2138     if (m_collectionScope == CollectionScope::Full) {
2139         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2140             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2141     }
2142
2143     m_sweeper->startSweeping();
2144 }
2145
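// Recomputes m_maxHeapSize and m_maxEdenSize from the amount of memory found live in this cycle:
// after a full collection the limit is proportional to the live size (with a fixed minimum), and
// after an eden collection the limit grows by the amount the heap grew so that the nursery size
// stays fixed. If the eden-to-old-generation ratio drops too low, the next collection is forced to
// be a full one.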
2146 void Heap::updateAllocationLimits()
2147 {
2148     static const bool verbose = false;
2149     
2150     if (verbose) {
2151         dataLog("\n");
2152         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2153     }
2154     
2155     // Calculate our current heap size threshold for the purpose of figuring out when we should
2156     // run another collection. This isn't the same as either size() or capacity(), though it should
2157     // be somewhere between the two. The key is to match the size calculations involved in calls to
2158     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2159     // fragmentation, we may have size() much smaller than capacity().
2160     size_t currentHeapSize = 0;
2161
2162     // For marked space, we use the total number of bytes visited. This matches the logic for
2163     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2164     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2165     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2166     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2167     currentHeapSize += m_totalBytesVisited;
2168     if (verbose)
2169         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2170
2171     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2172     // extra memory reporting.
2173     currentHeapSize += extraMemorySize();
2174     if (!ASSERT_DISABLED) {
2175         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2176         checkedCurrentHeapSize += extraMemorySize();
2177         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2178     }
2179
2180     if (verbose)
2181         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2182     
2183     if (m_collectionScope == CollectionScope::Full) {
2184         // To avoid pathological GC churn in very small and very large heaps, we set
2185         // the new allocation limit based on the current size of the heap, with a
2186         // fixed minimum.
2187         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2188         if (verbose)
2189             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2190         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2191         if (verbose)
2192             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2193         m_sizeAfterLastFullCollect = currentHeapSize;
2194         if (verbose)
2195             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2196         m_bytesAbandonedSinceLastFullCollect = 0;
2197         if (verbose)
2198             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2199     } else {
2200         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2201         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2202         // But we are sloppy, so we have to defend against overflow.
2203         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2204         if (verbose)
2205             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2206         m_sizeAfterLastEdenCollect = currentHeapSize;
2207         if (verbose)
2208             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2209         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2210         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2211         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2212             m_shouldDoFullCollection = true;
2213         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2214         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2215         if (verbose)
2216             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2217         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2218         if (verbose)
2219             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2220         if (m_fullActivityCallback) {
2221             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2222             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2223         }
2224     }
2225
2226 #if PLATFORM(IOS)
2227     // Get critical memory threshold for next cycle.
2228     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2229 #endif
2230
2231     m_sizeAfterLastCollect = currentHeapSize;
2232     if (verbose)
2233         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2234     m_bytesAllocatedThisCycle = 0;
2235
2236     if (Options::logGC())
2237         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2238 }
2239
2240 void Heap::didFinishCollection()
2241 {
2242     m_afterGC = MonotonicTime::now();
2243     CollectionScope scope = *m_collectionScope;
2244     if (scope == CollectionScope::Full)
2245         m_lastFullGCLength = m_afterGC - m_beforeGC;
2246     else
2247         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2248
2249 #if ENABLE(RESOURCE_USAGE)
2250     ASSERT(externalMemorySize() <= extraMemorySize());
2251 #endif
2252
2253     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2254         gatherExtraHeapSnapshotData(*heapProfiler);
2255         removeDeadHeapSnapshotNodes(*heapProfiler);
2256     }
2257
2258     if (UNLIKELY(m_verifier))
2259         m_verifier->endGC();
2260
2261     RELEASE_ASSERT(m_collectionScope);
2262     m_lastCollectionScope = m_collectionScope;
2263     m_collectionScope = std::nullopt;
2264
2265     for (auto* observer : m_observers)
2266         observer->didGarbageCollect(scope);
2267 }
2268
2269 void Heap::resumeCompilerThreads()
2270 {
2271 #if ENABLE(DFG_JIT)
2272     for (unsigned i = DFG::numberOfWorklists(); i--;)
2273         DFG::existingWorklistForIndex(i).resumeAllThreads();
2274 #endif
2275 }
2276
2277 GCActivityCallback* Heap::fullActivityCallback()
2278 {
2279     return m_fullActivityCallback.get();
2280 }
2281
2282 GCActivityCallback* Heap::edenActivityCallback()
2283 {
2284     return m_edenActivityCallback.get();
2285 }
2286
2287 IncrementalSweeper& Heap::sweeper()
2288 {
2289     return *m_sweeper;
2290 }
2291
2292 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2293 {
2294     if (m_fullActivityCallback)
2295         m_fullActivityCallback->setEnabled(enable);
2296     if (m_edenActivityCallback)
2297         m_edenActivityCallback->setEnabled(enable);
2298 }
2299
2300 void Heap::didAllocate(size_t bytes)
2301 {
2302     if (m_edenActivityCallback)
2303         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2304     m_bytesAllocatedThisCycle += bytes;
2305     performIncrement(bytes);
2306 }
2307
2308 bool Heap::isValidAllocation(size_t)
2309 {
2310     if (!isValidThreadState(m_vm))
2311         return false;
2312
2313     if (isCurrentThreadBusy())
2314         return false;
2315     
2316     return true;
2317 }
2318
2319 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2320 {
2321     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2322 }
2323
2324 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2325 {
2326     HandleSlot slot = handle.slot();
2327     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2328     finalizer(slot->asCell());
2329     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2330 }
2331
2332 void Heap::addExecutable(ExecutableBase* executable)
2333 {
2334     m_executables.append(executable);
2335 }
2336
2337 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2338 {
2339     if (!m_fullActivityCallback) {
2340         collectNow(synchronousness, CollectionScope::Full);
2341         return;
2342     }
2343
2344     if (m_fullActivityCallback->didGCRecently()) {
2345         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2346         reportAbandonedObjectGraph();
2347         return;
2348     }
2349
2350     m_fullActivityCallback->setDidGCRecently();
2351     collectNow(synchronousness, CollectionScope::Full);
2352 }
2353
2354 bool Heap::shouldDoFullCollection()
2355 {
2356     if (!Options::useGenerationalGC())
2357         return true;
2358
2359     if (!m_currentRequest.scope)
2360         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2361     return *m_currentRequest.scope == CollectionScope::Full;
2362 }
2363
2364 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2365 {
2366     m_logicallyEmptyWeakBlocks.append(block);
2367 }
2368
2369 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2370 {
2371     if (m_logicallyEmptyWeakBlocks.isEmpty())
2372         return;
2373
2374     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2375     while (sweepNextLogicallyEmptyWeakBlock()) { }
2376 }
2377
2378 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2379 {
2380     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2381         return false;
2382
2383     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2384
2385     block->sweep();
2386     if (block->isEmpty()) {
2387         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2388         m_logicallyEmptyWeakBlocks.removeLast();
2389         WeakBlock::destroy(*this, block);
2390     } else
2391         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2392
2393     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2394         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2395         return false;
2396     }
2397
2398     return true;
2399 }
2400
2401 size_t Heap::visitCount()
2402 {
2403     size_t result = 0;
2404     forEachSlotVisitor(
2405         [&] (SlotVisitor& visitor) {
2406             result += visitor.visitCount();
2407         });
2408     return result;
2409 }
2410
2411 size_t Heap::bytesVisited()
2412 {
2413     size_t result = 0;
2414     forEachSlotVisitor(
2415         [&] (SlotVisitor& visitor) {
2416             result += visitor.bytesVisited();
2417         });
2418     return result;
2419 }
2420
2421 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2422 {
2423     // We don't know the full set of CodeBlocks until compilation has terminated.
2424     completeAllJITPlans();
2425
2426     return m_codeBlocks->iterate(func);
2427 }
2428
2429 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2430 {
2431     return m_codeBlocks->iterate(locker, func);
2432 }
2433
2434 void Heap::writeBarrierSlowPath(const JSCell* from)
2435 {
2436     if (UNLIKELY(mutatorShouldBeFenced())) {
2437         // In this case, the barrierThreshold is the tautological threshold, so from may not actually
2438         // be black. We can't know for sure until we fire off a fence.
2439         WTF::storeLoadFence();
2440         if (from->cellState() != CellState::PossiblyBlack)
2441             return;
2442     }
2443     
2444     addToRememberedSet(from);
2445 }
2446
2447 bool Heap::isCurrentThreadBusy()
2448 {
2449     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2450 }
2451
2452 void Heap::reportExtraMemoryVisited(size_t size)
2453 {
2454     size_t* counter = &m_extraMemorySize;
2455     
2456     for (;;) {
2457         size_t oldSize = *counter;
2458         // FIXME: Change this to use SaturatedArithmetic when available.
2459         // https://bugs.webkit.org/show_bug.cgi?id=170411
2460         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2461         checkedNewSize += size;
2462         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2463         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2464             return;
2465     }
2466 }
2467
2468 #if ENABLE(RESOURCE_USAGE)
2469 void Heap::reportExternalMemoryVisited(size_t size)
2470 {
2471     size_t* counter = &m_externalMemorySize;
2472
2473     for (;;) {
2474         size_t oldSize = *counter;
2475         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2476             return;
2477     }
2478 }
2479 #endif
2480
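// Starts a collection if the mutator has allocated more than this cycle's budget. When a deferral
// context or DeferGC scope is active we only record that a GC is wanted; otherwise we request an
// asynchronous collection and, if we hold the conn, begin it immediately via stopIfNecessary().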
2481 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2482 {
2483     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2484
2485     if (!m_isSafeToCollect)
2486         return;
2487     switch (mutatorState()) {
2488     case MutatorState::Running:
2489     case MutatorState::Allocating:
2490         break;
2491     case MutatorState::Sweeping:
2492     case MutatorState::Collecting:
2493         return;
2494     }
2495     if (!Options::useGC())
2496         return;
2497     
2498     if (mayNeedToStop()) {
2499         if (deferralContext)
2500             deferralContext->m_shouldGC = true;
2501         else if (isDeferred())
2502             m_didDeferGCWork = true;
2503         else
2504             stopIfNecessary();
2505     }
2506     
2507     if (UNLIKELY(Options::gcMaxHeapSize())) {
2508         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2509             return;
2510     } else {
2511         size_t bytesAllowedThisCycle = m_maxEdenSize;
2512
2513 #if PLATFORM(IOS)
2514         if (overCriticalMemoryThreshold())
2515             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2516 #endif
2517
2518         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2519             return;
2520     }
2521
2522     if (deferralContext)
2523         deferralContext->m_shouldGC = true;
2524     else if (isDeferred())
2525         m_didDeferGCWork = true;
2526     else {
2527         collectAsync();
2528         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2529     }
2530 }
2531
2532 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2533 {
2534     // Can't do anything if we're still deferred.
2535     if (m_deferralDepth)
2536         return;
2537     
2538     ASSERT(!isDeferred());
2539     
2540     m_didDeferGCWork = false;
2541     // FIXME: Bring back something like the DeferGCProbability mode.
2542     // https://bugs.webkit.org/show_bug.cgi?id=166627
2543     collectIfNecessaryOrDefer();
2544 }
2545
2546 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2547 {
2548     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2549 }
2550
2551 void Heap::unregisterWeakGCMap(void* weakGCMap)
2552 {
2553     m_weakGCMaps.remove(weakGCMap);
2554 }
2555
2556 void Heap::didAllocateBlock(size_t capacity)
2557 {
2558 #if ENABLE(RESOURCE_USAGE)
2559     m_blockBytesAllocated += capacity;
2560 #else
2561     UNUSED_PARAM(capacity);
2562 #endif
2563 }
2564
2565 void Heap::didFreeBlock(size_t capacity)
2566 {
2567 #if ENABLE(RESOURCE_USAGE)
2568     m_blockBytesAllocated -= capacity;
2569 #else
2570     UNUSED_PARAM(capacity);
2571 #endif
2572 }
2573
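// Registers the marking constraints that every collection must satisfy: the conservative stack
// scan, miscellaneous small roots, strong handles, debugger/profiler roots, JIT stub routines, weak
// sets, weak reference harvesters, DFG worklists, executing CodeBlocks, and the mutator/race mark
// stacks.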
2574 void Heap::addCoreConstraints()
2575 {
2576     m_constraintSet->add(
2577         "Cs", "Conservative Scan",
2578         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2579             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2580             m_objectSpace.prepareForConservativeScan();
2581             ConservativeRoots conservativeRoots(*this);
2582             SuperSamplerScope superSamplerScope(false);
2583             gatherStackRoots(conservativeRoots);
2584             gatherJSStackRoots(conservativeRoots);
2585             gatherScratchBufferRoots(conservativeRoots);
2586             slotVisitor.append(conservativeRoots);
2587         },
2588         ConstraintVolatility::GreyedByExecution);
2589     
2590     m_constraintSet->add(
2591         "Msr", "Misc Small Roots",
2592         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2593 #if JSC_OBJC_API_ENABLED
2594             scanExternalRememberedSet(*m_vm, slotVisitor);
2595 #endif
2596
2597             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2598                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2599             
2600             for (auto& pair : m_protectedValues)
2601                 slotVisitor.appendUnbarriered(pair.key);
2602             
2603             if (m_markListSet && m_markListSet->size())
2604                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2605             
2606             slotVisitor.appendUnbarriered(m_vm->exception());
2607             slotVisitor.appendUnbarriered(m_vm->lastException());
2608         },
2609         ConstraintVolatility::GreyedByExecution);
2610     
2611     m_constraintSet->add(
2612         "Sh", "Strong Handles",
2613         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2614             m_handleSet.visitStrongHandles(slotVisitor);
2615             m_handleStack.visit(slotVisitor);
2616         },
2617         ConstraintVolatility::GreyedByExecution);
2618     
2619     m_constraintSet->add(
2620         "D", "Debugger",
2621         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2622 #if ENABLE(SAMPLING_PROFILER)
2623             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2624                 LockHolder locker(samplingProfiler->getLock());
2625                 samplingProfiler->processUnverifiedStackTraces();
2626                 samplingProfiler->visit(slotVisitor);
2627                 if (Options::logGC() == GCLogging::Verbose)
2628                     dataLog("Sampling Profiler data:\n", slotVisitor);
2629             }
2630 #endif // ENABLE(SAMPLING_PROFILER)
2631             
2632             if (m_vm->typeProfiler())
2633                 m_vm->typeProfilerLog()->visit(slotVisitor);
2634             
2635             m_vm->shadowChicken().visitChildren(slotVisitor);
2636         },
2637         ConstraintVolatility::GreyedByExecution);
2638     
2639     m_constraintSet->add(
2640         "Jsr", "JIT Stub Routines",
2641         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2642             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2643         },
2644         ConstraintVolatility::GreyedByExecution);
2645     
2646     m_constraintSet->add(
2647         "Ws", "Weak Sets",
2648         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2649             m_objectSpace.visitWeakSets(slotVisitor);
2650         },
2651         ConstraintVolatility::GreyedByMarking);
2652     
2653     m_constraintSet->add(
2654         "Wrh", "Weak Reference Harvesters",
2655         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2656             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2657                 current->visitWeakReferences(slotVisitor);
2658         },
2659         ConstraintVolatility::GreyedByMarking);
2660     
2661 #if ENABLE(DFG_JIT)
2662     m_constraintSet->add(
2663         "Dw", "DFG Worklists",
2664         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2665             for (unsigned i = DFG::numberOfWorklists(); i--;)
2666                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2667             
2668             // FIXME: This is almost certainly unnecessary.
2669             // https://bugs.webkit.org/show_bug.cgi?id=166829
2670             DFG::iterateCodeBlocksForGC(
2671                 *m_vm,
2672                 [&] (CodeBlock* codeBlock) {
2673                     slotVisitor.appendUnbarriered(codeBlock);
2674                 });
2675             
2676             if (Options::logGC() == GCLogging::Verbose)
2677                 dataLog("DFG Worklists:\n", slotVisitor);
2678         },
2679         ConstraintVolatility::GreyedByMarking);
2680 #endif
2681     
2682     m_constraintSet->add(
2683         "Cb", "CodeBlocks",
2684         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2685             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2686                 [&] (CodeBlock* codeBlock) {
2687                     // Visit the CodeBlock as a constraint only if it's black.
2688                     if (Heap::isMarked(codeBlock)
2689                         && codeBlock->cellState() == CellState::PossiblyBlack)
2690                         slotVisitor.visitAsConstraint(codeBlock);
2691                 });
2692         },
2693         ConstraintVolatility::SeldomGreyed);
2694     
2695     m_constraintSet->add(
2696         "Mrms", "Mutator+Race Mark Stack",
2697         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2698             // Indicate to the fixpoint that we introduced work!
2699             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2700             slotVisitor.addToVisitCount(size);
2701             
2702             if (Options::logGC())
2703                 dataLog("(", size, ")");
2704             
2705             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2706             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2707         },
2708         [this] (SlotVisitor&) -> double {
2709             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2710         },
2711         ConstraintVolatility::GreyedByExecution);
2712 }
2713
2714 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2715 {
2716     PreventCollectionScope preventCollectionScope(*this);
2717     m_constraintSet->add(WTFMove(constraint));
2718 }
2719
2720 void Heap::notifyIsSafeToCollect()
2721 {
2722     MonotonicTime before;
2723     if (Options::logGC()) {
2724         before = MonotonicTime::now();
2725         dataLog("[GC<", RawPointer(this), ">: starting ");
2726     }
2727     
2728     addCoreConstraints();
2729     
2730     m_isSafeToCollect = true;
2731     
2732     if (Options::collectContinuously()) {
2733         m_collectContinuouslyThread = WTF::Thread::create(
2734             "JSC DEBUG Continuous GC",
2735             [this] () {
2736                 MonotonicTime initialTime = MonotonicTime::now();
2737                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2738                 while (!m_shouldStopCollectingContinuously) {
2739                     {
2740                         LockHolder locker(*m_threadLock);
2741                         if (m_requests.isEmpty()) {
2742                             m_requests.append(std::nullopt);
2743                             m_lastGrantedTicket++;
2744                             m_threadCondition->notifyOne(locker);
2745                         }
2746                     }
2747                     
2748                     {
2749                         LockHolder locker(m_collectContinuouslyLock);
2750                         Seconds elapsed = MonotonicTime::now() - initialTime;
2751                         Seconds elapsedInPeriod = elapsed % period;
2752                         MonotonicTime timeToWakeUp =
2753                             initialTime + elapsed - elapsedInPeriod + period;
2754                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2755                             m_collectContinuouslyCondition.waitUntil(
2756                                 m_collectContinuouslyLock, timeToWakeUp);
2757                         }
2758                     }
2759                 }
2760             });
2761     }
2762     
2763     if (Options::logGC())
2764         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2765 }
2766
2767 void Heap::preventCollection()
2768 {
2769     if (!m_isSafeToCollect)
2770         return;
2771     
2772     // This prevents the collectContinuously thread from starting a collection.
2773     m_collectContinuouslyLock.lock();
2774     
2775     // Wait for all collections to finish.
2776     waitForCollector(
2777         [&] (const AbstractLocker&) -> bool {
2778             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2779             return m_lastServedTicket == m_lastGrantedTicket;
2780         });
2781     
2782     // Now a collection can only start if this thread starts it.
2783     RELEASE_ASSERT(!m_collectionScope);
2784 }
2785
2786 void Heap::allowCollection()
2787 {
2788     if (!m_isSafeToCollect)
2789         return;
2790     
2791     m_collectContinuouslyLock.unlock();
2792 }
2793
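// Invokes func on the collector visitor, the mutator visitor, and every parallel visitor, holding
// m_parallelSlotVisitorLock so the set of parallel visitors cannot change underneath us.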
2794 template<typename Func>
2795 void Heap::forEachSlotVisitor(const Func& func)
2796 {
2797     auto locker = holdLock(m_parallelSlotVisitorLock);
2798     func(*m_collectorSlotVisitor);
2799     func(*m_mutatorSlotVisitor);
2800     for (auto& slotVisitor : m_parallelSlotVisitors)
2801         func(*slotVisitor);
2802 }
2803
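// When the mutator should be fenced, the barrier threshold becomes the tautological threshold, so
// the inline barrier's fast path never filters anything out and every barriered store reaches
// writeBarrierSlowPath(), which re-checks the cell state after a fence (see above).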
2804 void Heap::setMutatorShouldBeFenced(bool value)
2805 {
2806     m_mutatorShouldBeFenced = value;
2807     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2808 }
2809
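// Allocation-driven incremental marking: each call credits bytes * Options::gcIncrementScale() to
// m_incrementBalance, and once the balance reaches Options::gcIncrementBytes() the mutator drains
// up to Options::gcIncrementMaxBytes() of marking work, debiting however many bytes were actually
// visited. For example, if gcIncrementScale were 2, an 8 KB allocation would credit 16 KB of
// marking work toward the next increment.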
2810 void Heap::performIncrement(size_t bytes)
2811 {
2812     if (!m_objectSpace.isMarking())
2813         return;
2814
2815     m_incrementBalance += bytes * Options::gcIncrementScale();
2816
2817     // Protect ourselves from a wild balance. Since this is an optimization, it's OK to go back to
2818     // any consistent state when the double becomes NaN or infinite.
2819     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2820         m_incrementBalance = 0;
2821     
2822     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2823         return;
2824
2825     double targetBytes = m_incrementBalance;
2826     if (targetBytes <= 0)
2827         return;
2828     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2829
2830     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2831     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2832     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2833     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2834     m_incrementBalance -= bytesVisited;
2835 }
2836
2837 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2838 {
2839     m_heapFinalizerCallbacks.append(callback);
2840 }
2841
2842 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2843 {
2844     m_heapFinalizerCallbacks.removeFirst(callback);
2845 }
2846
2847 } // namespace JSC