Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "BlockDirectoryInlines.h"
25 #include "CodeBlock.h"
26 #include "CodeBlockSetInlines.h"
27 #include "CollectingScope.h"
28 #include "ConservativeRoots.h"
29 #include "DFGWorklistInlines.h"
30 #include "EdenGCActivityCallback.h"
31 #include "Exception.h"
32 #include "FullGCActivityCallback.h"
33 #include "GCActivityCallback.h"
34 #include "GCIncomingRefCountedSetInlines.h"
35 #include "GCSegmentedArrayInlines.h"
36 #include "GCTypeMap.h"
37 #include "HasOwnPropertyCache.h"
38 #include "HeapHelperPool.h"
39 #include "HeapIterationScope.h"
40 #include "HeapProfiler.h"
41 #include "HeapSnapshot.h"
42 #include "HeapVerifier.h"
43 #include "IncrementalSweeper.h"
44 #include "InferredTypeInlines.h"
45 #include "InferredValueInlines.h"
46 #include "Interpreter.h"
47 #include "IsoCellSetInlines.h"
48 #include "JITStubRoutineSet.h"
49 #include "JITWorklist.h"
50 #include "JSCInlines.h"
51 #include "JSGlobalObject.h"
52 #include "JSLock.h"
53 #include "JSVirtualMachineInternal.h"
54 #include "JSWeakMap.h"
55 #include "JSWeakSet.h"
56 #include "JSWebAssemblyCodeBlock.h"
57 #include "MachineStackMarker.h"
58 #include "MarkStackMergingConstraint.h"
59 #include "MarkedSpaceInlines.h"
60 #include "MarkingConstraintSet.h"
61 #include "PreventCollectionScope.h"
62 #include "SamplingProfiler.h"
63 #include "ShadowChicken.h"
64 #include "SpaceTimeMutatorScheduler.h"
65 #include "StochasticSpaceTimeMutatorScheduler.h"
66 #include "StopIfNecessaryTimer.h"
67 #include "SubspaceInlines.h"
68 #include "SuperSampler.h"
69 #include "SweepingScope.h"
70 #include "SynchronousStopTheWorldMutatorScheduler.h"
71 #include "TypeProfiler.h"
72 #include "TypeProfilerLog.h"
73 #include "UnlinkedCodeBlock.h"
74 #include "VM.h"
75 #include "VisitCounter.h"
76 #include "WasmMemory.h"
77 #include "WeakMapImplInlines.h"
78 #include "WeakSetInlines.h"
79 #include <algorithm>
80 #include <wtf/ListDump.h>
81 #include <wtf/MainThread.h>
82 #include <wtf/ParallelVectorIterator.h>
83 #include <wtf/ProcessID.h>
84 #include <wtf/RAMSize.h>
85 #include <wtf/SimpleStats.h>
86 #include <wtf/Threading.h>
87
88 #if PLATFORM(IOS)
89 #include <bmalloc/bmalloc.h>
90 #endif
91
92 #if USE(FOUNDATION)
93 #include <wtf/spi/cocoa/objcSPI.h>
94 #endif
95
96 #if USE(GLIB)
97 #include "JSCGLibWrapperObject.h"
98 #endif
99
100 namespace JSC {
101
102 namespace {
103
104 bool verboseStop = false;
105
106 double maxPauseMS(double thisPauseMS)
107 {
108     static double maxPauseMS;
109     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
110     return maxPauseMS;
111 }
112
113 size_t minHeapSize(HeapType heapType, size_t ramSize)
114 {
115     if (heapType == LargeHeap) {
116         double result = std::min(
117             static_cast<double>(Options::largeHeapSize()),
118             ramSize * Options::smallHeapRAMFraction());
119         return static_cast<size_t>(result);
120     }
121     return Options::smallHeapSize();
122 }
123
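// Picks the trigger size for the next collection as a multiple of the current heap size. The
// growth factor is chosen by how close the process footprint (on iOS) or the heap itself is to
// the configured fractions of RAM; mini VMs use their own dedicated factor.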
124 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
125 {
126     if (VM::isInMiniMode())
127         return Options::miniVMHeapGrowthFactor() * heapSize;
128
129 #if PLATFORM(IOS)
130     size_t memoryFootprint = bmalloc::api::memoryFootprint();
131     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
132         return Options::smallHeapGrowthFactor() * heapSize;
133     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
134         return Options::mediumHeapGrowthFactor() * heapSize;
135 #else
136     if (heapSize < ramSize * Options::smallHeapRAMFraction())
137         return Options::smallHeapGrowthFactor() * heapSize;
138     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
139         return Options::mediumHeapGrowthFactor() * heapSize;
140 #endif
141     return Options::largeHeapGrowthFactor() * heapSize;
142 }
143
144 bool isValidSharedInstanceThreadState(VM* vm)
145 {
146     return vm->currentThreadIsHoldingAPILock();
147 }
148
149 bool isValidThreadState(VM* vm)
150 {
151     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
152         return false;
153
154     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
155         return false;
156
157     return true;
158 }
159
160 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
161 {
162     const char* typeName = "[unknown]";
163     const ClassInfo* info = cell->classInfo(vm);
164     if (info && info->className)
165         typeName = info->className;
166     set.add(typeName);
167 }
168
169 bool measurePhaseTiming()
170 {
171     return false;
172 }
173
174 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
175 {
176     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
177     static std::once_flag once;
178     std::call_once(
179         once,
180         [] {
181             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
182         });
183     return *result;
184 }
185
186 SimpleStats& timingStats(const char* name, CollectionScope scope)
187 {
188     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
189 }
190
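// RAII helper for optional per-phase timing. When measurePhaseTiming() is enabled, a scope like
//     TimingScope timingScope(*this, "Heap::beginMarking");
// logs on destruction how long the phase took along with its running average for that GC scope.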
191 class TimingScope {
192 public:
193     TimingScope(std::optional<CollectionScope> scope, const char* name)
194         : m_scope(scope)
195         , m_name(name)
196     {
197         if (measurePhaseTiming())
198             m_before = MonotonicTime::now();
199     }
200     
201     TimingScope(Heap& heap, const char* name)
202         : TimingScope(heap.collectionScope(), name)
203     {
204     }
205     
206     void setScope(std::optional<CollectionScope> scope)
207     {
208         m_scope = scope;
209     }
210     
211     void setScope(Heap& heap)
212     {
213         setScope(heap.collectionScope());
214     }
215     
216     ~TimingScope()
217     {
218         if (measurePhaseTiming()) {
219             MonotonicTime after = MonotonicTime::now();
220             Seconds timing = after - m_before;
221             SimpleStats& stats = timingStats(m_name, *m_scope);
222             stats.add(timing.milliseconds());
223             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing.milliseconds(), "ms (average ", stats.mean(), "ms).\n");
224         }
225     }
226 private:
227     std::optional<CollectionScope> m_scope;
228     MonotonicTime m_before;
229     const char* m_name;
230 };
231
232 } // anonymous namespace
233
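// The dedicated collector thread. It parks until a collection has been requested and the
// collector (not the mutator) holds the conn, runs collector work increments, and exits once
// m_threadShouldStop is set during heap shutdown.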
234 class Heap::Thread : public AutomaticThread {
235 public:
236     Thread(const AbstractLocker& locker, Heap& heap)
237         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition.copyRef())
238         , m_heap(heap)
239     {
240     }
241
242     const char* name() const override
243     {
244         return "JSC Heap Collector Thread";
245     }
246     
247 protected:
248     PollResult poll(const AbstractLocker& locker) override
249     {
250         if (m_heap.m_threadShouldStop) {
251             m_heap.notifyThreadStopping(locker);
252             return PollResult::Stop;
253         }
254         if (m_heap.shouldCollectInCollectorThread(locker))
255             return PollResult::Work;
256         return PollResult::Wait;
257     }
258     
259     WorkResult work() override
260     {
261         m_heap.collectInCollectorThread();
262         return WorkResult::Continue;
263     }
264     
265     void threadDidStart() override
266     {
267         WTF::registerGCThread(GCThreadType::Main);
268     }
269
270 private:
271     Heap& m_heap;
272 };
273
274 Heap::Heap(VM* vm, HeapType heapType)
275     : m_heapType(heapType)
276     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
277     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
278     , m_sizeAfterLastCollect(0)
279     , m_sizeAfterLastFullCollect(0)
280     , m_sizeBeforeLastFullCollect(0)
281     , m_sizeAfterLastEdenCollect(0)
282     , m_sizeBeforeLastEdenCollect(0)
283     , m_bytesAllocatedThisCycle(0)
284     , m_bytesAbandonedSinceLastFullCollect(0)
285     , m_maxEdenSize(m_minBytesPerCycle)
286     , m_maxHeapSize(m_minBytesPerCycle)
287     , m_shouldDoFullCollection(false)
288     , m_totalBytesVisited(0)
289     , m_objectSpace(this)
290     , m_extraMemorySize(0)
291     , m_deprecatedExtraMemorySize(0)
292     , m_machineThreads(std::make_unique<MachineThreads>())
293     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
294     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
295     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
296     , m_raceMarkStack(std::make_unique<MarkStackArray>())
297     , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this))
298     , m_handleSet(vm)
299     , m_codeBlocks(std::make_unique<CodeBlockSet>())
300     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
301     , m_isSafeToCollect(false)
302     , m_vm(vm)
303     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
304     // schedule the timer if we've never done a collection.
305     , m_lastFullGCLength(0.01)
306     , m_lastEdenGCLength(0.01)
307     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
308     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
309     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
310     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
311     , m_deferralDepth(0)
312 #if USE(FOUNDATION)
313     , m_delayedReleaseRecursionCount(0)
314 #endif
315     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
316     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
317     , m_helperClient(&heapHelperPool())
318     , m_threadLock(Box<Lock>::create())
319     , m_threadCondition(AutomaticThreadCondition::create())
320 {
321     m_worldState.store(0);
322
323     for (unsigned i = 0, numberOfParallelThreads = heapHelperPool().numberOfThreads(); i < numberOfParallelThreads; ++i) {
324         std::unique_ptr<SlotVisitor> visitor = std::make_unique<SlotVisitor>(*this, toCString("P", i + 1));
325         if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
326             visitor->optimizeForStoppedMutator();
327         m_availableParallelSlotVisitors.append(visitor.get());
328         m_parallelSlotVisitors.append(WTFMove(visitor));
329     }
330     
331     if (Options::useConcurrentGC()) {
332         if (Options::useStochasticMutatorScheduler())
333             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
334         else
335             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
336     } else {
337         // We simulate turning off concurrent GC by making the scheduler say that the world
338         // should always be stopped when the collector is running.
339         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
340     }
341     
342     if (Options::verifyHeap())
343         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
344     
345     m_collectorSlotVisitor->optimizeForStoppedMutator();
346
347     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
348     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
349     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
350
351     LockHolder locker(*m_threadLock);
352     m_thread = adoptRef(new Thread(locker, *this));
353 }
354
355 Heap::~Heap()
356 {
357     forEachSlotVisitor(
358         [&] (SlotVisitor& visitor) {
359             visitor.clearMarkStacks();
360         });
361     m_mutatorMarkStack->clear();
362     m_raceMarkStack->clear();
363     
364     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
365         WeakBlock::destroy(*this, block);
366 }
367
368 bool Heap::isPagedOut(MonotonicTime deadline)
369 {
370     return m_objectSpace.isPagedOut(deadline);
371 }
372
373 // The VM is being destroyed and the collector will never run again.
374 // Run all pending finalizers now because we won't get another chance.
375 void Heap::lastChanceToFinalize()
376 {
377     MonotonicTime before;
378     if (Options::logGC()) {
379         before = MonotonicTime::now();
380         dataLog("[GC<", RawPointer(this), ">: shutdown ");
381     }
382     
383     m_isShuttingDown = true;
384     
385     RELEASE_ASSERT(!m_vm->entryScope);
386     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
387     
388     if (m_collectContinuouslyThread) {
389         {
390             LockHolder locker(m_collectContinuouslyLock);
391             m_shouldStopCollectingContinuously = true;
392             m_collectContinuouslyCondition.notifyOne();
393         }
394         m_collectContinuouslyThread->waitForCompletion();
395     }
396     
397     if (Options::logGC())
398         dataLog("1");
399     
400     // Prevent new collections from being started. This is probably not even necessary, since we're not
401     // going to call into anything that starts collections. Still, this makes the algorithm more
402     // obviously sound.
403     m_isSafeToCollect = false;
404     
405     if (Options::logGC())
406         dataLog("2");
407
408     bool isCollecting;
409     {
410         auto locker = holdLock(*m_threadLock);
411         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
412         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
413     }
414     if (isCollecting) {
415         if (Options::logGC())
416             dataLog("...]\n");
417         
418         // Wait for the current collection to finish.
419         waitForCollector(
420             [&] (const AbstractLocker&) -> bool {
421                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
422                 return m_lastServedTicket == m_lastGrantedTicket;
423             });
424         
425         if (Options::logGC())
426             dataLog("[GC<", RawPointer(this), ">: shutdown ");
427     }
428     if (Options::logGC())
429         dataLog("3");
430
431     RELEASE_ASSERT(m_requests.isEmpty());
432     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
433     
434     // Carefully bring the thread down.
435     bool stopped = false;
436     {
437         LockHolder locker(*m_threadLock);
438         stopped = m_thread->tryStop(locker);
439         m_threadShouldStop = true;
440         if (!stopped)
441             m_threadCondition->notifyOne(locker);
442     }
443
444     if (Options::logGC())
445         dataLog("4");
446     
447     if (!stopped)
448         m_thread->join();
449     
450     if (Options::logGC())
451         dataLog("5 ");
452     
453     m_arrayBuffers.lastChanceToFinalize();
454     m_objectSpace.stopAllocatingForGood();
455     m_objectSpace.lastChanceToFinalize();
456     releaseDelayedReleasedObjects();
457
458     sweepAllLogicallyEmptyWeakBlocks();
459     
460     m_objectSpace.freeMemory();
461     
462     if (Options::logGC())
463         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
464 }
465
466 void Heap::releaseDelayedReleasedObjects()
467 {
468 #if USE(FOUNDATION) || USE(GLIB)
469     // We need to guard against the case that releasing an object can create more objects due to the
470     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
471     // back here and could try to recursively release objects. We guard that with a recursive entry
472     // count. Only the initial call will release objects; recursive calls simply return and let the
473     // initial call to the function take care of any objects created during release time.
474     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
475     // and use a temp Vector for the actual releasing.
476     if (!m_delayedReleaseRecursionCount++) {
477         while (!m_delayedReleaseObjects.isEmpty()) {
478             ASSERT(m_vm->currentThreadIsHoldingAPILock());
479
480             auto objectsToRelease = WTFMove(m_delayedReleaseObjects);
481
482             {
483                 // We need to drop locks before calling out to arbitrary code.
484                 JSLock::DropAllLocks dropAllLocks(m_vm);
485
486 #if USE(FOUNDATION)
487                 void* context = objc_autoreleasePoolPush();
488 #endif
489                 objectsToRelease.clear();
490 #if USE(FOUNDATION)
491                 objc_autoreleasePoolPop(context);
492 #endif
493             }
494         }
495     }
496     m_delayedReleaseRecursionCount--;
497 #endif
498 }
499
500 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
501 {
502     didAllocate(size);
503     collectIfNecessaryOrDefer();
504 }
505
506 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
507 {
508     // FIXME: Change this to use SaturatedArithmetic when available.
509     // https://bugs.webkit.org/show_bug.cgi?id=170411
510     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
511     checkedNewSize += size;
512     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
513     reportExtraMemoryAllocatedSlowCase(size);
514 }
515
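// On iOS this reports whether the process is above the critical memory threshold, refreshing the
// cached answer from bmalloc either on a Direct call or every 100 polled calls. Elsewhere it
// always returns false.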
516 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
517 {
518 #if PLATFORM(IOS)
519     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
520         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
521         m_precentAvailableMemoryCachedCallCount = 0;
522     }
523
524     return m_overCriticalMemoryThreshold;
525 #else
526     UNUSED_PARAM(memoryThresholdCallType);
527     return false;
528 #endif
529 }
530
531 void Heap::reportAbandonedObjectGraph()
532 {
533     // Our clients don't know exactly how much memory they
534     // are abandoning so we just guess for them.
535     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
536
537     // We want to accelerate the next collection. Because memory has just 
538     // been abandoned, the next collection has the potential to 
539     // be more profitable. Since allocation is the trigger for collection, 
540     // we hasten the next collection by pretending that we've allocated more memory. 
541     if (m_fullActivityCallback) {
542         m_fullActivityCallback->didAllocate(*this,
543             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
544     }
545     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
546 }
547
548 void Heap::protect(JSValue k)
549 {
550     ASSERT(k);
551     ASSERT(m_vm->currentThreadIsHoldingAPILock());
552
553     if (!k.isCell())
554         return;
555
556     m_protectedValues.add(k.asCell());
557 }
558
559 bool Heap::unprotect(JSValue k)
560 {
561     ASSERT(k);
562     ASSERT(m_vm->currentThreadIsHoldingAPILock());
563
564     if (!k.isCell())
565         return false;
566
567     return m_protectedValues.remove(k.asCell());
568 }
569
570 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
571 {
572     if (m_arrayBuffers.addReference(cell, buffer)) {
573         collectIfNecessaryOrDefer();
574         didAllocate(buffer->gcSizeEstimateInBytes());
575     }
576 }
577
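// Runs finalizeUnconditionally() on every marked cell in the given set. Used by
// finalizeUnconditionalFinalizers() below for cell types that need per-collection cleanup even
// when their cells survive.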
578 template<typename CellType, typename CellSet>
579 void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet)
580 {
581     cellSet.forEachMarkedCell(
582         [&] (HeapCell* cell, HeapCell::Kind) {
583             static_cast<CellType*>(cell)->finalizeUnconditionally(*vm());
584         });
585 }
586
587 void Heap::finalizeUnconditionalFinalizers()
588 {
589     finalizeMarkedUnconditionalFinalizers<InferredType>(vm()->inferredTypesWithFinalizers);
590     finalizeMarkedUnconditionalFinalizers<InferredValue>(vm()->inferredValuesWithFinalizers);
591     vm()->forEachCodeBlockSpace(
592         [&] (auto& space) {
593             this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.finalizerSet);
594         });
595     finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm()->executableToCodeBlockEdgesWithFinalizers);
596     finalizeMarkedUnconditionalFinalizers<JSWeakSet>(vm()->weakSetSpace);
597     finalizeMarkedUnconditionalFinalizers<JSWeakMap>(vm()->weakMapSpace);
598     finalizeMarkedUnconditionalFinalizers<ErrorInstance>(vm()->errorInstanceSpace);
599
600 #if ENABLE(WEBASSEMBLY)
601     finalizeMarkedUnconditionalFinalizers<JSWebAssemblyCodeBlock>(vm()->webAssemblyCodeBlockSpace);
602 #endif
603 }
604
605 void Heap::willStartIterating()
606 {
607     m_objectSpace.willStartIterating();
608 }
609
610 void Heap::didFinishIterating()
611 {
612     m_objectSpace.didFinishIterating();
613 }
614
615 void Heap::completeAllJITPlans()
616 {
617 #if ENABLE(JIT)
618     JITWorklist::instance()->completeAllForVM(*m_vm);
619 #endif // ENABLE(JIT)
620     DFG::completeAllPlansForVM(*m_vm);
621 }
622
623 template<typename Func>
624 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
625 {
626     m_codeBlocks->iterateCurrentlyExecuting(func);
627     DFG::iterateCodeBlocksForGC(*m_vm, func);
628 }
629
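// Like iterateExecutingAndCompilingCodeBlocks(), but copies the code blocks into a local vector
// first so that func runs without the CodeBlockSet and DFG worklist locks held.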
630 template<typename Func>
631 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
632 {
633     Vector<CodeBlock*, 256> codeBlocks;
634     iterateExecutingAndCompilingCodeBlocks(
635         [&] (CodeBlock* codeBlock) {
636             codeBlocks.append(codeBlock);
637         });
638     for (CodeBlock* codeBlock : codeBlocks)
639         func(codeBlock);
640 }
641
642 void Heap::assertMarkStacksEmpty()
643 {
644     bool ok = true;
645     
646     if (!m_sharedCollectorMarkStack->isEmpty()) {
647         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
648         ok = false;
649     }
650     
651     if (!m_sharedMutatorMarkStack->isEmpty()) {
652         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
653         ok = false;
654     }
655     
656     forEachSlotVisitor(
657         [&] (SlotVisitor& visitor) {
658             if (visitor.isEmpty())
659                 return;
660             
661             dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
662             ok = false;
663         });
664     
665     RELEASE_ASSERT(ok);
666 }
667
668 void Heap::gatherStackRoots(ConservativeRoots& roots)
669 {
670     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread);
671 }
672
673 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
674 {
675 #if !ENABLE(JIT)
676     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
677 #else
678     UNUSED_PARAM(roots);
679 #endif
680 }
681
682 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
683 {
684 #if ENABLE(DFG_JIT)
685     m_vm->gatherConservativeRoots(roots);
686 #else
687     UNUSED_PARAM(roots);
688 #endif
689 }
690
691 void Heap::beginMarking()
692 {
693     TimingScope timingScope(*this, "Heap::beginMarking");
694     m_jitStubRoutines->clearMarks();
695     m_objectSpace.beginMarking();
696     setMutatorShouldBeFenced(true);
697 }
698
699 void Heap::removeDeadCompilerWorklistEntries()
700 {
701 #if ENABLE(DFG_JIT)
702     for (unsigned i = DFG::numberOfWorklists(); i--;)
703         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
704 #endif
705 }
706
707 bool Heap::isHeapSnapshotting() const
708 {
709     HeapProfiler* heapProfiler = m_vm->heapProfiler();
710     if (UNLIKELY(heapProfiler))
711         return heapProfiler->activeSnapshotBuilder();
712     return false;
713 }
714
715 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
716     GatherHeapSnapshotData(VM& vm, HeapSnapshotBuilder& builder)
717         : m_vm(vm)
718         , m_builder(builder)
719     {
720     }
721
722     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
723     {
724         if (isJSCellKind(kind)) {
725             JSCell* cell = static_cast<JSCell*>(heapCell);
726             cell->methodTable(m_vm)->heapSnapshot(cell, m_builder);
727         }
728         return IterationStatus::Continue;
729     }
730
731     VM& m_vm;
732     HeapSnapshotBuilder& m_builder;
733 };
734
735 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
736 {
737     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
738         HeapIterationScope heapIterationScope(*this);
739         GatherHeapSnapshotData functor(*m_vm, *builder);
740         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
741     }
742 }
743
744 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
745     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
746         : m_snapshot(snapshot)
747     {
748     }
749
750     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
751     {
752         if (isJSCellKind(kind))
753             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
754         return IterationStatus::Continue;
755     }
756
757     HeapSnapshot& m_snapshot;
758 };
759
760 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
761 {
762     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
763         HeapIterationScope heapIterationScope(*this);
764         RemoveDeadHeapSnapshotNodes functor(*snapshot);
765         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
766         snapshot->shrinkToFit();
767     }
768 }
769
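// Updates the running count of bytes visited by marking. A full collection resets the total;
// an eden collection adds this cycle's visited bytes on top of the previous total.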
770 void Heap::updateObjectCounts()
771 {
772     if (m_collectionScope == CollectionScope::Full)
773         m_totalBytesVisited = 0;
774
775     m_totalBytesVisitedThisCycle = bytesVisited();
776     
777     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
778 }
779
780 void Heap::endMarking()
781 {
782     forEachSlotVisitor(
783         [&] (SlotVisitor& visitor) {
784             visitor.reset();
785         });
786
787     assertMarkStacksEmpty();
788
789     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
790     
791     m_objectSpace.endMarking();
792     setMutatorShouldBeFenced(Options::forceFencedBarrier());
793 }
794
795 size_t Heap::objectCount()
796 {
797     return m_objectSpace.objectCount();
798 }
799
800 size_t Heap::extraMemorySize()
801 {
802     // FIXME: Change this to use SaturatedArithmetic when available.
803     // https://bugs.webkit.org/show_bug.cgi?id=170411
804     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
805     checkedTotal += m_deprecatedExtraMemorySize;
806     checkedTotal += m_arrayBuffers.size();
807     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
808
809     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
810     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
811 }
812
813 size_t Heap::size()
814 {
815     return m_objectSpace.size() + extraMemorySize();
816 }
817
818 size_t Heap::capacity()
819 {
820     return m_objectSpace.capacity() + extraMemorySize();
821 }
822
823 size_t Heap::protectedGlobalObjectCount()
824 {
825     size_t result = 0;
826     forEachProtectedCell(
827         [&] (JSCell* cell) {
828             if (cell->isObject() && asObject(cell)->isGlobalObject())
829                 result++;
830         });
831     return result;
832 }
833
834 size_t Heap::globalObjectCount()
835 {
836     HeapIterationScope iterationScope(*this);
837     size_t result = 0;
838     m_objectSpace.forEachLiveCell(
839         iterationScope,
840         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
841             if (!isJSCellKind(kind))
842                 return IterationStatus::Continue;
843             JSCell* cell = static_cast<JSCell*>(heapCell);
844             if (cell->isObject() && asObject(cell)->isGlobalObject())
845                 result++;
846             return IterationStatus::Continue;
847         });
848     return result;
849 }
850
851 size_t Heap::protectedObjectCount()
852 {
853     size_t result = 0;
854     forEachProtectedCell(
855         [&] (JSCell*) {
856             result++;
857         });
858     return result;
859 }
860
861 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
862 {
863     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
864     forEachProtectedCell(
865         [&] (JSCell* cell) {
866             recordType(*vm(), *result, cell);
867         });
868     return result;
869 }
870
871 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
872 {
873     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
874     HeapIterationScope iterationScope(*this);
875     m_objectSpace.forEachLiveCell(
876         iterationScope,
877         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
878             if (isJSCellKind(kind))
879                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
880             return IterationStatus::Continue;
881         });
882     return result;
883 }
884
885 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
886 {
887     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
888         return;
889
890     VM& vm = *m_vm;
891     PreventCollectionScope preventCollectionScope(*this);
892     
893     // If JavaScript is running, it's not safe to delete all JavaScript code, since
894     // we'll end up returning to deleted code.
895     RELEASE_ASSERT(!vm.entryScope);
896     RELEASE_ASSERT(!m_collectionScope);
897
898     completeAllJITPlans();
899
900     vm.forEachScriptExecutableSpace(
901         [&] (auto& spaceAndSet) {
902             HeapIterationScope heapIterationScope(*this);
903             auto& clearableCodeSet = spaceAndSet.clearableCodeSet;
904             clearableCodeSet.forEachLiveCell(
905                 [&] (HeapCell* cell, HeapCell::Kind) {
906                     ScriptExecutable* executable = static_cast<ScriptExecutable*>(cell);
907                     executable->clearCode(clearableCodeSet);
908                 });
909         });
910
911 #if ENABLE(WEBASSEMBLY)
912     {
913         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
914         // have no idea that we cleared the code from all of the Executables in the
915         // VM. This could leave Wasm in an inconsistent state where it has an IC that
916         // points into a CodeBlock that could be dead. The IC will still succeed because
917         // it uses a callee check, but then it will call into dead code.
918         HeapIterationScope heapIterationScope(*this);
919         vm.webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
920             ASSERT_UNUSED(kind, kind == HeapCell::JSCell);
921             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
922             codeBlock->clearJSCallICs(vm);
923         });
924     }
925 #endif
926 }
927
928 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
929 {
930     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
931         return;
932
933     VM& vm = *m_vm;
934     PreventCollectionScope preventCollectionScope(*this);
935
936     RELEASE_ASSERT(!m_collectionScope);
937
938     HeapIterationScope heapIterationScope(*this);
939     vm.unlinkedFunctionExecutableSpace.clearableCodeSet.forEachLiveCell(
940         [&] (HeapCell* cell, HeapCell::Kind) {
941             UnlinkedFunctionExecutable* executable = static_cast<UnlinkedFunctionExecutable*>(cell);
942             executable->clearCode(vm);
943         });
944 }
945
946 void Heap::deleteUnmarkedCompiledCode()
947 {
948     vm()->forEachScriptExecutableSpace([] (auto& space) { space.space.sweep(); });
949     vm()->forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); }); // Sweeping must occur before deleting stubs, otherwise the stubs might still think they're alive as they get deleted.
950     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
951 }
952
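// Slow path of the write barrier: remembers a cell so that the concurrent collector will revisit
// it. The CellState dance below guards against racing with the collector's own state changes.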
953 void Heap::addToRememberedSet(const JSCell* constCell)
954 {
955     JSCell* cell = const_cast<JSCell*>(constCell);
956     ASSERT(cell);
957     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
958     m_barriersExecuted++;
959     if (m_mutatorShouldBeFenced) {
960         WTF::loadLoadFence();
961         if (!isMarked(cell)) {
962     // During a full collection a store into an unmarked object that had survived past
963             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
964             // object gets marked at some time after this then it will go down the normal marking
965             // path. So, we don't have to remember this object. We could return here. But we go
966             // further and attempt to re-white the object.
967             
968             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
969             
970             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
971                 // Now we protect against this race:
972                 //
973                 //     1) Object starts out black + unmarked.
974                 //     --> We do isMarked here.
975                 //     2) Object is marked and greyed.
976                 //     3) Object is scanned and blacked.
977                 //     --> We do atomicCompareExchangeCellStateStrong here.
978                 //
979                 // In this case we would have made the object white again, even though it should
980                 // be black. This check lets us correct our mistake. This relies on the fact that
981                 // isMarked converges monotonically to true.
982                 if (isMarked(cell)) {
983                     // It's difficult to work out whether the object should be grey or black at
984                     // this point. We say black conservatively.
985                     cell->setCellState(CellState::PossiblyBlack);
986                 }
987                 
988                 // Either way, we can return. Most likely, the object was not marked, and so the
989                 // object is now labeled white. This means that future barrier executions will not
990                 // fire. In the unlikely event that the object had become marked, we can still
991                 // return anyway, since we proved that the object was not marked at the time that
992                 // we executed this slow path.
993             }
994             
995             return;
996         }
997     } else
998         ASSERT(Heap::isMarked(cell));
999     // It could be that the object was *just* marked. This means that the collector may set the
1000     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
1001     // race with the collector here. If we win then this is accurate because the object _will_
1002     // get scanned again. If we lose then someone else will barrier the object again. That would
1003     // be unfortunate but not the end of the world.
1004     cell->setCellState(CellState::PossiblyGrey);
1005     m_mutatorMarkStack->append(cell);
1006 }
1007
1008 void Heap::sweepSynchronously()
1009 {
1010     MonotonicTime before { };
1011     if (Options::logGC()) {
1012         dataLog("Full sweep: ", capacity() / 1024, "kb ");
1013         before = MonotonicTime::now();
1014     }
1015     m_objectSpace.sweep();
1016     m_objectSpace.shrink();
1017     if (Options::logGC()) {
1018         MonotonicTime after = MonotonicTime::now();
1019         dataLog("=> ", capacity() / 1024, "kb, ", (after - before).milliseconds(), "ms");
1020     }
1021 }
1022
1023 void Heap::collect(Synchronousness synchronousness, GCRequest request)
1024 {
1025     switch (synchronousness) {
1026     case Async:
1027         collectAsync(request);
1028         return;
1029     case Sync:
1030         collectSync(request);
1031         return;
1032     }
1033     RELEASE_ASSERT_NOT_REACHED();
1034 }
1035
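// Like collect(), but does additional synchronous work: the Async flavor polls stopIfNecessary()
// right away, while the Sync flavor also sweeps synchronously (unless the collection already did)
// and sweeps all logically empty weak blocks before returning.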
1036 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1037 {
1038     switch (synchronousness) {
1039     case Async: {
1040         collectAsync(request);
1041         stopIfNecessary();
1042         return;
1043     }
1044         
1045     case Sync: {
1046         collectSync(request);
1047         
1048         DeferGCForAWhile deferGC(*this);
1049         if (UNLIKELY(Options::useImmortalObjects()))
1050             sweeper().stopSweeping();
1051         
1052         bool alreadySweptInCollectSync = shouldSweepSynchronously();
1053         if (!alreadySweptInCollectSync) {
1054             if (Options::logGC())
1055                 dataLog("[GC<", RawPointer(this), ">: ");
1056             sweepSynchronously();
1057             if (Options::logGC())
1058                 dataLog("]\n");
1059         }
1060         m_objectSpace.assertNoUnswept();
1061         
1062         sweepAllLogicallyEmptyWeakBlocks();
1063         return;
1064     } }
1065     RELEASE_ASSERT_NOT_REACHED();
1066 }
1067
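// Requests a collection and returns without waiting for it. The request is dropped if an
// equivalent collection (one that subsumes it) has already been queued.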
1068 void Heap::collectAsync(GCRequest request)
1069 {
1070     if (!m_isSafeToCollect)
1071         return;
1072
1073     bool alreadyRequested = false;
1074     {
1075         LockHolder locker(*m_threadLock);
1076         for (const GCRequest& previousRequest : m_requests) {
1077             if (request.subsumedBy(previousRequest)) {
1078                 alreadyRequested = true;
1079                 break;
1080             }
1081         }
1082     }
1083     if (alreadyRequested)
1084         return;
1085
1086     requestCollection(request);
1087 }
1088
1089 void Heap::collectSync(GCRequest request)
1090 {
1091     if (!m_isSafeToCollect)
1092         return;
1093     
1094     waitForCollection(requestCollection(request));
1095 }
1096
1097 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1098 {
1099     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1100     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1101     
1102     if (false)
1103         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1104     
1105     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1106 }
1107
1108 void Heap::collectInCollectorThread()
1109 {
1110     for (;;) {
1111         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1112         switch (result) {
1113         case RunCurrentPhaseResult::Finished:
1114             return;
1115         case RunCurrentPhaseResult::Continue:
1116             break;
1117         case RunCurrentPhaseResult::NeedCurrentThreadState:
1118             RELEASE_ASSERT_NOT_REACHED();
1119             break;
1120         }
1121     }
1122 }
1123
1124 ALWAYS_INLINE int asInt(CollectorPhase phase)
1125 {
1126     return static_cast<int>(phase);
1127 }
1128
1129 void Heap::checkConn(GCConductor conn)
1130 {
1131     unsigned worldState = m_worldState.load();
1132     switch (conn) {
1133     case GCConductor::Mutator:
1134         RELEASE_ASSERT(worldState & mutatorHasConnBit, worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered());
1135         return;
1136     case GCConductor::Collector:
1137         RELEASE_ASSERT(!(worldState & mutatorHasConnBit), worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered());
1138         return;
1139     }
1140     RELEASE_ASSERT_NOT_REACHED();
1141 }
1142
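// Runs one step of the collector state machine on behalf of whichever side (mutator or collector)
// currently holds the conn. Returns Continue if there is another phase to run, Finished when this
// caller has no more work, and NeedCurrentThreadState when the mutator must call back with a
// CurrentThreadState before the fixpoint phase can proceed.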
1143 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1144 {
1145     checkConn(conn);
1146     m_currentThreadState = currentThreadState;
1147     m_currentThread = &WTF::Thread::current();
1148     
1149     if (conn == GCConductor::Mutator)
1150         sanitizeStackForVM(vm());
1151     
1152     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1153     if (!finishChangingPhase(conn)) {
1154         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1155         // this, but it's probably not the end of the world if it did happen.
1156         if (false)
1157             dataLog("Conn bounce-back.\n");
1158         return RunCurrentPhaseResult::Finished;
1159     }
1160     
1161     bool result = false;
1162     switch (m_currentPhase) {
1163     case CollectorPhase::NotRunning:
1164         result = runNotRunningPhase(conn);
1165         break;
1166         
1167     case CollectorPhase::Begin:
1168         result = runBeginPhase(conn);
1169         break;
1170         
1171     case CollectorPhase::Fixpoint:
1172         if (!currentThreadState && conn == GCConductor::Mutator)
1173             return RunCurrentPhaseResult::NeedCurrentThreadState;
1174         
1175         result = runFixpointPhase(conn);
1176         break;
1177         
1178     case CollectorPhase::Concurrent:
1179         result = runConcurrentPhase(conn);
1180         break;
1181         
1182     case CollectorPhase::Reloop:
1183         result = runReloopPhase(conn);
1184         break;
1185         
1186     case CollectorPhase::End:
1187         result = runEndPhase(conn);
1188         break;
1189     }
1190
1191     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1192 }
1193
1194 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1195 {
1196     // Check m_requests since the mutator calls this to poll what's going on.
1197     {
1198         auto locker = holdLock(*m_threadLock);
1199         if (m_requests.isEmpty())
1200             return false;
1201     }
1202     
1203     return changePhase(conn, CollectorPhase::Begin);
1204 }
1205
1206 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1207 {
1208     m_currentGCStartTime = MonotonicTime::now();
1209     
1210     {
1211         LockHolder locker(*m_threadLock);
1212         RELEASE_ASSERT(!m_requests.isEmpty());
1213         m_currentRequest = m_requests.first();
1214     }
1215         
1216     if (Options::logGC())
1217         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1218
1219     m_beforeGC = MonotonicTime::now();
1220
1221     if (m_collectionScope) {
1222         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1223         RELEASE_ASSERT_NOT_REACHED();
1224     }
1225     
1226     willStartCollection();
1227         
1228     if (UNLIKELY(m_verifier)) {
1229         // Verify that live objects from the last GC cycle haven't been corrupted by
1230         // mutators before we begin this new GC cycle.
1231         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1232             
1233         m_verifier->startGC();
1234         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1235     }
1236         
1237     prepareForMarking();
1238         
1239     if (m_collectionScope == CollectionScope::Full) {
1240         m_opaqueRoots.clear();
1241         m_collectorSlotVisitor->clearMarkStacks();
1242         m_mutatorMarkStack->clear();
1243     }
1244
1245     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1246
1247     beginMarking();
1248
1249     forEachSlotVisitor(
1250         [&] (SlotVisitor& visitor) {
1251             visitor.didStartMarking();
1252         });
1253
1254     m_parallelMarkersShouldExit = false;
1255
1256     m_helperClient.setFunction(
1257         [this] () {
1258             SlotVisitor* slotVisitor;
1259             {
1260                 LockHolder locker(m_parallelSlotVisitorLock);
1261                 RELEASE_ASSERT_WITH_MESSAGE(!m_availableParallelSlotVisitors.isEmpty(), "Parallel SlotVisitors are allocated a priori");
1262                 slotVisitor = m_availableParallelSlotVisitors.takeLast();
1263             }
1264
1265             WTF::registerGCThread(GCThreadType::Helper);
1266
1267             {
1268                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1269                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1270             }
1271
1272             {
1273                 LockHolder locker(m_parallelSlotVisitorLock);
1274                 m_availableParallelSlotVisitors.append(slotVisitor);
1275             }
1276         });
1277
1278     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1279
1280     m_constraintSet->didStartMarking();
1281     
1282     m_scheduler->beginCollection();
1283     if (Options::logGC())
1284         m_scheduler->log();
1285     
1286     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1287     // checks because bootstrap would have put things into the visitor. So, we should fall
1288     // through to draining.
1289     
1290     if (!slotVisitor.didReachTermination()) {
1291         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1292         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1293         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1294         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1295         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1296         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1297         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1298         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1299         RELEASE_ASSERT_NOT_REACHED();
1300     }
1301         
1302     return changePhase(conn, CollectorPhase::Fixpoint);
1303 }
1304
1305 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1306 {
1307     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1308     
1309     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1310     
1311     if (Options::logGC()) {
1312         HashMap<const char*, size_t> visitMap;
1313         forEachSlotVisitor(
1314             [&] (SlotVisitor& slotVisitor) {
1315                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1316             });
1317         
1318         auto perVisitorDump = sortedMapDump(
1319             visitMap,
1320             [] (const char* a, const char* b) -> bool {
1321                 return strcmp(a, b) < 0;
1322             },
1323             ":", " ");
1324         
1325         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1326     }
1327         
1328     if (slotVisitor.didReachTermination()) {
1329         m_opaqueRoots.deleteOldTables();
1330         
1331         m_scheduler->didReachTermination();
1332         
1333         assertMarkStacksEmpty();
1334             
1335         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1336         // we don't have to execute root constraints again unless the mutator did run. At a
1337         // minimum, we could use this for work estimates - but it's probably more than just an
1338         // estimate.
1339         // https://bugs.webkit.org/show_bug.cgi?id=166828
1340             
1341         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1342         // add their own using Heap::addMarkingConstraint().
1343         bool converged = m_constraintSet->executeConvergence(slotVisitor);
1344         
1345         // FIXME: The slotVisitor.isEmpty() check is most likely not needed.
1346         // https://bugs.webkit.org/show_bug.cgi?id=180310
1347         if (converged && slotVisitor.isEmpty()) {
1348             assertMarkStacksEmpty();
1349             return changePhase(conn, CollectorPhase::End);
1350         }
1351             
1352         m_scheduler->didExecuteConstraints();
1353     }
1354         
1355     if (Options::logGC())
1356         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1357         
1358     {
1359         ParallelModeEnabler enabler(slotVisitor);
1360         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1361     }
1362         
1363     m_scheduler->synchronousDrainingDidStall();
1364
1365     // This is kinda tricky. The termination check looks at:
1366     //
1367     // - Whether the marking threads are active. If they are not, this means that the marking threads'
1368     //   SlotVisitors are empty.
1369     // - Whether the collector's slot visitor is empty.
1370     // - Whether the shared mark stacks are empty.
1371     //
1372     // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC
1373     // work increment, so it must be empty now.
1374     if (slotVisitor.didReachTermination())
1375         return true; // This is like relooping to the top of runFixpointPhase().
1376         
1377     if (!m_scheduler->shouldResume())
1378         return true;
1379
1380     m_scheduler->willResume();
1381         
1382     if (Options::logGC()) {
1383         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1384         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1385     }
1386
1387     // Forgive the mutator for its past failures to keep up.
1388     // FIXME: Figure out if moving this to different places results in perf changes.
1389     m_incrementBalance = 0;
1390         
1391     return changePhase(conn, CollectorPhase::Concurrent);
1392 }
1393
1394 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1395 {
1396     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1397
1398     switch (conn) {
1399     case GCConductor::Mutator: {
1400         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1401         // stopIfNecessary(), so on every allocation slow path. When that happens we check whether
1402         // it's time to stop and do some work.
1403         if (slotVisitor.didReachTermination()
1404             || m_scheduler->shouldStop())
1405             return changePhase(conn, CollectorPhase::Reloop);
1406         
1407         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1408         // everything. This is super cheap if the SlotVisitor is already empty.
1409         slotVisitor.donateAll();
1410         return false;
1411     }
1412     case GCConductor::Collector: {
1413         {
1414             ParallelModeEnabler enabler(slotVisitor);
1415             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1416         }
1417         return changePhase(conn, CollectorPhase::Reloop);
1418     } }
1419     
1420     RELEASE_ASSERT_NOT_REACHED();
1421     return false;
1422 }
1423
1424 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1425 {
1426     if (Options::logGC())
1427         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1428     
1429     m_scheduler->didStop();
1430     
1431     if (Options::logGC())
1432         m_scheduler->log();
1433     
1434     return changePhase(conn, CollectorPhase::Fixpoint);
1435 }
1436
1437 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1438 {
1439     m_scheduler->endCollection();
1440         
1441     {
1442         auto locker = holdLock(m_markingMutex);
1443         m_parallelMarkersShouldExit = true;
1444         m_markingConditionVariable.notifyAll();
1445     }
1446     m_helperClient.finish();
1447     
1448     iterateExecutingAndCompilingCodeBlocks(
1449         [&] (CodeBlock* codeBlock) {
1450             writeBarrier(codeBlock);
1451         });
1452         
1453     updateObjectCounts();
1454     endMarking();
1455         
1456     if (UNLIKELY(m_verifier)) {
1457         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1458         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1459     }
1460         
1461     if (vm()->typeProfiler())
1462         vm()->typeProfiler()->invalidateTypeSetCache();
1463
1464     if (ValueProfile* profile = vm()->noJITValueProfileSingleton.get())
1465         *profile = ValueProfile(0);
1466         
1467     reapWeakHandles();
1468     pruneStaleEntriesFromWeakGCMaps();
1469     sweepArrayBuffers();
1470     snapshotUnswept();
1471     finalizeUnconditionalFinalizers();
1472     removeDeadCompilerWorklistEntries();
1473     notifyIncrementalSweeper();
1474     
1475     m_codeBlocks->iterateCurrentlyExecuting(
1476         [&] (CodeBlock* codeBlock) {
1477             writeBarrier(codeBlock);
1478         });
1479     m_codeBlocks->clearCurrentlyExecuting();
1480         
1481     m_objectSpace.prepareForAllocation();
1482     updateAllocationLimits();
1483
1484     if (UNLIKELY(m_verifier)) {
1485         m_verifier->trimDeadCells();
1486         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1487     }
1488
1489     didFinishCollection();
1490     
1491     if (m_currentRequest.didFinishEndPhase)
1492         m_currentRequest.didFinishEndPhase->run();
1493     
1494     if (false) {
1495         dataLog("Heap state after GC:\n");
1496         m_objectSpace.dumpBits();
1497     }
1498     
1499     if (Options::logGC()) {
1500         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1501         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1502     }
1503     
1504     {
1505         auto locker = holdLock(*m_threadLock);
1506         m_requests.removeFirst();
1507         m_lastServedTicket++;
1508         clearMutatorWaiting();
1509     }
1510     ParkingLot::unparkAll(&m_worldState);
1511
1512     if (false)
1513         dataLog("GC END!\n");
1514
1515     setNeedFinalize();
1516
1517     m_lastGCStartTime = m_currentGCStartTime;
1518     m_lastGCEndTime = MonotonicTime::now();
1519     m_totalGCTime += m_lastGCEndTime - m_lastGCStartTime;
1520         
1521     return changePhase(conn, CollectorPhase::NotRunning);
1522 }
1523
1524 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1525 {
1526     checkConn(conn);
1527
1528     m_lastPhase = m_currentPhase;
1529     m_nextPhase = nextPhase;
1530
1531     return finishChangingPhase(conn);
1532 }
1533
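// Completes a pending phase change, suspending or resuming the world if the new phase requires
// it. Returns false when the collector cannot stop the mutator because the conn was handed off,
// which leaves us between phases (see runCurrentPhase()).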
1534 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1535 {
1536     checkConn(conn);
1537     
1538     if (m_nextPhase == m_currentPhase)
1539         return true;
1540
1541     if (false)
1542         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1543     
1544     m_phaseVersion++;
1545     
1546     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1547     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1548     
1549     if (suspendedBefore != suspendedAfter) {
1550         if (suspendedBefore) {
1551             RELEASE_ASSERT(!suspendedAfter);
1552             
1553             resumeThePeriphery();
1554             if (conn == GCConductor::Collector)
1555                 resumeTheMutator();
1556             else
1557                 handleNeedFinalize();
1558         } else {
1559             RELEASE_ASSERT(!suspendedBefore);
1560             RELEASE_ASSERT(suspendedAfter);
1561             
1562             if (conn == GCConductor::Collector) {
1563                 waitWhileNeedFinalize();
1564                 if (!stopTheMutator()) {
1565                     if (false)
1566                         dataLog("Returning false.\n");
1567                     return false;
1568                 }
1569             } else {
1570                 sanitizeStackForVM(m_vm);
1571                 handleNeedFinalize();
1572             }
1573             stopThePeriphery(conn);
1574         }
1575     }
1576     
1577     m_currentPhase = m_nextPhase;
1578     return true;
1579 }
1580
1581 void Heap::stopThePeriphery(GCConductor conn)
1582 {
1583     if (m_worldIsStopped) {
1584         dataLog("FATAL: world already stopped.\n");
1585         RELEASE_ASSERT_NOT_REACHED();
1586     }
1587     
1588     if (m_mutatorDidRun)
1589         m_mutatorExecutionVersion++;
1590     
1591     m_mutatorDidRun = false;
1592
1593     suspendCompilerThreads();
1594     m_worldIsStopped = true;
1595
1596     forEachSlotVisitor(
1597         [&] (SlotVisitor& slotVisitor) {
1598             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1599         });
1600
1601 #if ENABLE(JIT)
1602     {
1603         DeferGCForAWhile awhile(*this);
1604         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1605             && conn == GCConductor::Collector)
1606             setGCDidJIT();
1607     }
1608 #else
1609     UNUSED_PARAM(conn);
1610 #endif // ENABLE(JIT)
1611     
1612     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1613     
1614     m_structureIDTable.flushOldTables();
1615     m_objectSpace.stopAllocating();
1616     
1617     m_stopTime = MonotonicTime::now();
1618 }
1619
1620 NEVER_INLINE void Heap::resumeThePeriphery()
1621 {
1622     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1623     // collection cycle or this is just a concurrent phase within a collection cycle:
1624     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1625     //   last active block.
1626     // - During collection cycle: it reinstates the last active block.
1627     m_objectSpace.resumeAllocating();
1628     
1629     m_barriersExecuted = 0;
1630     
1631     if (!m_worldIsStopped) {
1632         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1633         RELEASE_ASSERT_NOT_REACHED();
1634     }
1635     m_worldIsStopped = false;
1636     
1637     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1638     // become available. We basically want a lockAny() method that will lock whatever lock is available
1639     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1640     // queues at once, which is totally achievable - it would just require memory allocation, which is
1641     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1642     // with a DLG-style handshake mechanism, but that seems not as general.
1643     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1644
1645     forEachSlotVisitor(
1646         [&] (SlotVisitor& slotVisitor) {
1647             slotVisitorsToUpdate.append(&slotVisitor);
1648         });
1649     
1650     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1651         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1652             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1653             bool remove = false;
1654             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1655                 remove = true;
1656             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1657                 slotVisitor.updateMutatorIsStopped(locker);
1658                 remove = true;
1659             }
1660             if (remove) {
1661                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1662                 slotVisitorsToUpdate.takeLast();
1663             }
1664         }
1665         WTF::Thread::yield();
1666     }
1667     
1668     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1669         slotVisitor->updateMutatorIsStopped();
1670     
1671     resumeCompilerThreads();
1672 }
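
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out so it cannot affect the build. It shows
// the swap-with-last try-lock polling pattern that resumeThePeriphery() uses above, expressed
// with plain std:: primitives and a hypothetical Worker type (not a WebKit API).
// ---------------------------------------------------------------------------------------------
#if 0
#include <mutex>
#include <thread>
#include <vector>

struct Worker {
    std::mutex rightToRun;             // stands in for SlotVisitor::rightToRun()
    bool acknowledgedResume { false }; // stands in for hasAcknowledgedThatTheMutatorIsResumed()
};

// Poll each worker a bounded number of times; workers that have already acknowledged the resume,
// or whose lock we manage to grab, are retired by swapping them with the last element.
static void notifyWorkersOfResume(std::vector<Worker*> workers)
{
    for (unsigned countdown = 40; !workers.empty() && countdown--;) {
        for (size_t index = 0; index < workers.size(); ++index) {
            Worker& worker = *workers[index];
            bool remove = worker.acknowledgedResume;
            if (!remove && worker.rightToRun.try_lock()) {
                worker.acknowledgedResume = true;
                worker.rightToRun.unlock();
                remove = true;
            }
            if (remove) {
                workers[index--] = workers.back();
                workers.pop_back();
            }
        }
        std::this_thread::yield();
    }
    // Whatever is left gets the blocking path, mirroring the final loop above.
    for (Worker* worker : workers) {
        std::lock_guard<std::mutex> locker(worker->rightToRun);
        worker->acknowledgedResume = true;
    }
}
#endif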
1673
1674 bool Heap::stopTheMutator()
1675 {
1676     for (;;) {
1677         unsigned oldState = m_worldState.load();
1678         if (oldState & stoppedBit) {
1679             RELEASE_ASSERT(!(oldState & hasAccessBit));
1680             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1681             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1682             return true;
1683         }
1684         
1685         if (oldState & mutatorHasConnBit) {
1686             RELEASE_ASSERT(!(oldState & hasAccessBit));
1687             RELEASE_ASSERT(!(oldState & stoppedBit));
1688             return false;
1689         }
1690
1691         if (!(oldState & hasAccessBit)) {
1692             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1693             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1694             // We can stop the world instantly.
1695             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1696                 return true;
1697             continue;
1698         }
1699         
1700         // Transfer the conn to the mutator and bail.
1701         RELEASE_ASSERT(oldState & hasAccessBit);
1702         RELEASE_ASSERT(!(oldState & stoppedBit));
1703         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1704         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1705             if (false)
1706                 dataLog("Handed off the conn.\n");
1707             m_stopIfNecessaryTimer->scheduleSoon();
1708             ParkingLot::unparkAll(&m_worldState);
1709             return false;
1710         }
1711     }
1712 }
1713
1714 NEVER_INLINE void Heap::resumeTheMutator()
1715 {
1716     if (false)
1717         dataLog("Resuming the mutator.\n");
1718     for (;;) {
1719         unsigned oldState = m_worldState.load();
1720         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1721             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1722             RELEASE_ASSERT_NOT_REACHED();
1723         }
1724         if (oldState & mutatorHasConnBit) {
1725             dataLog("Fatal: mutator has the conn.\n");
1726             RELEASE_ASSERT_NOT_REACHED();
1727         }
1728         
1729         if (!(oldState & stoppedBit)) {
1730             if (false)
1731                 dataLog("Returning because not stopped.\n");
1732             return;
1733         }
1734         
1735         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1736             if (false)
1737                 dataLog("CASing and returning.\n");
1738             ParkingLot::unparkAll(&m_worldState);
1739             return;
1740         }
1741     }
1742 }
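
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out so it cannot affect the build. It shows
// the general shape of the lock-free handshake in stopTheMutator()/resumeTheMutator() above: all
// of the mutator/collector state lives in one atomic word of bits, and every transition is a
// load / compute-new-state / weak compare-exchange loop. Names ending in "Sketch" are invented.
// ---------------------------------------------------------------------------------------------
#if 0
#include <atomic>

static constexpr unsigned hasAccessBitSketch      = 1u << 0;
static constexpr unsigned stoppedBitSketch        = 1u << 1;
static constexpr unsigned mutatorHasConnBitSketch = 1u << 2;
static constexpr unsigned mutatorWaitingBitSketch = 1u << 3;

// Returns true if the world could be stopped; returns false if the conn had to be handed to the
// mutator instead, in which case the mutator drives the collection from its own thread.
static bool tryStopTheWorldSketch(std::atomic<unsigned>& worldState)
{
    for (;;) {
        unsigned oldState = worldState.load();
        if (oldState & stoppedBitSketch)
            return true; // Already stopped.
        if (!(oldState & hasAccessBitSketch)) {
            // The mutator has no heap access, so the world can be stopped instantly.
            if (worldState.compare_exchange_weak(oldState, oldState | stoppedBitSketch))
                return true;
            continue; // Lost a race; retry with fresh state.
        }
        // The mutator has access: hand it the conn and clear any stale waiting bit.
        unsigned newState = (oldState | mutatorHasConnBitSketch) & ~mutatorWaitingBitSketch;
        if (worldState.compare_exchange_weak(oldState, newState))
            return false;
    }
}
#endif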
1743
1744 void Heap::stopIfNecessarySlow()
1745 {
1746     while (stopIfNecessarySlow(m_worldState.load())) { }
1747     
1748     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1749     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1750     
1751     handleGCDidJIT();
1752     handleNeedFinalize();
1753     m_mutatorDidRun = true;
1754 }
1755
1756 bool Heap::stopIfNecessarySlow(unsigned oldState)
1757 {
1758     RELEASE_ASSERT(oldState & hasAccessBit);
1759     RELEASE_ASSERT(!(oldState & stoppedBit));
1760     
1761     // It's possible for us to wake up with finalization already requested but the world not yet
1762     // resumed. If that happens, we can't run finalization yet.
1763     if (handleNeedFinalize(oldState))
1764         return true;
1765
1766     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1767     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1768     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1769     // and there would be some other bit indicating whether we were in some GC phase other than the
1770     // NotRunning or Concurrent ones.
1771     if (oldState & mutatorHasConnBit)
1772         collectInMutatorThread();
1773     
1774     return false;
1775 }
1776
1777 NEVER_INLINE void Heap::collectInMutatorThread()
1778 {
1779     CollectingScope collectingScope(*this);
1780     for (;;) {
1781         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1782         switch (result) {
1783         case RunCurrentPhaseResult::Finished:
1784             return;
1785         case RunCurrentPhaseResult::Continue:
1786             break;
1787         case RunCurrentPhaseResult::NeedCurrentThreadState:
1788             sanitizeStackForVM(m_vm);
1789             auto lambda = [&] (CurrentThreadState& state) {
1790                 for (;;) {
1791                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1792                     switch (result) {
1793                     case RunCurrentPhaseResult::Finished:
1794                         return;
1795                     case RunCurrentPhaseResult::Continue:
1796                         break;
1797                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1798                         RELEASE_ASSERT_NOT_REACHED();
1799                         break;
1800                     }
1801                 }
1802             };
1803             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1804             return;
1805         }
1806     }
1807 }
1808
1809 template<typename Func>
1810 void Heap::waitForCollector(const Func& func)
1811 {
1812     for (;;) {
1813         bool done;
1814         {
1815             LockHolder locker(*m_threadLock);
1816             done = func(locker);
1817             if (!done) {
1818                 setMutatorWaiting();
1819                 
1820                 // At this point, the collector knows that we intend to wait, and it will clear the
1821                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1822                 // prevents us from parking unless there is also a stop-the-world. Unparking after
1823                 // clearing means that if the clearing happens after we park, then we will unpark.
1824             }
1825         }
1826         
1827         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1828         unsigned oldState = m_worldState.load();
1829         if (stopIfNecessarySlow(oldState))
1830             continue;
1831         
1832         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1833         // do the collection.
1834         relinquishConn();
1835         
1836         if (done) {
1837             clearMutatorWaiting(); // Clean up just in case.
1838             return;
1839         }
1840         
1841         // If mutatorWaitingBit is still set then we want to wait.
1842         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1843     }
1844 }
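
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out. ParkingLot::compareAndPark() blocks only
// while the word still holds the expected value, which is what makes the "set waiting bit,
// re-check, then park" sequence in waitForCollector() free of lost wakeups. C++20's std::atomic
// wait/notify has the same flavor; the names and the ticket condition below are invented for
// illustration.
// ---------------------------------------------------------------------------------------------
#if 0
#include <atomic>
#include <cstdint>

static constexpr unsigned waitingBitSketch = 1u << 3;

static void waitUntilServedSketch(std::atomic<unsigned>& worldState,
                                  std::atomic<uint64_t>& lastServedTicket,
                                  uint64_t ticket)
{
    for (;;) {
        // 1. Announce the intent to wait, then re-check the condition. The collector clears the
        //    bit and notifies after serving a ticket, so either we observe the served ticket here
        //    or the word changes under us and the wait below returns immediately.
        unsigned expected = worldState.fetch_or(waitingBitSketch) | waitingBitSketch;
        if (lastServedTicket.load() >= ticket)
            return;
        // 2. Block only while the word still equals 'expected' (futex-style), analogous to
        //    ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit).
        worldState.wait(expected);
    }
}
#endif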
1845
1846 void Heap::acquireAccessSlow()
1847 {
1848     for (;;) {
1849         unsigned oldState = m_worldState.load();
1850         RELEASE_ASSERT(!(oldState & hasAccessBit));
1851         
1852         if (oldState & stoppedBit) {
1853             if (verboseStop) {
1854                 dataLog("Stopping in acquireAccess!\n");
1855                 WTFReportBacktrace();
1856             }
1857             // Wait until we're not stopped anymore.
1858             ParkingLot::compareAndPark(&m_worldState, oldState);
1859             continue;
1860         }
1861         
1862         RELEASE_ASSERT(!(oldState & stoppedBit));
1863         unsigned newState = oldState | hasAccessBit;
1864         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1865             handleGCDidJIT();
1866             handleNeedFinalize();
1867             m_mutatorDidRun = true;
1868             stopIfNecessary();
1869             return;
1870         }
1871     }
1872 }
1873
1874 void Heap::releaseAccessSlow()
1875 {
1876     for (;;) {
1877         unsigned oldState = m_worldState.load();
1878         if (!(oldState & hasAccessBit)) {
1879             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1880             RELEASE_ASSERT_NOT_REACHED();
1881         }
1882         if (oldState & stoppedBit) {
1883             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1884             RELEASE_ASSERT_NOT_REACHED();
1885         }
1886         
1887         if (handleNeedFinalize(oldState))
1888             continue;
1889         
1890         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1891         
1892         if ((oldState & mutatorHasConnBit)
1893             && m_nextPhase != m_currentPhase) {
1894             // This means that the collector thread had given us the conn so that we would do something
1895             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1896             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1897             // sure to have work to do.
1898             newState |= stoppedBit;
1899         }
1900
1901         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1902             if (oldState & mutatorHasConnBit)
1903                 finishRelinquishingConn();
1904             return;
1905         }
1906     }
1907 }
1908
1909 bool Heap::relinquishConn(unsigned oldState)
1910 {
1911     RELEASE_ASSERT(oldState & hasAccessBit);
1912     RELEASE_ASSERT(!(oldState & stoppedBit));
1913     
1914     if (!(oldState & mutatorHasConnBit))
1915         return false; // Done.
1916     
1917     if (m_threadShouldStop)
1918         return false;
1919     
1920     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1921         return true; // Loop around.
1922     
1923     finishRelinquishingConn();
1924     return true;
1925 }
1926
1927 void Heap::finishRelinquishingConn()
1928 {
1929     if (false)
1930         dataLog("Relinquished the conn.\n");
1931     
1932     sanitizeStackForVM(m_vm);
1933     
1934     auto locker = holdLock(*m_threadLock);
1935     if (!m_requests.isEmpty())
1936         m_threadCondition->notifyOne(locker);
1937     ParkingLot::unparkAll(&m_worldState);
1938 }
1939
1940 void Heap::relinquishConn()
1941 {
1942     while (relinquishConn(m_worldState.load())) { }
1943 }
1944
1945 bool Heap::handleGCDidJIT(unsigned oldState)
1946 {
1947     RELEASE_ASSERT(oldState & hasAccessBit);
1948     if (!(oldState & gcDidJITBit))
1949         return false;
1950     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1951         WTF::crossModifyingCodeFence();
1952         return true;
1953     }
1954     return true;
1955 }
1956
1957 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1958 {
1959     RELEASE_ASSERT(oldState & hasAccessBit);
1960     RELEASE_ASSERT(!(oldState & stoppedBit));
1961     
1962     if (!(oldState & needFinalizeBit))
1963         return false;
1964     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1965         finalize();
1966         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1967         // which case they would be waiting for us to release heap access.
1968         ParkingLot::unparkAll(&m_worldState);
1969         return true;
1970     }
1971     return true;
1972 }
1973
1974 void Heap::handleGCDidJIT()
1975 {
1976     while (handleGCDidJIT(m_worldState.load())) { }
1977 }
1978
1979 void Heap::handleNeedFinalize()
1980 {
1981     while (handleNeedFinalize(m_worldState.load())) { }
1982 }
1983
1984 void Heap::setGCDidJIT()
1985 {
1986     m_worldState.transaction(
1987         [&] (unsigned& state) -> bool {
1988             RELEASE_ASSERT(state & stoppedBit);
1989             state |= gcDidJITBit;
1990             return true;
1991         });
1992 }
1993
1994 void Heap::setNeedFinalize()
1995 {
1996     m_worldState.exchangeOr(needFinalizeBit);
1997     ParkingLot::unparkAll(&m_worldState);
1998     m_stopIfNecessaryTimer->scheduleSoon();
1999 }
2000
2001 void Heap::waitWhileNeedFinalize()
2002 {
2003     for (;;) {
2004         unsigned oldState = m_worldState.load();
2005         if (!(oldState & needFinalizeBit)) {
2006             // This means that either there was no finalize request or the main thread will finalize
2007             // with heap access, so a subsequent call to stopTheWorld() will return only when
2008             // finalize finishes.
2009             return;
2010         }
2011         ParkingLot::compareAndPark(&m_worldState, oldState);
2012     }
2013 }
2014
2015 void Heap::setMutatorWaiting()
2016 {
2017     m_worldState.exchangeOr(mutatorWaitingBit);
2018 }
2019
2020 void Heap::clearMutatorWaiting()
2021 {
2022     m_worldState.exchangeAnd(~mutatorWaitingBit);
2023 }
2024
2025 void Heap::notifyThreadStopping(const AbstractLocker&)
2026 {
2027     m_threadIsStopping = true;
2028     clearMutatorWaiting();
2029     ParkingLot::unparkAll(&m_worldState);
2030 }
2031
2032 void Heap::finalize()
2033 {
2034     MonotonicTime before;
2035     if (Options::logGC()) {
2036         before = MonotonicTime::now();
2037         dataLog("[GC<", RawPointer(this), ">: finalize ");
2038     }
2039     
2040     {
2041         SweepingScope sweepingScope(*this);
2042         deleteUnmarkedCompiledCode();
2043         deleteSourceProviderCaches();
2044         sweepInFinalize();
2045     }
2046     
2047     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2048         cache->clear();
2049
2050     immutableButterflyToStringCache.clear();
2051     
2052     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2053         callback.run(*vm());
2054     
2055     if (shouldSweepSynchronously())
2056         sweepSynchronously();
2057
2058     if (Options::logGC()) {
2059         MonotonicTime after = MonotonicTime::now();
2060         dataLog((after - before).milliseconds(), "ms]\n");
2061     }
2062 }
2063
2064 Heap::Ticket Heap::requestCollection(GCRequest request)
2065 {
2066     stopIfNecessary();
2067     
2068     ASSERT(vm()->currentThreadIsHoldingAPILock());
2069     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2070     
2071     LockHolder locker(*m_threadLock);
2072     // We may be able to steal the conn. That only works if the collector is definitely not running
2073     // right now. This is an optimization that prevents the collector thread from ever starting in most
2074     // cases.
2075     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2076     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2077         if (false)
2078             dataLog("Taking the conn.\n");
2079         m_worldState.exchangeOr(mutatorHasConnBit);
2080     }
2081     
2082     m_requests.append(request);
2083     m_lastGrantedTicket++;
2084     if (!(m_worldState.load() & mutatorHasConnBit))
2085         m_threadCondition->notifyOne(locker);
2086     return m_lastGrantedTicket;
2087 }
2088
2089 void Heap::waitForCollection(Ticket ticket)
2090 {
2091     waitForCollector(
2092         [&] (const AbstractLocker&) -> bool {
2093             return m_lastServedTicket >= ticket;
2094         });
2095 }
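
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out. requestCollection()/waitForCollection()
// above implement a classic ticket scheme: each request bumps a granted ticket, the collector
// bumps a served ticket when it finishes a cycle, and waiters block until served >= their ticket.
// The sketch uses plain std:: primitives and invented names, not the Heap API.
// ---------------------------------------------------------------------------------------------
#if 0
#include <condition_variable>
#include <cstdint>
#include <mutex>

class TicketedCollectorSketch {
public:
    using Ticket = uint64_t;

    // Called by the mutator: enqueue a request and return the ticket to wait on.
    Ticket requestCollection()
    {
        std::lock_guard<std::mutex> locker(m_lock);
        ++m_lastGrantedTicket;
        m_condition.notify_one(); // Wake the collector thread.
        return m_lastGrantedTicket;
    }

    // Called by the mutator: block until the collector has served our ticket.
    void waitForCollection(Ticket ticket)
    {
        std::unique_lock<std::mutex> locker(m_lock);
        m_condition.wait(locker, [&] { return m_lastServedTicket >= ticket; });
    }

    // Called by the collector after finishing one collection cycle.
    void didServeOneRequest()
    {
        std::lock_guard<std::mutex> locker(m_lock);
        ++m_lastServedTicket;
        m_condition.notify_all(); // Wake every waiter whose ticket is now served.
    }

private:
    std::mutex m_lock;
    std::condition_variable m_condition;
    Ticket m_lastGrantedTicket { 0 };
    Ticket m_lastServedTicket { 0 };
};
#endif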
2096
2097 void Heap::sweepInFinalize()
2098 {
2099     m_objectSpace.sweepLargeAllocations();
2100     vm()->eagerlySweptDestructibleObjectSpace.sweep();
2101 }
2102
2103 void Heap::suspendCompilerThreads()
2104 {
2105 #if ENABLE(DFG_JIT)
2106     // We ensure the worklists so that it's not possible for the mutator to start a new worklist
2107     // after we have suspended the ones it had started before. That's not very expensive since
2108     // the worklists use AutomaticThreads anyway.
2109     for (unsigned i = DFG::numberOfWorklists(); i--;)
2110         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2111 #endif
2112 }
2113
2114 void Heap::willStartCollection()
2115 {
2116     if (Options::logGC())
2117         dataLog("=> ");
2118     
2119     if (shouldDoFullCollection()) {
2120         m_collectionScope = CollectionScope::Full;
2121         m_shouldDoFullCollection = false;
2122         if (Options::logGC())
2123             dataLog("FullCollection, ");
2124         if (false)
2125             dataLog("Full collection!\n");
2126     } else {
2127         m_collectionScope = CollectionScope::Eden;
2128         if (Options::logGC())
2129             dataLog("EdenCollection, ");
2130         if (false)
2131             dataLog("Eden collection!\n");
2132     }
2133     if (m_collectionScope == CollectionScope::Full) {
2134         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2135         m_extraMemorySize = 0;
2136         m_deprecatedExtraMemorySize = 0;
2137 #if ENABLE(RESOURCE_USAGE)
2138         m_externalMemorySize = 0;
2139 #endif
2140
2141         if (m_fullActivityCallback)
2142             m_fullActivityCallback->willCollect();
2143     } else {
2144         ASSERT(m_collectionScope == CollectionScope::Eden);
2145         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2146     }
2147
2148     if (m_edenActivityCallback)
2149         m_edenActivityCallback->willCollect();
2150
2151     for (auto* observer : m_observers)
2152         observer->willGarbageCollect();
2153 }
2154
2155 void Heap::prepareForMarking()
2156 {
2157     m_objectSpace.prepareForMarking();
2158 }
2159
2160 void Heap::reapWeakHandles()
2161 {
2162     m_objectSpace.reapWeakSets();
2163 }
2164
2165 void Heap::pruneStaleEntriesFromWeakGCMaps()
2166 {
2167     if (m_collectionScope != CollectionScope::Full)
2168         return;
2169     for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2170         weakGCMap->pruneStaleEntries();
2171 }
2172
2173 void Heap::sweepArrayBuffers()
2174 {
2175     m_arrayBuffers.sweep();
2176 }
2177
2178 void Heap::snapshotUnswept()
2179 {
2180     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2181     m_objectSpace.snapshotUnswept();
2182 }
2183
2184 void Heap::deleteSourceProviderCaches()
2185 {
2186     if (*m_lastCollectionScope == CollectionScope::Full)
2187         m_vm->clearSourceProviderCaches();
2188 }
2189
2190 void Heap::notifyIncrementalSweeper()
2191 {
2192     if (m_collectionScope == CollectionScope::Full) {
2193         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2194             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2195     }
2196
2197     m_sweeper->startSweeping(*this);
2198 }
2199
2200 void Heap::updateAllocationLimits()
2201 {
2202     static const bool verbose = false;
2203     
2204     if (verbose) {
2205         dataLog("\n");
2206         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2207     }
2208     
2209     // Calculate our current heap size threshold for the purpose of figuring out when we should
2210     // run another collection. This isn't the same as either size() or capacity(), though it should
2211     // be somewhere between the two. The key is to match the size calculations involved in calls
2212     // to didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2213     // fragmentation, we may have size() much smaller than capacity().
2214     size_t currentHeapSize = 0;
2215
2216     // For marked space, we use the total number of bytes visited. This matches the logic for
2217     // BlockDirectory's calls to didAllocate(), which effectively accounts for the total size of
2218     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2219     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2220     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2221     currentHeapSize += m_totalBytesVisited;
2222     if (verbose)
2223         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2224
2225     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2226     // extra memory reporting.
2227     currentHeapSize += extraMemorySize();
2228     if (!ASSERT_DISABLED) {
2229         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2230         checkedCurrentHeapSize += extraMemorySize();
2231         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2232     }
2233
2234     if (verbose)
2235         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2236     
2237     if (m_collectionScope == CollectionScope::Full) {
2238         // To avoid pathological GC churn in very small and very large heaps, we set
2239         // the new allocation limit based on the current size of the heap, with a
2240         // fixed minimum.
2241         m_maxHeapSize = std::max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2242         if (verbose)
2243             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2244         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2245         if (verbose)
2246             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2247         m_sizeAfterLastFullCollect = currentHeapSize;
2248         if (verbose)
2249             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2250         m_bytesAbandonedSinceLastFullCollect = 0;
2251         if (verbose)
2252             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2253     } else {
2254         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2255         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2256         // But we are sloppy, so we have to defend against the overflow.
2257         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2258         if (verbose)
2259             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2260         m_sizeAfterLastEdenCollect = currentHeapSize;
2261         if (verbose)
2262             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2263         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2264         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2265         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2266             m_shouldDoFullCollection = true;
2267         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2268         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2269         if (verbose)
2270             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2271         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2272         if (verbose)
2273             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2274         if (m_fullActivityCallback) {
2275             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2276             m_fullActivityCallback->didAllocate(*this, currentHeapSize - m_sizeAfterLastFullCollect);
2277         }
2278     }
2279
2280 #if PLATFORM(IOS)
2281     // Get critical memory threshold for next cycle.
2282     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2283 #endif
2284
2285     m_sizeAfterLastCollect = currentHeapSize;
2286     if (verbose)
2287         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2288     m_bytesAllocatedThisCycle = 0;
2289
2290     if (Options::logGC())
2291         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2292 }
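
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative arithmetic only, compiled out. It walks the eden branch of
// updateAllocationLimits() above with made-up numbers; the full-collection branch depends on
// proportionalHeapSize()/minHeapSize(), which are defined earlier in this file and not reproduced
// here.
// ---------------------------------------------------------------------------------------------
#if 0
#include <cstddef>

static void edenLimitExample()
{
    // Suppose the last collection left these values:
    size_t maxHeapSize          = 32 * 1024 * 1024; // 32 MB budget for the whole heap.
    size_t sizeAfterLastCollect = 20 * 1024 * 1024; // 20 MB survived the last collection.

    // An eden collection just finished and visited 24 MB of live data:
    size_t currentHeapSize = 24 * 1024 * 1024;

    // maxEdenSize = maxHeapSize - currentHeapSize = 8 MB of nursery headroom left.
    size_t maxEdenSize = currentHeapSize > maxHeapSize ? 0 : maxHeapSize - currentHeapSize;

    // edenToOldGenerationRatio = 8 / 32 = 0.25 < 1/3, so the next collection is promoted to full.
    bool shouldDoFullCollection = (double)maxEdenSize / (double)maxHeapSize < 1.0 / 3.0;

    // The heap budget then grows by the amount the old generation grew (24 - 20 = 4 MB), which
    // keeps the nursery size fixed: maxHeapSize becomes 36 MB and maxEdenSize becomes 12 MB.
    maxHeapSize += currentHeapSize - sizeAfterLastCollect;
    maxEdenSize = maxHeapSize - currentHeapSize;

    (void)shouldDoFullCollection;
    (void)maxEdenSize;
}
#endif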
2293
2294 void Heap::didFinishCollection()
2295 {
2296     m_afterGC = MonotonicTime::now();
2297     CollectionScope scope = *m_collectionScope;
2298     if (scope == CollectionScope::Full)
2299         m_lastFullGCLength = m_afterGC - m_beforeGC;
2300     else
2301         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2302
2303 #if ENABLE(RESOURCE_USAGE)
2304     ASSERT(externalMemorySize() <= extraMemorySize());
2305 #endif
2306
2307     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2308         gatherExtraHeapSnapshotData(*heapProfiler);
2309         removeDeadHeapSnapshotNodes(*heapProfiler);
2310     }
2311
2312     if (UNLIKELY(m_verifier))
2313         m_verifier->endGC();
2314
2315     RELEASE_ASSERT(m_collectionScope);
2316     m_lastCollectionScope = m_collectionScope;
2317     m_collectionScope = std::nullopt;
2318
2319     for (auto* observer : m_observers)
2320         observer->didGarbageCollect(scope);
2321 }
2322
2323 void Heap::resumeCompilerThreads()
2324 {
2325 #if ENABLE(DFG_JIT)
2326     for (unsigned i = DFG::numberOfWorklists(); i--;)
2327         DFG::existingWorklistForIndex(i).resumeAllThreads();
2328 #endif
2329 }
2330
2331 GCActivityCallback* Heap::fullActivityCallback()
2332 {
2333     return m_fullActivityCallback.get();
2334 }
2335
2336 GCActivityCallback* Heap::edenActivityCallback()
2337 {
2338     return m_edenActivityCallback.get();
2339 }
2340
2341 IncrementalSweeper& Heap::sweeper()
2342 {
2343     return *m_sweeper;
2344 }
2345
2346 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2347 {
2348     if (m_fullActivityCallback)
2349         m_fullActivityCallback->setEnabled(enable);
2350     if (m_edenActivityCallback)
2351         m_edenActivityCallback->setEnabled(enable);
2352 }
2353
2354 void Heap::didAllocate(size_t bytes)
2355 {
2356     if (m_edenActivityCallback)
2357         m_edenActivityCallback->didAllocate(*this, m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2358     m_bytesAllocatedThisCycle += bytes;
2359     performIncrement(bytes);
2360 }
2361
2362 bool Heap::isValidAllocation(size_t)
2363 {
2364     if (!isValidThreadState(m_vm))
2365         return false;
2366
2367     if (isCurrentThreadBusy())
2368         return false;
2369     
2370     return true;
2371 }
2372
2373 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2374 {
2375     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2376 }
2377
2378 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2379 {
2380     HandleSlot slot = handle.slot();
2381     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2382     finalizer(slot->asCell());
2383     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2384 }
2385
2386 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2387 {
2388     if (!m_fullActivityCallback) {
2389         collectNow(synchronousness, CollectionScope::Full);
2390         return;
2391     }
2392
2393     if (m_fullActivityCallback->didGCRecently()) {
2394         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2395         reportAbandonedObjectGraph();
2396         return;
2397     }
2398
2399     m_fullActivityCallback->setDidGCRecently();
2400     collectNow(synchronousness, CollectionScope::Full);
2401 }
2402
2403 bool Heap::useGenerationalGC()
2404 {
2405     return Options::useGenerationalGC() && !VM::isInMiniMode();
2406 }
2407
2408 bool Heap::shouldSweepSynchronously()
2409 {
2410     return Options::sweepSynchronously() || VM::isInMiniMode();
2411 }
2412
2413 bool Heap::shouldDoFullCollection()
2414 {
2415     if (!useGenerationalGC())
2416         return true;
2417
2418     if (!m_currentRequest.scope)
2419         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2420     return *m_currentRequest.scope == CollectionScope::Full;
2421 }
2422
2423 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2424 {
2425     m_logicallyEmptyWeakBlocks.append(block);
2426 }
2427
2428 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2429 {
2430     if (m_logicallyEmptyWeakBlocks.isEmpty())
2431         return;
2432
2433     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2434     while (sweepNextLogicallyEmptyWeakBlock()) { }
2435 }
2436
2437 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2438 {
2439     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2440         return false;
2441
2442     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2443
2444     block->sweep();
2445     if (block->isEmpty()) {
2446         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2447         m_logicallyEmptyWeakBlocks.removeLast();
2448         WeakBlock::destroy(*this, block);
2449     } else
2450         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2451
2452     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2453         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2454         return false;
2455     }
2456
2457     return true;
2458 }
2459
2460 size_t Heap::visitCount()
2461 {
2462     size_t result = 0;
2463     forEachSlotVisitor(
2464         [&] (SlotVisitor& visitor) {
2465             result += visitor.visitCount();
2466         });
2467     return result;
2468 }
2469
2470 size_t Heap::bytesVisited()
2471 {
2472     size_t result = 0;
2473     forEachSlotVisitor(
2474         [&] (SlotVisitor& visitor) {
2475             result += visitor.bytesVisited();
2476         });
2477     return result;
2478 }
2479
2480 void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func)
2481 {
2482     // We don't know the full set of CodeBlocks until compilation has terminated.
2483     completeAllJITPlans();
2484
2485     return m_codeBlocks->iterate(func);
2486 }
2487
2488 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func)
2489 {
2490     return m_codeBlocks->iterate(locker, func);
2491 }
2492
2493 void Heap::writeBarrierSlowPath(const JSCell* from)
2494 {
2495     if (UNLIKELY(mutatorShouldBeFenced())) {
2496         // In this case, the barrierThreshold is the tautological threshold, so from might not
2497         // actually be black. We can't know for sure until we fire off a fence.
2498         WTF::storeLoadFence();
2499         if (from->cellState() != CellState::PossiblyBlack)
2500             return;
2501     }
2502     
2503     addToRememberedSet(from);
2504 }
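
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out. The fast path that guards this slow path
// lives in the Heap headers and is not shown here; the sketch below captures the general shape of
// a threshold-based barrier with a tautological-threshold mode, using invented types. It is not
// the actual inline implementation.
// ---------------------------------------------------------------------------------------------
#if 0
#include <atomic>
#include <cstdint>

enum class CellStateSketch : uint8_t { DefinitelyWhite = 0, PossiblyGrey = 1, PossiblyBlack = 2 };

struct CellSketch {
    std::atomic<CellStateSketch> state { CellStateSketch::DefinitelyWhite };
};

struct HeapSketch {
    // Normally only possibly-black cells need logging; while the mutator must be fenced, the
    // threshold is lowered to a tautological value so that every store takes the slow path.
    uint8_t barrierThreshold { static_cast<uint8_t>(CellStateSketch::PossiblyBlack) };

    void addToRememberedSet(CellSketch*) { /* re-grey the cell so it gets re-scanned */ }

    void writeBarrier(CellSketch* from)
    {
        // Fast path: cells below the threshold cannot need logging.
        if (static_cast<uint8_t>(from->state.load(std::memory_order_relaxed)) < barrierThreshold)
            return;
        writeBarrierSlowPath(from);
    }

    void writeBarrierSlowPath(CellSketch* from)
    {
        // With a tautological threshold we get here regardless of color; fence, then re-check,
        // mirroring the storeLoadFence() and cellState() re-check above.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        if (from->state.load(std::memory_order_relaxed) != CellStateSketch::PossiblyBlack)
            return;
        addToRememberedSet(from);
    }
};
#endif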
2505
2506 bool Heap::isCurrentThreadBusy()
2507 {
2508     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2509 }
2510
2511 void Heap::reportExtraMemoryVisited(size_t size)
2512 {
2513     size_t* counter = &m_extraMemorySize;
2514     
2515     for (;;) {
2516         size_t oldSize = *counter;
2517         // FIXME: Change this to use SaturatedArithmetic when available.
2518         // https://bugs.webkit.org/show_bug.cgi?id=170411
2519         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2520         checkedNewSize += size;
2521         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2522         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2523             return;
2524     }
2525 }
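
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out. It is the std::atomic counterpart of the
// saturating compare-and-swap accumulation in reportExtraMemoryVisited() above; the FIXME there
// notes that WTF's SaturatedArithmetic would eventually replace the manual overflow check.
// ---------------------------------------------------------------------------------------------
#if 0
#include <atomic>
#include <cstddef>
#include <limits>

static void addSaturated(std::atomic<size_t>& counter, size_t size)
{
    size_t oldSize = counter.load(std::memory_order_relaxed);
    for (;;) {
        // Clamp to SIZE_MAX instead of wrapping on overflow.
        size_t newSize = oldSize > std::numeric_limits<size_t>::max() - size
            ? std::numeric_limits<size_t>::max()
            : oldSize + size;
        // On failure, oldSize is reloaded with the current value and we retry.
        if (counter.compare_exchange_weak(oldSize, newSize, std::memory_order_relaxed))
            return;
    }
}
#endif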
2526
2527 #if ENABLE(RESOURCE_USAGE)
2528 void Heap::reportExternalMemoryVisited(size_t size)
2529 {
2530     size_t* counter = &m_externalMemorySize;
2531
2532     for (;;) {
2533         size_t oldSize = *counter;
2534         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2535             return;
2536     }
2537 }
2538 #endif
2539
2540 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2541 {
2542     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2543
2544     if (!m_isSafeToCollect)
2545         return;
2546     switch (mutatorState()) {
2547     case MutatorState::Running:
2548     case MutatorState::Allocating:
2549         break;
2550     case MutatorState::Sweeping:
2551     case MutatorState::Collecting:
2552         return;
2553     }
2554     if (!Options::useGC())
2555         return;
2556     
2557     if (mayNeedToStop()) {
2558         if (deferralContext)
2559             deferralContext->m_shouldGC = true;
2560         else if (isDeferred())
2561             m_didDeferGCWork = true;
2562         else
2563             stopIfNecessary();
2564     }
2565     
2566     if (UNLIKELY(Options::gcMaxHeapSize())) {
2567         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2568             return;
2569     } else {
2570         size_t bytesAllowedThisCycle = m_maxEdenSize;
2571
2572 #if PLATFORM(IOS)
2573         if (overCriticalMemoryThreshold())
2574             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2575 #endif
2576
2577         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2578             return;
2579     }
2580
2581     if (deferralContext)
2582         deferralContext->m_shouldGC = true;
2583     else if (isDeferred())
2584         m_didDeferGCWork = true;
2585     else {
2586         collectAsync();
2587         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2588     }
2589 }
2590
2591 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2592 {
2593     // Can't do anything if we're still deferred.
2594     if (m_deferralDepth)
2595         return;
2596     
2597     ASSERT(!isDeferred());
2598     
2599     m_didDeferGCWork = false;
2600     // FIXME: Bring back something like the DeferGCProbability mode.
2601     // https://bugs.webkit.org/show_bug.cgi?id=166627
2602     collectIfNecessaryOrDefer();
2603 }
2604
2605 void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2606 {
2607     m_weakGCMaps.add(weakGCMap);
2608 }
2609
2610 void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2611 {
2612     m_weakGCMaps.remove(weakGCMap);
2613 }
2614
2615 void Heap::didAllocateBlock(size_t capacity)
2616 {
2617 #if ENABLE(RESOURCE_USAGE)
2618     m_blockBytesAllocated += capacity;
2619 #else
2620     UNUSED_PARAM(capacity);
2621 #endif
2622 }
2623
2624 void Heap::didFreeBlock(size_t capacity)
2625 {
2626 #if ENABLE(RESOURCE_USAGE)
2627     m_blockBytesAllocated -= capacity;
2628 #else
2629     UNUSED_PARAM(capacity);
2630 #endif
2631 }
2632
2633 void Heap::addCoreConstraints()
2634 {
2635     m_constraintSet->add(
2636         "Cs", "Conservative Scan",
2637         [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable {
2638             bool shouldNotProduceWork = lastVersion == m_phaseVersion;
2639             if (shouldNotProduceWork)
2640                 return;
2641             
2642             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2643             m_objectSpace.prepareForConservativeScan();
2644             ConservativeRoots conservativeRoots(*this);
2645             SuperSamplerScope superSamplerScope(false);
2646             gatherStackRoots(conservativeRoots);
2647             gatherJSStackRoots(conservativeRoots);
2648             gatherScratchBufferRoots(conservativeRoots);
2649             slotVisitor.append(conservativeRoots);
2650             
2651             lastVersion = m_phaseVersion;
2652         },
2653         ConstraintVolatility::GreyedByExecution);
2654     
2655     m_constraintSet->add(
2656         "Msr", "Misc Small Roots",
2657         [this] (SlotVisitor& slotVisitor) {
2658 #if JSC_OBJC_API_ENABLED
2659             scanExternalRememberedSet(*m_vm, slotVisitor);
2660 #endif
2661
2662             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2663                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2664             
2665             for (auto& pair : m_protectedValues)
2666                 slotVisitor.appendUnbarriered(pair.key);
2667             
2668             if (m_markListSet && m_markListSet->size())
2669                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2670             
2671             slotVisitor.appendUnbarriered(m_vm->exception());
2672             slotVisitor.appendUnbarriered(m_vm->lastException());
2673         },
2674         ConstraintVolatility::GreyedByExecution);
2675     
2676     m_constraintSet->add(
2677         "Sh", "Strong Handles",
2678         [this] (SlotVisitor& slotVisitor) {
2679             m_handleSet.visitStrongHandles(slotVisitor);
2680         },
2681         ConstraintVolatility::GreyedByExecution);
2682     
2683     m_constraintSet->add(
2684         "D", "Debugger",
2685         [this] (SlotVisitor& slotVisitor) {
2686 #if ENABLE(SAMPLING_PROFILER)
2687             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2688                 LockHolder locker(samplingProfiler->getLock());
2689                 samplingProfiler->processUnverifiedStackTraces();
2690                 samplingProfiler->visit(slotVisitor);
2691                 if (Options::logGC() == GCLogging::Verbose)
2692                     dataLog("Sampling Profiler data:\n", slotVisitor);
2693             }
2694 #endif // ENABLE(SAMPLING_PROFILER)
2695             
2696             if (m_vm->typeProfiler())
2697                 m_vm->typeProfilerLog()->visit(slotVisitor);
2698             
2699             m_vm->shadowChicken().visitChildren(slotVisitor);
2700         },
2701         ConstraintVolatility::GreyedByExecution);
2702     
2703     m_constraintSet->add(
2704         "Jsr", "JIT Stub Routines",
2705         [this] (SlotVisitor& slotVisitor) {
2706             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2707         },
2708         ConstraintVolatility::GreyedByExecution);
2709     
2710     m_constraintSet->add(
2711         "Ws", "Weak Sets",
2712         [this] (SlotVisitor& slotVisitor) {
2713             m_objectSpace.visitWeakSets(slotVisitor);
2714         },
2715         ConstraintVolatility::GreyedByMarking);
2716     
2717     m_constraintSet->add(
2718         "O", "Output",
2719         [] (SlotVisitor& slotVisitor) {
2720             VM& vm = slotVisitor.vm();
2721             
2722             auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) {
2723                 VM& vm = slotVisitor.vm();
2724                 JSCell* cell = static_cast<JSCell*>(heapCell);
2725                 cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor);
2726             };
2727             
2728             auto add = [&] (auto& set) {
2729                 slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint));
2730             };
2731             
2732             add(vm.executableToCodeBlockEdgesWithConstraints);
2733             add(vm.weakMapSpace);
2734         },
2735         ConstraintVolatility::GreyedByMarking,
2736         ConstraintParallelism::Parallel);
2737     
2738 #if ENABLE(DFG_JIT)
2739     m_constraintSet->add(
2740         "Dw", "DFG Worklists",
2741         [this] (SlotVisitor& slotVisitor) {
2742             for (unsigned i = DFG::numberOfWorklists(); i--;)
2743                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2744             
2745             // FIXME: This is almost certainly unnecessary.
2746             // https://bugs.webkit.org/show_bug.cgi?id=166829
2747             DFG::iterateCodeBlocksForGC(
2748                 *m_vm,
2749                 [&] (CodeBlock* codeBlock) {
2750                     slotVisitor.appendUnbarriered(codeBlock);
2751                 });
2752             
2753             if (Options::logGC() == GCLogging::Verbose)
2754                 dataLog("DFG Worklists:\n", slotVisitor);
2755         },
2756         ConstraintVolatility::GreyedByMarking);
2757 #endif
2758     
2759     m_constraintSet->add(
2760         "Cb", "CodeBlocks",
2761         [this] (SlotVisitor& slotVisitor) {
2762             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2763                 [&] (CodeBlock* codeBlock) {
2764                     // Visit the CodeBlock as a constraint only if it's black.
2765                     if (Heap::isMarked(codeBlock)
2766                         && codeBlock->cellState() == CellState::PossiblyBlack)
2767                         slotVisitor.visitAsConstraint(codeBlock);
2768                 });
2769         },
2770         ConstraintVolatility::SeldomGreyed);
2771     
2772     m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this));
2773 }
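
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative sketch only, compiled out. The constraint set built above is a list
// of named root-scanning callbacks that the collector re-executes until marking reaches a
// fixpoint; the sketch models that shape with plain std:: types and invented names, not the real
// MarkingConstraintSet API.
// ---------------------------------------------------------------------------------------------
#if 0
#include <cstddef>
#include <functional>
#include <string>
#include <vector>

struct MarkingContextSketch {
    size_t visitCount { 0 }; // Grows whenever a constraint or the drain greys new objects.
};

struct ConstraintSketch {
    std::string abbreviatedName; // e.g. "Cs", "Sh", "Cb" as above.
    std::function<void(MarkingContextSketch&)> execute;
};

// Run every constraint, then drain marking work; repeat until a full pass produces no new work.
static void solveToFixpoint(std::vector<ConstraintSketch>& constraints, MarkingContextSketch& context,
                            const std::function<void(MarkingContextSketch&)>& drainMarkingWork)
{
    for (;;) {
        size_t before = context.visitCount;
        for (ConstraintSketch& constraint : constraints)
            constraint.execute(context);
        drainMarkingWork(context);
        if (context.visitCount == before)
            return; // No constraint produced new grey objects: marking has converged.
    }
}
#endif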
2774
2775 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2776 {
2777     PreventCollectionScope preventCollectionScope(*this);
2778     m_constraintSet->add(WTFMove(constraint));
2779 }
2780
2781 void Heap::notifyIsSafeToCollect()
2782 {
2783     MonotonicTime before;
2784     if (Options::logGC()) {
2785         before = MonotonicTime::now();
2786         dataLog("[GC<", RawPointer(this), ">: starting ");
2787     }
2788     
2789     addCoreConstraints();
2790     
2791     m_isSafeToCollect = true;
2792     
2793     if (Options::collectContinuously()) {
2794         m_collectContinuouslyThread = WTF::Thread::create(
2795             "JSC DEBUG Continuous GC",
2796             [this] () {
2797                 MonotonicTime initialTime = MonotonicTime::now();
2798                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2799                 while (!m_shouldStopCollectingContinuously) {
2800                     {
2801                         LockHolder locker(*m_threadLock);
2802                         if (m_requests.isEmpty()) {
2803                             m_requests.append(std::nullopt);
2804                             m_lastGrantedTicket++;
2805                             m_threadCondition->notifyOne(locker);
2806                         }
2807                     }
2808                     
2809                     {
2810                         LockHolder locker(m_collectContinuouslyLock);
2811                         Seconds elapsed = MonotonicTime::now() - initialTime;
2812                         Seconds elapsedInPeriod = elapsed % period;
2813                         MonotonicTime timeToWakeUp =
2814                             initialTime + elapsed - elapsedInPeriod + period;
2815                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2816                             m_collectContinuouslyCondition.waitUntil(
2817                                 m_collectContinuouslyLock, timeToWakeUp);
2818                         }
2819                     }
2820                 }
2821             });
2822     }
2823     
2824     if (Options::logGC())
2825         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2826 }
2827
2828 void Heap::preventCollection()
2829 {
2830     if (!m_isSafeToCollect)
2831         return;
2832     
2833     // This prevents the collectContinuously thread from starting a collection.
2834     m_collectContinuouslyLock.lock();
2835     
2836     // Wait for all collections to finish.
2837     waitForCollector(
2838         [&] (const AbstractLocker&) -> bool {
2839             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2840             return m_lastServedTicket == m_lastGrantedTicket;
2841         });
2842     
2843     // Now a collection can only start if this thread starts it.
2844     RELEASE_ASSERT(!m_collectionScope);
2845 }
2846
2847 void Heap::allowCollection()
2848 {
2849     if (!m_isSafeToCollect)
2850         return;
2851     
2852     m_collectContinuouslyLock.unlock();
2853 }
2854
2855 void Heap::setMutatorShouldBeFenced(bool value)
2856 {
2857     m_mutatorShouldBeFenced = value;
2858     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2859 }
2860
2861 void Heap::performIncrement(size_t bytes)
2862 {
2863     if (!m_objectSpace.isMarking())
2864         return;
2865
2866     m_incrementBalance += bytes * Options::gcIncrementScale();
2867
2868     // Guard against the balance going haywire. Since this is an optimization, it's OK to go back to
2869     // any consistent state when the double goes wild.
2870     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2871         m_incrementBalance = 0;
2872     
2873     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2874         return;
2875
2876     double targetBytes = m_incrementBalance;
2877     if (targetBytes <= 0)
2878         return;
2879     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2880
2881     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2882     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2883     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2884     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2885     m_incrementBalance -= bytesVisited;
2886 }
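
// ---------------------------------------------------------------------------------------------
// Editor's note: illustrative arithmetic only, compiled out. It walks the pacing logic of
// performIncrement() above with made-up option values; gcIncrementScale, gcIncrementBytes, and
// gcIncrementMaxBytes are real options, but the numbers used here are invented.
// ---------------------------------------------------------------------------------------------
#if 0
static void incrementPacingExample()
{
    double incrementBalance = 0;
    const double gcIncrementScale = 2.0;       // invented value
    const double gcIncrementBytes = 10000;     // invented value
    const double gcIncrementMaxBytes = 200000; // invented value

    // The mutator allocates 3000 bytes: balance becomes 6000, below gcIncrementBytes, so no draining yet.
    incrementBalance += 3000 * gcIncrementScale;

    // It allocates another 4000 bytes: balance becomes 14000, so we drain up to
    // min(14000, gcIncrementMaxBytes) = 14000 bytes' worth of marking work.
    incrementBalance += 4000 * gcIncrementScale;
    double targetBytes = incrementBalance < gcIncrementBytes ? 0 : incrementBalance;
    if (targetBytes > gcIncrementMaxBytes)
        targetBytes = gcIncrementMaxBytes;

    // If the visitor actually drained 15000 bytes (say it finished a large object), the balance
    // goes to -1000, remembering the overshoot and delaying the next increment slightly.
    double bytesVisited = 15000;
    incrementBalance -= bytesVisited;
    (void)targetBytes;
}
#endif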
2887
2888 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2889 {
2890     m_heapFinalizerCallbacks.append(callback);
2891 }
2892
2893 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2894 {
2895     m_heapFinalizerCallbacks.removeFirst(callback);
2896 }
2897
2898 void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2899 {
2900     auto locker = holdLock(m_markingMutex);
2901     m_bonusVisitorTask = task;
2902     m_markingConditionVariable.notifyAll();
2903 }
2904
2905 void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2906 {
2907     unsigned initialRefCount = task->refCount();
2908     setBonusVisitorTask(task);
2909     task->run(*m_collectorSlotVisitor);
2910     setBonusVisitorTask(nullptr);
2911     // The constraint solver expects that returning from this function implies that the task has
2912     // terminated in all threads. The wait below ensures that property.
2913     {
2914         auto locker = holdLock(m_markingMutex);
2915         while (task->refCount() > initialRefCount)
2916             m_markingConditionVariable.wait(m_markingMutex);
2917     }
2918 }
2919
2920 } // namespace JSC