1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "ConservativeRoots.h"
27 #include "DFGWorklistInlines.h"
28 #include "EdenGCActivityCallback.h"
29 #include "Exception.h"
30 #include "FullGCActivityCallback.h"
31 #include "GCActivityCallback.h"
32 #include "GCIncomingRefCountedSetInlines.h"
33 #include "GCSegmentedArrayInlines.h"
34 #include "GCTypeMap.h"
35 #include "HasOwnPropertyCache.h"
36 #include "HeapHelperPool.h"
37 #include "HeapIterationScope.h"
38 #include "HeapProfiler.h"
39 #include "HeapSnapshot.h"
40 #include "HeapStatistics.h"
41 #include "HeapVerifier.h"
42 #include "HelpingGCScope.h"
43 #include "IncrementalSweeper.h"
44 #include "Interpreter.h"
45 #include "JITStubRoutineSet.h"
46 #include "JITWorklist.h"
47 #include "JSCInlines.h"
48 #include "JSGlobalObject.h"
49 #include "JSLock.h"
50 #include "JSVirtualMachineInternal.h"
51 #include "MarkedSpaceInlines.h"
52 #include "MarkingConstraintSet.h"
53 #include "PreventCollectionScope.h"
54 #include "SamplingProfiler.h"
55 #include "ShadowChicken.h"
56 #include "SpaceTimeMutatorScheduler.h"
57 #include "SuperSampler.h"
58 #include "StopIfNecessaryTimer.h"
59 #include "SynchronousStopTheWorldMutatorScheduler.h"
60 #include "TypeProfilerLog.h"
61 #include "UnlinkedCodeBlock.h"
62 #include "VM.h"
63 #include "WeakSetInlines.h"
64 #include <algorithm>
65 #include <wtf/CurrentTime.h>
66 #include <wtf/MainThread.h>
67 #include <wtf/ParallelVectorIterator.h>
68 #include <wtf/ProcessID.h>
69 #include <wtf/RAMSize.h>
70 #include <wtf/SimpleStats.h>
71
72 #if USE(FOUNDATION)
73 #if __has_include(<objc/objc-internal.h>)
74 #include <objc/objc-internal.h>
75 #else
76 extern "C" void* objc_autoreleasePoolPush(void);
77 extern "C" void objc_autoreleasePoolPop(void *context);
78 #endif
79 #endif // USE(FOUNDATION)
80
81 using namespace std;
82
83 namespace JSC {
84
85 namespace {
86
87 bool verboseStop = false;
88
89 double maxPauseMS(double thisPauseMS)
90 {
91     static double maxPauseMS;
92     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
93     return maxPauseMS;
94 }
95
96 size_t minHeapSize(HeapType heapType, size_t ramSize)
97 {
98     if (heapType == LargeHeap) {
99         double result = min(
100             static_cast<double>(Options::largeHeapSize()),
101             ramSize * Options::smallHeapRAMFraction());
102         return static_cast<size_t>(result);
103     }
104     return Options::smallHeapSize();
105 }
106
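// Computes the target size the heap may grow to before the next collection, as a multiple of the
// current heap size. Heaps that are small relative to RAM are allowed to grow by a larger factor
// than medium or large ones.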
107 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
108 {
109     if (heapSize < ramSize * Options::smallHeapRAMFraction())
110         return Options::smallHeapGrowthFactor() * heapSize;
111     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
112         return Options::mediumHeapGrowthFactor() * heapSize;
113     return Options::largeHeapGrowthFactor() * heapSize;
114 }
115
116 bool isValidSharedInstanceThreadState(VM* vm)
117 {
118     return vm->currentThreadIsHoldingAPILock();
119 }
120
121 bool isValidThreadState(VM* vm)
122 {
123     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
124         return false;
125
126     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
127         return false;
128
129     return true;
130 }
131
132 void recordType(TypeCountSet& set, JSCell* cell)
133 {
134     const char* typeName = "[unknown]";
135     const ClassInfo* info = cell->classInfo();
136     if (info && info->className)
137         typeName = info->className;
138     set.add(typeName);
139 }
140
141 bool measurePhaseTiming()
142 {
143     return false;
144 }
145
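// Lazily allocated (and never destroyed) map of timing statistics, keyed by phase name and
// collection scope. It is only populated when measurePhaseTiming() returns true.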
146 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
147 {
148     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
149     static std::once_flag once;
150     std::call_once(
151         once,
152         [] {
153             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
154         });
155     return *result;
156 }
157
158 SimpleStats& timingStats(const char* name, CollectionScope scope)
159 {
160     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
161 }
162
163 class TimingScope {
164 public:
165     TimingScope(std::optional<CollectionScope> scope, const char* name)
166         : m_scope(scope)
167         , m_name(name)
168     {
169         if (measurePhaseTiming())
170             m_before = monotonicallyIncreasingTimeMS();
171     }
172     
173     TimingScope(Heap& heap, const char* name)
174         : TimingScope(heap.collectionScope(), name)
175     {
176     }
177     
178     void setScope(std::optional<CollectionScope> scope)
179     {
180         m_scope = scope;
181     }
182     
183     void setScope(Heap& heap)
184     {
185         setScope(heap.collectionScope());
186     }
187     
188     ~TimingScope()
189     {
190         if (measurePhaseTiming()) {
191             double after = monotonicallyIncreasingTimeMS();
192             double timing = after - m_before;
193             SimpleStats& stats = timingStats(m_name, *m_scope);
194             stats.add(timing);
195             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, " ms (average ", stats.mean(), " ms).\n");
196         }
197     }
198 private:
199     std::optional<CollectionScope> m_scope;
200     double m_before;
201     const char* m_name;
202 };
203
204 } // anonymous namespace
205
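// The collector runs on this dedicated AutomaticThread. It sleeps until a collection request is
// granted (or the heap asks it to stop) and services each request by calling collectInThread().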
206 class Heap::Thread : public AutomaticThread {
207 public:
208     Thread(const LockHolder& locker, Heap& heap)
209         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
210         , m_heap(heap)
211     {
212     }
213     
214 protected:
215     PollResult poll(const LockHolder& locker) override
216     {
217         if (m_heap.m_threadShouldStop) {
218             m_heap.notifyThreadStopping(locker);
219             return PollResult::Stop;
220         }
221         if (m_heap.shouldCollectInThread(locker))
222             return PollResult::Work;
223         return PollResult::Wait;
224     }
225     
226     WorkResult work() override
227     {
228         m_heap.collectInThread();
229         return WorkResult::Continue;
230     }
231     
232     void threadDidStart() override
233     {
234         WTF::registerGCThread(GCThreadType::Main);
235     }
236
237 private:
238     Heap& m_heap;
239 };
240
241 Heap::Heap(VM* vm, HeapType heapType)
242     : m_heapType(heapType)
243     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
244     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
245     , m_sizeAfterLastCollect(0)
246     , m_sizeAfterLastFullCollect(0)
247     , m_sizeBeforeLastFullCollect(0)
248     , m_sizeAfterLastEdenCollect(0)
249     , m_sizeBeforeLastEdenCollect(0)
250     , m_bytesAllocatedThisCycle(0)
251     , m_bytesAbandonedSinceLastFullCollect(0)
252     , m_maxEdenSize(m_minBytesPerCycle)
253     , m_maxHeapSize(m_minBytesPerCycle)
254     , m_shouldDoFullCollection(false)
255     , m_totalBytesVisited(0)
256     , m_objectSpace(this)
257     , m_extraMemorySize(0)
258     , m_deprecatedExtraMemorySize(0)
259     , m_machineThreads(this)
260     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this))
261     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
262     , m_raceMarkStack(std::make_unique<MarkStackArray>())
263     , m_handleSet(vm)
264     , m_codeBlocks(std::make_unique<CodeBlockSet>())
265     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
266     , m_isSafeToCollect(false)
267     , m_vm(vm)
268     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
269     // schedule the timer if we've never done a collection.
270     , m_lastFullGCLength(0.01)
271     , m_lastEdenGCLength(0.01)
272 #if USE(CF)
273     , m_runLoop(CFRunLoopGetCurrent())
274 #endif // USE(CF)
275     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
276     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
277     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
278     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
279     , m_deferralDepth(0)
280 #if USE(FOUNDATION)
281     , m_delayedReleaseRecursionCount(0)
282 #endif
283     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
284     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
285     , m_helperClient(&heapHelperPool())
286     , m_scheduler(std::make_unique<SpaceTimeMutatorScheduler>(*this))
287     , m_threadLock(Box<Lock>::create())
288     , m_threadCondition(AutomaticThreadCondition::create())
289 {
290     m_worldState.store(0);
291     
292     if (Options::useConcurrentGC())
293         m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
294     else {
295         // We simulate turning off concurrent GC by making the scheduler say that the world
296         // should always be stopped when the collector is running.
297         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
298     }
299     
300     if (Options::verifyHeap())
301         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
302     
303     m_collectorSlotVisitor->optimizeForStoppedMutator();
304     
305     LockHolder locker(*m_threadLock);
306     m_thread = adoptRef(new Thread(locker, *this));
307 }
308
309 Heap::~Heap()
310 {
311     for (auto& slotVisitor : m_parallelSlotVisitors)
312         slotVisitor->clearMarkStacks();
313     m_collectorSlotVisitor->clearMarkStacks();
314     m_mutatorMarkStack->clear();
315     m_raceMarkStack->clear();
316     
317     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
318         WeakBlock::destroy(*this, block);
319 }
320
321 bool Heap::isPagedOut(double deadline)
322 {
323     return m_objectSpace.isPagedOut(deadline);
324 }
325
326 // The VM is being destroyed and the collector will never run again.
327 // Run all pending finalizers now because we won't get another chance.
328 void Heap::lastChanceToFinalize()
329 {
330     RELEASE_ASSERT(!m_vm->entryScope);
331     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
332     
333     if (m_collectContinuouslyThread) {
334         {
335             LockHolder locker(m_collectContinuouslyLock);
336             m_shouldStopCollectingContinuously = true;
337             m_collectContinuouslyCondition.notifyOne();
338         }
339         waitForThreadCompletion(m_collectContinuouslyThread);
340     }
341     
342     // Carefully bring the thread down. We need to use waitForCollector() until we know that there
343     // won't be any other collections.
344     bool stopped = false;
345     {
346         LockHolder locker(*m_threadLock);
347         stopped = m_thread->tryStop(locker);
348         if (!stopped) {
349             m_threadShouldStop = true;
350             m_threadCondition->notifyOne(locker);
351         }
352     }
353     if (!stopped) {
354         waitForCollector(
355             [&] (const LockHolder&) -> bool {
356                 return m_threadIsStopping;
357             });
358         // It's now safe to join the thread, since we know that there will not be any more collections.
359         m_thread->join();
360     }
361     
362     m_arrayBuffers.lastChanceToFinalize();
363     m_codeBlocks->lastChanceToFinalize();
364     m_objectSpace.lastChanceToFinalize();
365     releaseDelayedReleasedObjects();
366
367     sweepAllLogicallyEmptyWeakBlocks();
368 }
369
370 void Heap::releaseDelayedReleasedObjects()
371 {
372 #if USE(FOUNDATION)
373     // We need to guard against the case that releasing an object can create more objects due to the
374     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
375     // back here and could try to recursively release objects. We guard that with a recursive entry
376     // count. Only the initial call will release objects, recursive calls simply return and let
377     // the initial call to the function take care of any objects created during release time.
378     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
379     // and use a temp Vector for the actual releasing.
380     if (!m_delayedReleaseRecursionCount++) {
381         while (!m_delayedReleaseObjects.isEmpty()) {
382             ASSERT(m_vm->currentThreadIsHoldingAPILock());
383
384             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
385
386             {
387                 // We need to drop locks before calling out to arbitrary code.
388                 JSLock::DropAllLocks dropAllLocks(m_vm);
389
390                 void* context = objc_autoreleasePoolPush();
391                 objectsToRelease.clear();
392                 objc_autoreleasePoolPop(context);
393             }
394         }
395     }
396     m_delayedReleaseRecursionCount--;
397 #endif
398 }
399
400 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
401 {
402     didAllocate(size);
403     collectIfNecessaryOrDefer();
404 }
405
406 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
407 {
408     m_deprecatedExtraMemorySize += size;
409     reportExtraMemoryAllocatedSlowCase(size);
410 }
411
412 void Heap::reportAbandonedObjectGraph()
413 {
414     // Our clients don't know exactly how much memory they
415     // are abandoning so we just guess for them.
416     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
417
418     // We want to accelerate the next collection. Because memory has just 
419     // been abandoned, the next collection has the potential to 
420     // be more profitable. Since allocation is the trigger for collection, 
421     // we hasten the next collection by pretending that we've allocated more memory. 
422     if (m_fullActivityCallback) {
423         m_fullActivityCallback->didAllocate(
424             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
425     }
426     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
427 }
428
429 void Heap::protect(JSValue k)
430 {
431     ASSERT(k);
432     ASSERT(m_vm->currentThreadIsHoldingAPILock());
433
434     if (!k.isCell())
435         return;
436
437     m_protectedValues.add(k.asCell());
438 }
439
440 bool Heap::unprotect(JSValue k)
441 {
442     ASSERT(k);
443     ASSERT(m_vm->currentThreadIsHoldingAPILock());
444
445     if (!k.isCell())
446         return false;
447
448     return m_protectedValues.remove(k.asCell());
449 }
450
451 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
452 {
453     if (m_arrayBuffers.addReference(cell, buffer)) {
454         collectIfNecessaryOrDefer();
455         didAllocate(buffer->gcSizeEstimateInBytes());
456     }
457 }
458
459 void Heap::finalizeUnconditionalFinalizers()
460 {
461     while (m_unconditionalFinalizers.hasNext()) {
462         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
463         finalizer->finalizeUnconditionally();
464     }
465 }
466
467 void Heap::willStartIterating()
468 {
469     m_objectSpace.willStartIterating();
470 }
471
472 void Heap::didFinishIterating()
473 {
474     m_objectSpace.didFinishIterating();
475 }
476
477 void Heap::completeAllJITPlans()
478 {
479 #if ENABLE(JIT)
480     JITWorklist::instance()->completeAllForVM(*m_vm);
481 #endif // ENABLE(JIT)
482     DFG::completeAllPlansForVM(*m_vm);
483 }
484
485 template<typename Func>
486 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
487 {
488     m_codeBlocks->iterateCurrentlyExecuting(func);
489     DFG::iterateCodeBlocksForGC(*m_vm, func);
490 }
491
492 template<typename Func>
493 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
494 {
495     Vector<CodeBlock*, 256> codeBlocks;
496     iterateExecutingAndCompilingCodeBlocks(
497         [&] (CodeBlock* codeBlock) {
498             codeBlocks.append(codeBlock);
499         });
500     for (CodeBlock* codeBlock : codeBlocks)
501         func(codeBlock);
502 }
503
504 void Heap::assertSharedMarkStacksEmpty()
505 {
506     bool ok = true;
507     
508     if (!m_sharedCollectorMarkStack->isEmpty()) {
509         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
510         ok = false;
511     }
512     
513     if (!m_sharedMutatorMarkStack->isEmpty()) {
514         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
515         ok = false;
516     }
517     
518     RELEASE_ASSERT(ok);
519 }
520
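// Drives marking to a fixpoint: executes the marking constraints, drains the mark stacks in
// parallel, and lets the scheduler decide when to resume and re-stop the mutator, until all
// constraints have executed and no marking work remains.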
521 void Heap::markToFixpoint(double gcStartTime)
522 {
523     TimingScope markToFixpointTimingScope(*this, "Heap::markToFixpoint");
524     
525     if (m_collectionScope == CollectionScope::Full) {
526         m_opaqueRoots.clear();
527         m_collectorSlotVisitor->clearMarkStacks();
528         m_mutatorMarkStack->clear();
529     }
530
531     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
532
533     beginMarking();
534
535     m_parallelMarkersShouldExit = false;
536
537     m_helperClient.setFunction(
538         [this] () {
539             SlotVisitor* slotVisitor;
540             {
541                 LockHolder locker(m_parallelSlotVisitorLock);
542                 if (m_availableParallelSlotVisitors.isEmpty()) {
543                     std::unique_ptr<SlotVisitor> newVisitor =
544                         std::make_unique<SlotVisitor>(*this);
545                     
546                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
547                         newVisitor->optimizeForStoppedMutator();
548                     
549                     slotVisitor = newVisitor.get();
550                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
551                 } else
552                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
553             }
554
555             WTF::registerGCThread(GCThreadType::Helper);
556
557             {
558                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
559                 slotVisitor->didStartMarking();
560                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
561             }
562
563             {
564                 LockHolder locker(m_parallelSlotVisitorLock);
565                 m_availableParallelSlotVisitors.append(slotVisitor);
566             }
567         });
568
569     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
570     slotVisitor.didStartMarking();
571
572     m_constraintSet->resetStats();
573     
574     m_scheduler->beginCollection();
575     if (Options::logGC())
576         m_scheduler->log();
577     
578     // Wondering what m_constraintSet->executeXYZ does? It's running the constraints created by
579     // Heap::buildConstraintSet().
580     
581     m_constraintSet->executeBootstrap(slotVisitor, MonotonicTime::infinity());
582     m_scheduler->didExecuteConstraints();
583
584     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
585     // checks because bootstrap would have put things into the visitor. So, we should fall
586     // through to draining.
587     
588     for (unsigned iteration = 1; ; ++iteration) {
589         if (Options::logGC())
590             dataLog("i#", iteration, " b=", m_barriersExecuted, " ");
591         
592         if (slotVisitor.didReachTermination()) {
593             assertSharedMarkStacksEmpty();
594             
595             // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
596             // we don't have to execute root constraints again unless the mutator did run. At a
597             // minimum, we could use this for work estimates - but it's probably more than just an
598             // estimate.
599             // https://bugs.webkit.org/show_bug.cgi?id=166828
600             
601             // FIXME: We should take advantage of the fact that we could timeout. This only comes
602             // into play if we're executing constraints for the first time. But that will matter
603             // when we have deep stacks or a lot of DOM stuff.
604             // https://bugs.webkit.org/show_bug.cgi?id=166831
605             
606             bool executedEverything =
607                 m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
608             if (executedEverything && slotVisitor.isEmpty()) {
609                 assertSharedMarkStacksEmpty();
610                 break;
611             }
612             
613             m_scheduler->didExecuteConstraints();
614         }
615         
616         if (Options::logGC())
617             dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
618         
619         {
620             ParallelModeEnabler enabler(slotVisitor);
621             slotVisitor.drainInParallel(m_scheduler->timeToResume());
622         }
623         
624         if (!m_scheduler->shouldResume())
625             continue;
626         
627         m_scheduler->willResume();
628         
629         if (Options::logGC()) {
630             double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
631             dataLog("p=", thisPauseMS, " ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
632         }
633         
634         resumeTheWorld();
635         
636         {
637             ParallelModeEnabler enabler(slotVisitor);
638             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
639         }
640
641         stopTheWorld();
642         
643         if (Options::logGC())
644             dataLog("[GC: ");
645         
646         m_scheduler->didStop();
647         
648         if (Options::logGC())
649             m_scheduler->log();
650     }
651     
652     m_scheduler->endCollection();
653
654     {
655         std::lock_guard<Lock> lock(m_markingMutex);
656         m_parallelMarkersShouldExit = true;
657         m_markingConditionVariable.notifyAll();
658     }
659     m_helperClient.finish();
660
661     iterateExecutingAndCompilingCodeBlocks(
662         [&] (CodeBlock* codeBlock) {
663             writeBarrier(codeBlock);
664         });
665         
666     updateObjectCounts(gcStartTime);
667     endMarking();
668 }
669
670 void Heap::gatherStackRoots(ConservativeRoots& roots)
671 {
672     m_machineThreads.gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
673 }
674
675 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
676 {
677 #if !ENABLE(JIT)
678     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
679 #else
680     UNUSED_PARAM(roots);
681 #endif
682 }
683
684 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
685 {
686 #if ENABLE(DFG_JIT)
687     m_vm->gatherConservativeRoots(roots);
688 #else
689     UNUSED_PARAM(roots);
690 #endif
691 }
692
693 void Heap::beginMarking()
694 {
695     TimingScope timingScope(*this, "Heap::beginMarking");
696     if (m_collectionScope == CollectionScope::Full)
697         m_codeBlocks->clearMarksForFullCollection();
698     m_jitStubRoutines->clearMarks();
699     m_objectSpace.beginMarking();
700     setMutatorShouldBeFenced(true);
701 }
702
703 void Heap::removeDeadCompilerWorklistEntries()
704 {
705 #if ENABLE(DFG_JIT)
706     for (unsigned i = DFG::numberOfWorklists(); i--;)
707         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
708 #endif
709 }
710
711 bool Heap::isHeapSnapshotting() const
712 {
713     HeapProfiler* heapProfiler = m_vm->heapProfiler();
714     if (UNLIKELY(heapProfiler))
715         return heapProfiler->activeSnapshotBuilder();
716     return false;
717 }
718
719 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
720     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
721         : m_builder(builder)
722     {
723     }
724
725     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
726     {
727         if (kind == HeapCell::JSCell) {
728             JSCell* cell = static_cast<JSCell*>(heapCell);
729             cell->methodTable()->heapSnapshot(cell, m_builder);
730         }
731         return IterationStatus::Continue;
732     }
733
734     HeapSnapshotBuilder& m_builder;
735 };
736
737 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
738 {
739     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
740         HeapIterationScope heapIterationScope(*this);
741         GatherHeapSnapshotData functor(*builder);
742         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
743     }
744 }
745
746 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
747     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
748         : m_snapshot(snapshot)
749     {
750     }
751
752     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
753     {
754         if (kind == HeapCell::JSCell)
755             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
756         return IterationStatus::Continue;
757     }
758
759     HeapSnapshot& m_snapshot;
760 };
761
762 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
763 {
764     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
765         HeapIterationScope heapIterationScope(*this);
766         RemoveDeadHeapSnapshotNodes functor(*snapshot);
767         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
768         snapshot->shrinkToFit();
769     }
770 }
771
772 void Heap::updateObjectCounts(double gcStartTime)
773 {
774     if (Options::logGC() == GCLogging::Verbose) {
775         size_t visitCount = m_collectorSlotVisitor->visitCount();
776         visitCount += threadVisitCount();
777         dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
778     }
779     
780     if (m_collectionScope == CollectionScope::Full)
781         m_totalBytesVisited = 0;
782
783     m_totalBytesVisitedThisCycle =
784         m_collectorSlotVisitor->bytesVisited() +
785         threadBytesVisited();
786     
787     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
788 }
789
790 void Heap::endMarking()
791 {
792     m_collectorSlotVisitor->reset();
793
794     for (auto& parallelVisitor : m_parallelSlotVisitors)
795         parallelVisitor->reset();
796
797     assertSharedMarkStacksEmpty();
798     m_weakReferenceHarvesters.removeAll();
799
800     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
801     
802     m_objectSpace.endMarking();
803     setMutatorShouldBeFenced(Options::forceFencedBarrier());
804 }
805
806 size_t Heap::objectCount()
807 {
808     return m_objectSpace.objectCount();
809 }
810
811 size_t Heap::extraMemorySize()
812 {
813     return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
814 }
815
816 size_t Heap::size()
817 {
818     return m_objectSpace.size() + extraMemorySize();
819 }
820
821 size_t Heap::capacity()
822 {
823     return m_objectSpace.capacity() + extraMemorySize();
824 }
825
826 size_t Heap::protectedGlobalObjectCount()
827 {
828     size_t result = 0;
829     forEachProtectedCell(
830         [&] (JSCell* cell) {
831             if (cell->isObject() && asObject(cell)->isGlobalObject())
832                 result++;
833         });
834     return result;
835 }
836
837 size_t Heap::globalObjectCount()
838 {
839     HeapIterationScope iterationScope(*this);
840     size_t result = 0;
841     m_objectSpace.forEachLiveCell(
842         iterationScope,
843         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
844             if (kind != HeapCell::JSCell)
845                 return IterationStatus::Continue;
846             JSCell* cell = static_cast<JSCell*>(heapCell);
847             if (cell->isObject() && asObject(cell)->isGlobalObject())
848                 result++;
849             return IterationStatus::Continue;
850         });
851     return result;
852 }
853
854 size_t Heap::protectedObjectCount()
855 {
856     size_t result = 0;
857     forEachProtectedCell(
858         [&] (JSCell*) {
859             result++;
860         });
861     return result;
862 }
863
864 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
865 {
866     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
867     forEachProtectedCell(
868         [&] (JSCell* cell) {
869             recordType(*result, cell);
870         });
871     return result;
872 }
873
874 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
875 {
876     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
877     HeapIterationScope iterationScope(*this);
878     m_objectSpace.forEachLiveCell(
879         iterationScope,
880         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
881             if (kind == HeapCell::JSCell)
882                 recordType(*result, static_cast<JSCell*>(cell));
883             return IterationStatus::Continue;
884         });
885     return result;
886 }
887
888 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
889 {
890     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
891         return;
892     
893     PreventCollectionScope preventCollectionScope(*this);
894     
895     // If JavaScript is running, it's not safe to delete all JavaScript code, since
896     // we'll end up returning to deleted code.
897     RELEASE_ASSERT(!m_vm->entryScope);
898     RELEASE_ASSERT(!m_collectionScope);
899
900     completeAllJITPlans();
901
902     for (ExecutableBase* executable : m_executables)
903         executable->clearCode();
904 }
905
906 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
907 {
908     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
909         return;
910     
911     PreventCollectionScope preventCollectionScope(*this);
912
913     RELEASE_ASSERT(!m_collectionScope);
914     
915     for (ExecutableBase* current : m_executables) {
916         if (!current->isFunctionExecutable())
917             continue;
918         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
919     }
920 }
921
922 void Heap::clearUnmarkedExecutables()
923 {
924     for (unsigned i = m_executables.size(); i--;) {
925         ExecutableBase* current = m_executables[i];
926         if (isMarked(current))
927             continue;
928
929         // Eagerly dereference the Executable's JITCode in order to run watchpoint
930         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
931         current->clearCode();
932         std::swap(m_executables[i], m_executables.last());
933         m_executables.removeLast();
934     }
935
936     m_executables.shrinkToFit();
937 }
938
939 void Heap::deleteUnmarkedCompiledCode()
940 {
941     clearUnmarkedExecutables();
942     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_lastCollectionScope);
943     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
944 }
945
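// Slow path of the write barrier. While the collector runs concurrently, the mutator appends the
// cell to its mark stack so the collector will revisit it. During a full collection, a store into
// an object that was never marked may instead be resolved by re-whitening that object.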
946 void Heap::addToRememberedSet(const JSCell* constCell)
947 {
948     JSCell* cell = const_cast<JSCell*>(constCell);
949     ASSERT(cell);
950     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
951     m_barriersExecuted++;
952     if (m_mutatorShouldBeFenced) {
953         WTF::loadLoadFence();
954         if (!isMarkedConcurrently(cell)) {
955             // During a full collection a store into an unmarked object that had survived past
956             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
957             // object gets marked at some time after this then it will go down the normal marking
958             // path. So, we don't have to remember this object. We could return here. But we go
959             // further and attempt to re-white the object.
960             
961             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
962             
963             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
964                 // Now we protect against this race:
965                 //
966                 //     1) Object starts out black + unmarked.
967                 //     --> We do isMarkedConcurrently here.
968                 //     2) Object is marked and greyed.
969                 //     3) Object is scanned and blacked.
970                 //     --> We do atomicCompareExchangeCellStateStrong here.
971                 //
972                 // In this case we would have made the object white again, even though it should
973                 // be black. This check lets us correct our mistake. This relies on the fact that
974                 // isMarkedConcurrently converges monotonically to true.
975                 if (isMarkedConcurrently(cell)) {
976                     // It's difficult to work out whether the object should be grey or black at
977                     // this point. We say black conservatively.
978                     cell->setCellState(CellState::PossiblyBlack);
979                 }
980                 
981                 // Either way, we can return. Most likely, the object was not marked, and so the
982                 // object is now labeled white. This means that future barrier executions will not
983                 // fire. In the unlikely event that the object had become marked, we can still
984                 // return anyway, since we proved that the object was not marked at the time that
985                 // we executed this slow path.
986             }
987             
988             return;
989         }
990     } else
991         ASSERT(Heap::isMarkedConcurrently(cell));
992     // It could be that the object was *just* marked. This means that the collector may set the
993     // state to PossiblyGrey and then to PossiblyBlack at any time. It's OK for us to
994     // race with the collector here. If we win then this is accurate because the object _will_
995     // get scanned again. If we lose then someone else will barrier the object again. That would
996     // be unfortunate but not the end of the world.
997     cell->setCellState(CellState::PossiblyGrey);
998     m_mutatorMarkStack->append(cell);
999 }
1000
1001 void Heap::collectAllGarbage()
1002 {
1003     if (!m_isSafeToCollect)
1004         return;
1005     
1006     collectSync(CollectionScope::Full);
1007
1008     DeferGCForAWhile deferGC(*this);
1009     if (UNLIKELY(Options::useImmortalObjects()))
1010         sweeper()->willFinishSweeping();
1011     else {
1012         double before = 0;
1013         if (Options::logGC()) {
1014             dataLog("[Full sweep: ", capacity() / 1024, " kb ");
1015             before = currentTimeMS();
1016         }
1017         m_objectSpace.sweep();
1018         m_objectSpace.shrink();
1019         if (Options::logGC()) {
1020             double after = currentTimeMS();
1021             dataLog("=> ", capacity() / 1024, " kb, ", after - before, " ms]\n");
1022         }
1023     }
1024     m_objectSpace.assertNoUnswept();
1025
1026     sweepAllLogicallyEmptyWeakBlocks();
1027 }
1028
1029 void Heap::collectAsync(std::optional<CollectionScope> scope)
1030 {
1031     if (!m_isSafeToCollect)
1032         return;
1033
1034     bool alreadyRequested = false;
1035     {
1036         LockHolder locker(*m_threadLock);
1037         for (std::optional<CollectionScope> request : m_requests) {
1038             if (scope) {
1039                 if (scope == CollectionScope::Eden) {
1040                     alreadyRequested = true;
1041                     break;
1042                 } else {
1043                     RELEASE_ASSERT(scope == CollectionScope::Full);
1044                     if (request == CollectionScope::Full) {
1045                         alreadyRequested = true;
1046                         break;
1047                     }
1048                 }
1049             } else {
1050                 if (!request || request == CollectionScope::Full) {
1051                     alreadyRequested = true;
1052                     break;
1053                 }
1054             }
1055         }
1056     }
1057     if (alreadyRequested)
1058         return;
1059
1060     requestCollection(scope);
1061 }
1062
1063 void Heap::collectSync(std::optional<CollectionScope> scope)
1064 {
1065     if (!m_isSafeToCollect)
1066         return;
1067     
1068     waitForCollection(requestCollection(scope));
1069 }
1070
1071 bool Heap::shouldCollectInThread(const LockHolder&)
1072 {
1073     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1074     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1075     
1076     return !m_requests.isEmpty();
1077 }
1078
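// Services the oldest pending collection request on the collector thread: stop the world, mark to
// fixpoint, perform post-marking bookkeeping (weak handles, sweeping hooks, allocation limits),
// then request finalization and resume the mutator.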
1079 void Heap::collectInThread()
1080 {
1081     m_currentGCStartTime = MonotonicTime::now();
1082     
1083     std::optional<CollectionScope> scope;
1084     {
1085         LockHolder locker(*m_threadLock);
1086         RELEASE_ASSERT(!m_requests.isEmpty());
1087         scope = m_requests.first();
1088     }
1089     
1090     SuperSamplerScope superSamplerScope(false);
1091     TimingScope collectImplTimingScope(scope, "Heap::collectInThread");
1092     
1093 #if ENABLE(ALLOCATION_LOGGING)
1094     dataLogF("JSC GC starting collection.\n");
1095 #endif
1096     
1097     stopTheWorld();
1098     
1099     if (false)
1100         dataLog("GC START!\n");
1101
1102     MonotonicTime before;
1103     if (Options::logGC()) {
1104         dataLog("[GC: START ", capacity() / 1024, " kb ");
1105         before = MonotonicTime::now();
1106     }
1107     
1108     double gcStartTime;
1109     
1110     ASSERT(m_isSafeToCollect);
1111     if (m_collectionScope) {
1112         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1113         RELEASE_ASSERT_NOT_REACHED();
1114     }
1115     
1116     willStartCollection(scope);
1117     collectImplTimingScope.setScope(*this);
1118     
1119     gcStartTime = WTF::monotonicallyIncreasingTime();
1120     if (m_verifier) {
1121         // Verify that live objects from the last GC cycle haven't been corrupted by
1122         // mutators before we begin this new GC cycle.
1123         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1124             
1125         m_verifier->initializeGCCycle();
1126         m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1127     }
1128     
1129     prepareForMarking();
1130     
1131     markToFixpoint(gcStartTime);
1132     
1133     if (m_verifier) {
1134         m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1135         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1136     }
1137         
1138     if (vm()->typeProfiler())
1139         vm()->typeProfiler()->invalidateTypeSetCache();
1140         
1141     reapWeakHandles();
1142     pruneStaleEntriesFromWeakGCMaps();
1143     sweepArrayBuffers();
1144     snapshotUnswept();
1145     finalizeUnconditionalFinalizers();
1146     removeDeadCompilerWorklistEntries();
1147     notifyIncrementalSweeper();
1148     
1149     m_codeBlocks->iterateCurrentlyExecuting(
1150         [&] (CodeBlock* codeBlock) {
1151             writeBarrier(codeBlock);
1152         });
1153     m_codeBlocks->clearCurrentlyExecuting();
1154         
1155     m_objectSpace.prepareForAllocation();
1156     updateAllocationLimits();
1157
1158     didFinishCollection(gcStartTime);
1159     
1160     if (m_verifier) {
1161         m_verifier->trimDeadObjects();
1162         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1163     }
1164
1165     if (false) {
1166         dataLog("Heap state after GC:\n");
1167         m_objectSpace.dumpBits();
1168     }
1169     
1170     if (Options::logGC()) {
1171         MonotonicTime after = MonotonicTime::now();
1172         double thisPauseMS = (after - m_stopTime).milliseconds();
1173         dataLog("p=", thisPauseMS, " ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (after - before).milliseconds(), " ms END]\n");
1174     }
1175     
1176     {
1177         LockHolder locker(*m_threadLock);
1178         m_requests.removeFirst();
1179         m_lastServedTicket++;
1180         clearMutatorWaiting();
1181     }
1182     ParkingLot::unparkAll(&m_worldState);
1183
1184     if (false)
1185         dataLog("GC END!\n");
1186
1187     setNeedFinalize();
1188     resumeTheWorld();
1189     
1190     m_lastGCStartTime = m_currentGCStartTime;
1191     m_lastGCEndTime = MonotonicTime::now();
1192 }
1193
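// Brings the mutator and the compiler threads to a halt so the collector has the heap to itself,
// and records when the world stopped so pause times can be reported.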
1194 void Heap::stopTheWorld()
1195 {
1196     RELEASE_ASSERT(!m_collectorBelievesThatTheWorldIsStopped);
1197     waitWhileNeedFinalize();
1198     stopTheMutator();
1199     suspendCompilerThreads();
1200     m_collectorBelievesThatTheWorldIsStopped = true;
1201
1202     forEachSlotVisitor(
1203         [&] (SlotVisitor& slotVisitor) {
1204             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1205         });
1206
1207 #if ENABLE(JIT)
1208     {
1209         DeferGCForAWhile awhile(*this);
1210         if (JITWorklist::instance()->completeAllForVM(*m_vm))
1211             setGCDidJIT();
1212     }
1213 #endif // ENABLE(JIT)
1214     
1215     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1216     
1217     m_structureIDTable.flushOldTables();
1218     m_objectSpace.stopAllocating();
1219     
1220     m_stopTime = MonotonicTime::now();
1221 }
1222
1223 void Heap::resumeTheWorld()
1224 {
1225     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1226     // collection cycle or this is just a concurrent phase within a collection cycle:
1227     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1228     //   last active block.
1229     // - During collection cycle: it reinstates the last active block.
1230     m_objectSpace.resumeAllocating();
1231     
1232     m_barriersExecuted = 0;
1233     
1234     RELEASE_ASSERT(m_collectorBelievesThatTheWorldIsStopped);
1235     m_collectorBelievesThatTheWorldIsStopped = false;
1236     
1237     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1238     // become available. We basically want a lockAny() method that will lock whatever lock is available
1239     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1240     // queues at once, which is totally achievable - it would just require memory allocation, which is
1241     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1242     // with a DLG-style handshake mechanism, but that seems not as general.
1243     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1244
1245     forEachSlotVisitor(
1246         [&] (SlotVisitor& slotVisitor) {
1247             slotVisitorsToUpdate.append(&slotVisitor);
1248         });
1249     
1250     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1251         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1252             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1253             bool remove = false;
1254             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1255                 remove = true;
1256             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1257                 slotVisitor.updateMutatorIsStopped(locker);
1258                 remove = true;
1259             }
1260             if (remove) {
1261                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1262                 slotVisitorsToUpdate.takeLast();
1263             }
1264         }
1265         std::this_thread::yield();
1266     }
1267     
1268     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1269         slotVisitor->updateMutatorIsStopped();
1270     
1271     resumeCompilerThreads();
1272     resumeTheMutator();
1273 }
1274
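// Requests that the mutator stop. If no thread holds heap access, the world is stopped
// immediately; otherwise the shouldStop bit is set and the collector parks until the mutator
// reaches a safepoint.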
1275 void Heap::stopTheMutator()
1276 {
1277     for (;;) {
1278         unsigned oldState = m_worldState.load();
1279         if ((oldState & stoppedBit)
1280             && (oldState & shouldStopBit))
1281             return;
1282         
1283         // Note: We could just have the mutator stop in-place like we do when !hasAccessBit. We could
1284         // switch to that if it turned out to be less confusing, but then it would not give the
1285         // mutator the opportunity to react to the world being stopped.
1286         if (oldState & mutatorWaitingBit) {
1287             if (m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorWaitingBit))
1288                 ParkingLot::unparkAll(&m_worldState);
1289             continue;
1290         }
1291         
1292         if (!(oldState & hasAccessBit)
1293             || (oldState & stoppedBit)) {
1294             // We can stop the world instantly.
1295             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit | shouldStopBit))
1296                 return;
1297             continue;
1298         }
1299         
1300         RELEASE_ASSERT(oldState & hasAccessBit);
1301         RELEASE_ASSERT(!(oldState & stoppedBit));
1302         m_worldState.compareExchangeStrong(oldState, oldState | shouldStopBit);
1303         m_stopIfNecessaryTimer->scheduleSoon();
1304         ParkingLot::compareAndPark(&m_worldState, oldState | shouldStopBit);
1305     }
1306 }
1307
1308 void Heap::resumeTheMutator()
1309 {
1310     for (;;) {
1311         unsigned oldState = m_worldState.load();
1312         RELEASE_ASSERT(oldState & shouldStopBit);
1313         
1314         if (!(oldState & hasAccessBit)) {
1315             // We can resume the world instantly.
1316             if (m_worldState.compareExchangeWeak(oldState, oldState & ~(stoppedBit | shouldStopBit))) {
1317                 ParkingLot::unparkAll(&m_worldState);
1318                 return;
1319             }
1320             continue;
1321         }
1322         
1323         // We can tell the world to resume.
1324         if (m_worldState.compareExchangeWeak(oldState, oldState & ~shouldStopBit)) {
1325             ParkingLot::unparkAll(&m_worldState);
1326             return;
1327         }
1328     }
1329 }
1330
1331 void Heap::stopIfNecessarySlow()
1332 {
1333     while (stopIfNecessarySlow(m_worldState.load())) { }
1334     
1335     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1336     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1337     
1338     handleGCDidJIT();
1339     handleNeedFinalize();
1340     m_mutatorDidRun = true;
1341 }
1342
1343 bool Heap::stopIfNecessarySlow(unsigned oldState)
1344 {
1345     RELEASE_ASSERT(oldState & hasAccessBit);
1346     
1347     // It's possible for us to wake up with finalization already requested but the world not yet
1348     // resumed. If that happens, we can't run finalization yet.
1349     if (!(oldState & stoppedBit)
1350         && handleNeedFinalize(oldState))
1351         return true;
1352     
1353     if (!(oldState & shouldStopBit) && !m_scheduler->shouldStop()) {
1354         if (!(oldState & stoppedBit))
1355             return false;
1356         m_worldState.compareExchangeStrong(oldState, oldState & ~stoppedBit);
1357         return true;
1358     }
1359     
1360     sanitizeStackForVM(m_vm);
1361
1362     if (verboseStop) {
1363         dataLog("Stopping!\n");
1364         WTFReportBacktrace();
1365     }
1366     m_worldState.compareExchangeStrong(oldState, oldState | stoppedBit);
1367     ParkingLot::unparkAll(&m_worldState);
1368     ParkingLot::compareAndPark(&m_worldState, oldState | stoppedBit);
1369     return true;
1370 }
1371
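// Parks the calling (mutator) thread until func(locker) returns true, while still responding to
// stop-the-world requests and pending finalization along the way.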
1372 template<typename Func>
1373 void Heap::waitForCollector(const Func& func)
1374 {
1375     for (;;) {
1376         bool done;
1377         {
1378             LockHolder locker(*m_threadLock);
1379             done = func(locker);
1380             if (!done) {
1381                 setMutatorWaiting();
1382                 // At this point, the collector knows that we intend to wait, and he will clear the
1383                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1384                 // prevents us from parking except if there is also stop-the-world. Unparking after
1385                 // clearing means that if the clearing happens after we park, then we will unpark.
1386             }
1387         }
1388
1389         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1390         unsigned oldState = m_worldState.load();
1391         if (stopIfNecessarySlow(oldState))
1392             continue;
1393         
1394         if (done) {
1395             clearMutatorWaiting(); // Clean up just in case.
1396             return;
1397         }
1398         
1399         // If mutatorWaitingBit is still set then we want to wait.
1400         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1401     }
1402 }
1403
1404 void Heap::acquireAccessSlow()
1405 {
1406     for (;;) {
1407         unsigned oldState = m_worldState.load();
1408         RELEASE_ASSERT(!(oldState & hasAccessBit));
1409         
1410         if (oldState & shouldStopBit) {
1411             RELEASE_ASSERT(oldState & stoppedBit);
1412             if (verboseStop) {
1413                 dataLog("Stopping in acquireAccess!\n");
1414                 WTFReportBacktrace();
1415             }
1416             // Wait until we're not stopped anymore.
1417             ParkingLot::compareAndPark(&m_worldState, oldState);
1418             continue;
1419         }
1420         
1421         RELEASE_ASSERT(!(oldState & stoppedBit));
1422         unsigned newState = oldState | hasAccessBit;
1423         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1424             handleGCDidJIT();
1425             handleNeedFinalize();
1426             m_mutatorDidRun = true;
1427             return;
1428         }
1429     }
1430 }
1431
1432 void Heap::releaseAccessSlow()
1433 {
1434     for (;;) {
1435         unsigned oldState = m_worldState.load();
1436         RELEASE_ASSERT(oldState & hasAccessBit);
1437         RELEASE_ASSERT(!(oldState & stoppedBit));
1438         
1439         if (handleNeedFinalize(oldState))
1440             continue;
1441         
1442         if (oldState & shouldStopBit) {
1443             unsigned newState = (oldState & ~hasAccessBit) | stoppedBit;
1444             if (m_worldState.compareExchangeWeak(oldState, newState)) {
1445                 ParkingLot::unparkAll(&m_worldState);
1446                 return;
1447             }
1448             continue;
1449         }
1450         
1451         RELEASE_ASSERT(!(oldState & shouldStopBit));
1452         
1453         if (m_worldState.compareExchangeWeak(oldState, oldState & ~hasAccessBit))
1454             return;
1455     }
1456 }
1457
1458 bool Heap::handleGCDidJIT(unsigned oldState)
1459 {
1460     RELEASE_ASSERT(oldState & hasAccessBit);
1461     if (!(oldState & gcDidJITBit))
1462         return false;
1463     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1464         WTF::crossModifyingCodeFence();
1465         return true;
1466     }
1467     return true;
1468 }
1469
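// If the collector has requested finalization, clears the request, runs finalize() on this thread,
// and wakes anyone waiting on the world state. Returns true whenever finalization was pending, so
// the caller retries with a fresh world state.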
1470 bool Heap::handleNeedFinalize(unsigned oldState)
1471 {
1472     RELEASE_ASSERT(oldState & hasAccessBit);
1473     RELEASE_ASSERT(!(oldState & stoppedBit));
1474     
1475     if (!(oldState & needFinalizeBit))
1476         return false;
1477     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1478         finalize();
1479         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1480         // which case they would be waiting for us to release heap access.
1481         ParkingLot::unparkAll(&m_worldState);
1482         return true;
1483     }
1484     return true;
1485 }
1486
1487 void Heap::handleGCDidJIT()
1488 {
1489     while (handleGCDidJIT(m_worldState.load())) { }
1490 }
1491
1492 void Heap::handleNeedFinalize()
1493 {
1494     while (handleNeedFinalize(m_worldState.load())) { }
1495 }
1496
1497 void Heap::setGCDidJIT()
1498 {
1499     m_worldState.transaction(
1500         [&] (unsigned& state) {
1501             RELEASE_ASSERT(state & stoppedBit);
1502             state |= gcDidJITBit;
1503         });
1504 }
1505
1506 void Heap::setNeedFinalize()
1507 {
1508     m_worldState.exchangeOr(needFinalizeBit);
1509     ParkingLot::unparkAll(&m_worldState);
1510     m_stopIfNecessaryTimer->scheduleSoon();
1511 }
1512
1513 void Heap::waitWhileNeedFinalize()
1514 {
1515     for (;;) {
1516         unsigned oldState = m_worldState.load();
1517         if (!(oldState & needFinalizeBit)) {
1518             // This means that either there was no finalize request or the main thread will finalize
1519             // with heap access, so a subsequent call to stopTheWorld() will return only when
1520             // finalize finishes.
1521             return;
1522         }
1523         ParkingLot::compareAndPark(&m_worldState, oldState);
1524     }
1525 }
1526
1527 void Heap::setMutatorWaiting()
1528 {
1529     m_worldState.exchangeOr(mutatorWaitingBit);
1530 }
1531
1532 void Heap::clearMutatorWaiting()
1533 {
1534     m_worldState.exchangeAnd(~mutatorWaitingBit);
1535 }
1536
1537 void Heap::notifyThreadStopping(const LockHolder&)
1538 {
1539     m_threadIsStopping = true;
1540     clearMutatorWaiting();
1541     ParkingLot::unparkAll(&m_worldState);
1542 }
1543
1544 void Heap::finalize()
1545 {
1546     {
1547         HelpingGCScope helpingGCScope(*this);
1548         deleteUnmarkedCompiledCode();
1549         deleteSourceProviderCaches();
1550         sweepLargeAllocations();
1551     }
1552     
1553     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1554         cache->clear();
1555 }
1556
1557 Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
1558 {
1559     stopIfNecessary();
1560     
1561     ASSERT(vm()->currentThreadIsHoldingAPILock());
1562     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1563     
1564     LockHolder locker(*m_threadLock);
1565     m_requests.append(scope);
1566     m_lastGrantedTicket++;
1567     m_threadCondition->notifyOne(locker);
1568     return m_lastGrantedTicket;
1569 }
1570
1571 void Heap::waitForCollection(Ticket ticket)
1572 {
1573     waitForCollector(
1574         [&] (const LockHolder&) -> bool {
1575             return m_lastServedTicket >= ticket;
1576         });
1577 }
1578
1579 void Heap::sweepLargeAllocations()
1580 {
1581     m_objectSpace.sweepLargeAllocations();
1582 }
1583
1584 void Heap::suspendCompilerThreads()
1585 {
1586 #if ENABLE(DFG_JIT)
1587     // We ensure the worklists so that it's not possible for the mutator to start a new worklist
1588     // after we have suspended the ones that he had started before. That's not very expensive since
1589     // the worklists use AutomaticThreads anyway.
1590     for (unsigned i = DFG::numberOfWorklists(); i--;)
1591         DFG::ensureWorklistForIndex(i).suspendAllThreads();
1592 #endif
1593 }
1594
1595 void Heap::willStartCollection(std::optional<CollectionScope> scope)
1596 {
1597     if (Options::logGC())
1598         dataLog("=> ");
1599     
1600     if (shouldDoFullCollection(scope)) {
1601         m_collectionScope = CollectionScope::Full;
1602         m_shouldDoFullCollection = false;
1603         if (Options::logGC())
1604             dataLog("FullCollection, ");
1605         if (false)
1606             dataLog("Full collection!\n");
1607     } else {
1608         m_collectionScope = CollectionScope::Eden;
1609         if (Options::logGC())
1610             dataLog("EdenCollection, ");
1611         if (false)
1612             dataLog("Eden collection!\n");
1613     }
1614     if (m_collectionScope == CollectionScope::Full) {
1615         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1616         m_extraMemorySize = 0;
1617         m_deprecatedExtraMemorySize = 0;
1618 #if ENABLE(RESOURCE_USAGE)
1619         m_externalMemorySize = 0;
1620 #endif
1621
1622         if (m_fullActivityCallback)
1623             m_fullActivityCallback->willCollect();
1624     } else {
1625         ASSERT(m_collectionScope == CollectionScope::Eden);
1626         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1627     }
1628
1629     if (m_edenActivityCallback)
1630         m_edenActivityCallback->willCollect();
1631
1632     for (auto* observer : m_observers)
1633         observer->willGarbageCollect();
1634 }
1635
1636 void Heap::prepareForMarking()
1637 {
1638     m_objectSpace.prepareForMarking();
1639 }
1640
1641 void Heap::reapWeakHandles()
1642 {
1643     m_objectSpace.reapWeakSets();
1644 }
1645
1646 void Heap::pruneStaleEntriesFromWeakGCMaps()
1647 {
1648     if (m_collectionScope != CollectionScope::Full)
1649         return;
1650     for (auto& pruneCallback : m_weakGCMaps.values())
1651         pruneCallback();
1652 }
1653
1654 void Heap::sweepArrayBuffers()
1655 {
1656     m_arrayBuffers.sweep();
1657 }
1658
1659 void Heap::snapshotUnswept()
1660 {
1661     TimingScope timingScope(*this, "Heap::snapshotUnswept");
1662     m_objectSpace.snapshotUnswept();
1663 }
1664
1665 void Heap::deleteSourceProviderCaches()
1666 {
1667     m_vm->clearSourceProviderCaches();
1668 }
1669
1670 void Heap::notifyIncrementalSweeper()
1671 {
1672     if (m_collectionScope == CollectionScope::Full) {
1673         if (!m_logicallyEmptyWeakBlocks.isEmpty())
1674             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1675     }
1676
1677     m_sweeper->startSweeping();
1678 }
1679
1680 NEVER_INLINE void Heap::didExceedMaxLiveSize()
1681 {
1682     CRASH();
1683 }
1684
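// Recomputes the allocation limits that drive future collections. After a full collection we set
// m_maxHeapSize (subject to a heap-type-dependent minimum) and derive m_maxEdenSize from it. After
// an eden collection we grow m_maxHeapSize so that the nursery size stays fixed, and we schedule a
// full collection next time if eden would fall below a third of the heap budget. In both cases we
// record m_sizeAfterLastCollect and reset m_bytesAllocatedThisCycle.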
1685 void Heap::updateAllocationLimits()
1686 {
1687     static const bool verbose = false;
1688     
1689     if (verbose) {
1690         dataLog("\n");
1691         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
1692     }
1693     
1694     // Calculate our current heap size threshold for the purpose of figuring out when we should
1695     // run another collection. This isn't the same as either size() or capacity(), though it should
1696     // be somewhere between the two. The key is to match the size calculations involved in calls
1697     // to didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
1698     // fragmentation, we may have size() much smaller than capacity().
1699     size_t currentHeapSize = 0;
1700
1701     // For marked space, we use the total number of bytes visited. This matches the logic for
1702     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
1703     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
1704     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
1705     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
1706     currentHeapSize += m_totalBytesVisited;
1707     if (verbose)
1708         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
1709
1710     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
1711     // extra memory reporting.
1712     currentHeapSize += extraMemorySize();
1713
1714     if (verbose)
1715         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
1716
1717     if (m_maxLiveSize && currentHeapSize > m_maxLiveSize)
1718         didExceedMaxLiveSize();
1719     
1720     if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1721         HeapStatistics::exitWithFailure();
1722
1723     if (m_collectionScope == CollectionScope::Full) {
1724         // To avoid pathological GC churn in very small and very large heaps, we set
1725         // the new allocation limit based on the current size of the heap, with a
1726         // fixed minimum.
1727         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1728         if (verbose)
1729             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
1730         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1731         if (verbose)
1732             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
1733         m_sizeAfterLastFullCollect = currentHeapSize;
1734         if (verbose)
1735             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
1736         m_bytesAbandonedSinceLastFullCollect = 0;
1737         if (verbose)
1738             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
1739     } else {
1740         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1741         // Theoretically, we should never scan more memory than the heap size we planned to have.
1742         // But we are sloppy, so we have to defend against the subtraction wrapping around.
1743         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
1744         if (verbose)
1745             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1746         m_sizeAfterLastEdenCollect = currentHeapSize;
1747         if (verbose)
1748             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
1749         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1750         double minEdenToOldGenerationRatio = 1.0 / 3.0;
1751         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1752             m_shouldDoFullCollection = true;
1753         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
1754         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1755         if (verbose)
1756             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
1757         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1758         if (verbose)
1759             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1760         if (m_fullActivityCallback) {
1761             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1762             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1763         }
1764     }
1765
1766     m_sizeAfterLastCollect = currentHeapSize;
1767     if (verbose)
1768         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
1769     m_bytesAllocatedThisCycle = 0;
1770
1771     if (Options::logGC())
1772         dataLog("=> ", currentHeapSize / 1024, " kb, ");
1773 }
1774
1775 void Heap::didFinishCollection(double gcStartTime)
1776 {
1777     double gcEndTime = WTF::monotonicallyIncreasingTime();
1778     CollectionScope scope = *m_collectionScope;
1779     if (scope == CollectionScope::Full)
1780         m_lastFullGCLength = gcEndTime - gcStartTime;
1781     else
1782         m_lastEdenGCLength = gcEndTime - gcStartTime;
1783
1784 #if ENABLE(RESOURCE_USAGE)
1785     ASSERT(externalMemorySize() <= extraMemorySize());
1786 #endif
1787
1788     if (Options::recordGCPauseTimes())
1789         HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1790
1791     if (Options::useZombieMode())
1792         zombifyDeadObjects();
1793
1794     if (Options::dumpObjectStatistics())
1795         HeapStatistics::dumpObjectStatistics(this);
1796
1797     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
1798         gatherExtraHeapSnapshotData(*heapProfiler);
1799         removeDeadHeapSnapshotNodes(*heapProfiler);
1800     }
1801
1802     RELEASE_ASSERT(m_collectionScope);
1803     m_lastCollectionScope = m_collectionScope;
1804     m_collectionScope = std::nullopt;
1805
1806     for (auto* observer : m_observers)
1807         observer->didGarbageCollect(scope);
1808 }
1809
1810 void Heap::resumeCompilerThreads()
1811 {
1812 #if ENABLE(DFG_JIT)
1813     for (unsigned i = DFG::numberOfWorklists(); i--;)
1814         DFG::existingWorklistForIndex(i).resumeAllThreads();
1815 #endif
1816 }
1817
1818 GCActivityCallback* Heap::fullActivityCallback()
1819 {
1820     return m_fullActivityCallback.get();
1821 }
1822
1823 GCActivityCallback* Heap::edenActivityCallback()
1824 {
1825     return m_edenActivityCallback.get();
1826 }
1827
1828 IncrementalSweeper* Heap::sweeper()
1829 {
1830     return m_sweeper.get();
1831 }
1832
1833 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1834 {
1835     if (m_fullActivityCallback)
1836         m_fullActivityCallback->setEnabled(enable);
1837     if (m_edenActivityCallback)
1838         m_edenActivityCallback->setEnabled(enable);
1839 }
1840
1841 void Heap::didAllocate(size_t bytes)
1842 {
1843     if (m_edenActivityCallback)
1844         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1845     m_bytesAllocatedThisCycle += bytes;
1846 }
1847
1848 bool Heap::isValidAllocation(size_t)
1849 {
1850     if (!isValidThreadState(m_vm))
1851         return false;
1852
1853     if (isCurrentThreadBusy())
1854         return false;
1855     
1856     return true;
1857 }
1858
1859 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1860 {
1861     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1862 }
1863
1864 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1865 {
1866     HandleSlot slot = handle.slot();
1867     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1868     finalizer(slot->asCell());
1869     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1870 }
1871
1872 void Heap::addExecutable(ExecutableBase* executable)
1873 {
1874     m_executables.append(executable);
1875 }
1876
1877 void Heap::collectAllGarbageIfNotDoneRecently()
1878 {
1879     if (!m_fullActivityCallback) {
1880         collectAllGarbage();
1881         return;
1882     }
1883
1884     if (m_fullActivityCallback->didSyncGCRecently()) {
1885         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1886         reportAbandonedObjectGraph();
1887         return;
1888     }
1889
1890     m_fullActivityCallback->setDidSyncGCRecently();
1891     collectAllGarbage();
1892 }
1893
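// Zombify overwrites the payload of a dead cell with zombifiedBits so that any later use of a stale
// pointer is immediately recognizable. If the cell is already zapped, the first word is preserved,
// since zapped-ness is how we know the destructor has already run.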
1894 class Zombify : public MarkedBlock::VoidFunctor {
1895 public:
1896     inline void visit(HeapCell* cell) const
1897     {
1898         void** current = reinterpret_cast_ptr<void**>(cell);
1899
1900         // We want to maintain zapped-ness because that's how we know if we've called 
1901         // the destructor.
1902         if (cell->isZapped())
1903             current++;
1904
1905         void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + cell->cellSize());
1906         for (; current < limit; current++)
1907             *current = zombifiedBits;
1908     }
1909     IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
1910     {
1911         visit(cell);
1912         return IterationStatus::Continue;
1913     }
1914 };
1915
1916 void Heap::zombifyDeadObjects()
1917 {
1918     // Sweep now because destructors will crash once we're zombified.
1919     m_objectSpace.sweep();
1920     HeapIterationScope iterationScope(*this);
1921     m_objectSpace.forEachDeadCell(iterationScope, Zombify());
1922 }
1923
1924 bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
1925 {
1926     if (!Options::useGenerationalGC())
1927         return true;
1928
1929     if (!scope)
1930         return m_shouldDoFullCollection;
1931     return *scope == CollectionScope::Full;
1932 }
1933
1934 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1935 {
1936     m_logicallyEmptyWeakBlocks.append(block);
1937 }
1938
1939 void Heap::sweepAllLogicallyEmptyWeakBlocks()
1940 {
1941     if (m_logicallyEmptyWeakBlocks.isEmpty())
1942         return;
1943
1944     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1945     while (sweepNextLogicallyEmptyWeakBlock()) { }
1946 }
1947
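// Sweeps one logically empty WeakBlock per call, using m_indexOfNextLogicallyEmptyWeakBlockToSweep
// as a cursor and WTF::notFound as the "done" sentinel. Blocks that turn out to be completely empty
// are swap-removed from the vector and destroyed. Returns false once there is nothing left to sweep.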
1948 bool Heap::sweepNextLogicallyEmptyWeakBlock()
1949 {
1950     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1951         return false;
1952
1953     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1954
1955     block->sweep();
1956     if (block->isEmpty()) {
1957         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1958         m_logicallyEmptyWeakBlocks.removeLast();
1959         WeakBlock::destroy(*this, block);
1960     } else
1961         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1962
1963     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1964         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1965         return false;
1966     }
1967
1968     return true;
1969 }
1970
1971 size_t Heap::threadVisitCount()
1972 {       
1973     size_t result = 0;
1974     for (auto& parallelVisitor : m_parallelSlotVisitors)
1975         result += parallelVisitor->visitCount();
1976     return result;
1977 }
1978
1979 size_t Heap::threadBytesVisited()
1980 {       
1981     size_t result = 0;
1982     for (auto& parallelVisitor : m_parallelSlotVisitors)
1983         result += parallelVisitor->bytesVisited();
1984     return result;
1985 }
1986
1987 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
1988 {
1989     // We don't know the full set of CodeBlocks until compilation has terminated.
1990     completeAllJITPlans();
1991
1992     return m_codeBlocks->iterate(func);
1993 }
1994
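// Slow path of the write barrier. When the mutator is fenced, the barrier threshold is tautological,
// so we first issue a storeLoadFence and re-check whether "from" is really black; if it is (or if no
// fencing is in effect), "from" is added to the remembered set.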
1995 void Heap::writeBarrierSlowPath(const JSCell* from)
1996 {
1997     if (UNLIKELY(mutatorShouldBeFenced())) {
1998         // In this case, the barrierThreshold is the tautological threshold, so "from" may not actually
1999         // be black. We can't know for sure until we fire off a fence.
2000         WTF::storeLoadFence();
2001         if (from->cellState() != CellState::PossiblyBlack)
2002             return;
2003     }
2004     
2005     addToRememberedSet(from);
2006 }
2007
2008 bool Heap::isCurrentThreadBusy()
2009 {
2010     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2011 }
2012
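// Accumulates visited extra memory without taking a lock: a weak compare-and-swap loop retries until
// the addition lands, so concurrent reporters (e.g. parallel visitors) can update the counter at the
// same time.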
2013 void Heap::reportExtraMemoryVisited(size_t size)
2014 {
2015     size_t* counter = &m_extraMemorySize;
2016     
2017     for (;;) {
2018         size_t oldSize = *counter;
2019         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2020             return;
2021     }
2022 }
2023
2024 #if ENABLE(RESOURCE_USAGE)
2025 void Heap::reportExternalMemoryVisited(size_t size)
2026 {
2027     size_t* counter = &m_externalMemorySize;
2028
2029     for (;;) {
2030         size_t oldSize = *counter;
2031         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2032             return;
2033     }
2034 }
2035 #endif
2036
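// Mutator-side GC trigger. We bail out while collection is unsafe, while this thread is already
// helping the GC, or when GC is disabled. If the collector may want the world stopped, we either
// record that in the deferral context, remember the deferred work, or stop right here. Past that,
// we request an asynchronous collection (again honoring deferral) once the bytes allocated this
// cycle exceed m_maxEdenSize, or Options::gcMaxHeapSize() when that override is set.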
2037 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2038 {
2039     ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
2040
2041     if (!m_isSafeToCollect)
2042         return;
2043     if (mutatorState() == MutatorState::HelpingGC)
2044         return;
2045     if (!Options::useGC())
2046         return;
2047     
2048     if (mayNeedToStop()) {
2049         if (deferralContext)
2050             deferralContext->m_shouldGC = true;
2051         else if (isDeferred())
2052             m_didDeferGCWork = true;
2053         else {
2054             stopIfNecessary();
2055             // FIXME: Check if the scheduler wants us to stop.
2056             // https://bugs.webkit.org/show_bug.cgi?id=166827
2057         }
2058     }
2059     
2060     if (UNLIKELY(Options::gcMaxHeapSize())) {
2061         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2062             return;
2063     } else {
2064         if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
2065             return;
2066     }
2067
2068     if (deferralContext)
2069         deferralContext->m_shouldGC = true;
2070     else if (isDeferred())
2071         m_didDeferGCWork = true;
2072     else
2073         collectAsync();
2074 }
2075
2076 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2077 {
2078     // Can't do anything if we're still deferred.
2079     if (m_deferralDepth)
2080         return;
2081     
2082     ASSERT(!isDeferred());
2083     
2084     m_didDeferGCWork = false;
2085     // FIXME: Bring back something like the DeferGCProbability mode.
2086     // https://bugs.webkit.org/show_bug.cgi?id=166627
2087     collectIfNecessaryOrDefer();
2088 }
2089
2090 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2091 {
2092     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2093 }
2094
2095 void Heap::unregisterWeakGCMap(void* weakGCMap)
2096 {
2097     m_weakGCMaps.remove(weakGCMap);
2098 }
2099
2100 void Heap::didAllocateBlock(size_t capacity)
2101 {
2102 #if ENABLE(RESOURCE_USAGE)
2103     m_blockBytesAllocated += capacity;
2104 #else
2105     UNUSED_PARAM(capacity);
2106 #endif
2107 }
2108
2109 void Heap::didFreeBlock(size_t capacity)
2110 {
2111 #if ENABLE(RESOURCE_USAGE)
2112     m_blockBytesAllocated -= capacity;
2113 #else
2114     UNUSED_PARAM(capacity);
2115 #endif
2116 }
2117
2118 #if USE(CF)
2119 void Heap::setRunLoop(CFRunLoopRef runLoop)
2120 {
2121     m_runLoop = runLoop;
2122     m_fullActivityCallback->setRunLoop(runLoop);
2123     m_edenActivityCallback->setRunLoop(runLoop);
2124     m_sweeper->setRunLoop(runLoop);
2125 }
2126 #endif // USE(CF)
2127
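// Registers the marking constraints that the collector re-executes until none of them produces new
// work. Each constraint supplies a short tag, a human-readable name, a visiting lambda, and a
// volatility hint (GreyedByExecution vs. GreyedByMarking) describing, roughly, whether mutator
// execution or marking progress can re-grey it; the last constraint also supplies a work-estimate
// lambda.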
2128 void Heap::buildConstraintSet()
2129 {
2130     m_constraintSet = std::make_unique<MarkingConstraintSet>();
2131     
2132     m_constraintSet->add(
2133         "Cs", "Conservative Scan",
2134         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2135             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2136             m_objectSpace.prepareForConservativeScan();
2137             ConservativeRoots conservativeRoots(*this);
2138             SuperSamplerScope superSamplerScope(false);
2139             gatherStackRoots(conservativeRoots);
2140             gatherJSStackRoots(conservativeRoots);
2141             gatherScratchBufferRoots(conservativeRoots);
2142             slotVisitor.append(conservativeRoots);
2143         },
2144         MarkingConstraint::GreyedByExecution);
2145     
2146     m_constraintSet->add(
2147         "Msr", "Misc Small Roots",
2148         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2149 #if JSC_OBJC_API_ENABLED
2150             scanExternalRememberedSet(*m_vm, slotVisitor);
2151 #endif
2152
2153             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2154                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2155             
2156             for (auto& pair : m_protectedValues)
2157                 slotVisitor.appendUnbarriered(pair.key);
2158             
2159             if (m_markListSet && m_markListSet->size())
2160                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2161             
2162             slotVisitor.appendUnbarriered(m_vm->exception());
2163             slotVisitor.appendUnbarriered(m_vm->lastException());
2164         },
2165         MarkingConstraint::GreyedByExecution);
2166     
2167     m_constraintSet->add(
2168         "Sh", "Strong Handles",
2169         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2170             m_handleSet.visitStrongHandles(slotVisitor);
2171             m_handleStack.visit(slotVisitor);
2172         },
2173         MarkingConstraint::GreyedByExecution);
2174     
2175     m_constraintSet->add(
2176         "D", "Debugger",
2177         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2178 #if ENABLE(SAMPLING_PROFILER)
2179             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2180                 LockHolder locker(samplingProfiler->getLock());
2181                 samplingProfiler->processUnverifiedStackTraces();
2182                 samplingProfiler->visit(slotVisitor);
2183                 if (Options::logGC() == GCLogging::Verbose)
2184                     dataLog("Sampling Profiler data:\n", slotVisitor);
2185             }
2186 #endif // ENABLE(SAMPLING_PROFILER)
2187             
2188             if (m_vm->typeProfiler())
2189                 m_vm->typeProfilerLog()->visit(slotVisitor);
2190             
2191             m_vm->shadowChicken().visitChildren(slotVisitor);
2192         },
2193         MarkingConstraint::GreyedByExecution);
2194     
2195     m_constraintSet->add(
2196         "Jsr", "JIT Stub Routines",
2197         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2198             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2199         },
2200         MarkingConstraint::GreyedByExecution);
2201     
2202     m_constraintSet->add(
2203         "Ws", "Weak Sets",
2204         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2205             slotVisitor.mergeOpaqueRootsIfNecessary();
2206             for (auto& parallelVisitor : m_parallelSlotVisitors)
2207                 parallelVisitor->mergeOpaqueRootsIfNecessary();
2208             
2209             m_objectSpace.visitWeakSets(slotVisitor);
2210         },
2211         MarkingConstraint::GreyedByMarking);
2212     
2213     m_constraintSet->add(
2214         "Wrh", "Weak Reference Harvesters",
2215         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2216             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2217                 current->visitWeakReferences(slotVisitor);
2218         },
2219         MarkingConstraint::GreyedByMarking);
2220     
2221 #if ENABLE(DFG_JIT)
2222     m_constraintSet->add(
2223         "Dw", "DFG Worklists",
2224         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2225             for (unsigned i = DFG::numberOfWorklists(); i--;)
2226                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2227             
2228             // FIXME: This is almost certainly unnecessary.
2229             // https://bugs.webkit.org/show_bug.cgi?id=166829
2230             DFG::iterateCodeBlocksForGC(
2231                 *m_vm,
2232                 [&] (CodeBlock* codeBlock) {
2233                     slotVisitor.appendUnbarriered(codeBlock);
2234                 });
2235             
2236             if (Options::logGC() == GCLogging::Verbose)
2237                 dataLog("DFG Worklists:\n", slotVisitor);
2238         },
2239         MarkingConstraint::GreyedByMarking);
2240 #endif
2241     
2242     m_constraintSet->add(
2243         "Cb", "CodeBlocks",
2244         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2245             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2246                 [&] (CodeBlock* codeBlock) {
2247                     // Visit the CodeBlock as a constraint only if it's black.
2248                     if (Heap::isMarked(codeBlock)
2249                         && codeBlock->cellState() == CellState::PossiblyBlack)
2250                         slotVisitor.visitAsConstraint(codeBlock);
2251                 });
2252         },
2253         MarkingConstraint::GreyedByExecution);
2254     
2255     m_constraintSet->add(
2256         "Mrms", "Mutator+Race Mark Stack",
2257         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2258             // Indicate to the fixpoint that we introduced work!
2259             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2260             slotVisitor.addToVisitCount(size);
2261             
2262             if (Options::logGC())
2263                 dataLog("(", size, ")");
2264             
2265             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2266             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2267         },
2268         [this] (SlotVisitor&) -> double {
2269             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2270         },
2271         MarkingConstraint::GreyedByExecution);
2272 }
2273
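// Marks the heap as ready for collections: builds the constraint set, sets m_isSafeToCollect, and,
// when Options::collectContinuously() is enabled, spawns the "JSC DEBUG Continuous GC" thread, which
// enqueues a collection request roughly every Options::collectContinuouslyPeriodMS() milliseconds
// until m_shouldStopCollectingContinuously is set.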
2274 void Heap::notifyIsSafeToCollect()
2275 {
2276     buildConstraintSet();
2277     
2278     m_isSafeToCollect = true;
2279     
2280     if (Options::collectContinuously()) {
2281         m_collectContinuouslyThread = createThread(
2282             "JSC DEBUG Continuous GC",
2283             [this] () {
2284                 MonotonicTime initialTime = MonotonicTime::now();
2285                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2286                 while (!m_shouldStopCollectingContinuously) {
2287                     {
2288                         LockHolder locker(*m_threadLock);
2289                         if (m_requests.isEmpty()) {
2290                             m_requests.append(std::nullopt);
2291                             m_lastGrantedTicket++;
2292                             m_threadCondition->notifyOne(locker);
2293                         }
2294                     }
2295                     
2296                     {
2297                         LockHolder locker(m_collectContinuouslyLock);
2298                         Seconds elapsed = MonotonicTime::now() - initialTime;
2299                         Seconds elapsedInPeriod = elapsed % period;
2300                         MonotonicTime timeToWakeUp =
2301                             initialTime + elapsed - elapsedInPeriod + period;
2302                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2303                             m_collectContinuouslyCondition.waitUntil(
2304                                 m_collectContinuouslyLock, timeToWakeUp);
2305                         }
2306                     }
2307                 }
2308             });
2309     }
2310 }
2311
2312 void Heap::preventCollection()
2313 {
2314     // This prevents the collectContinuously thread from starting a collection.
2315     m_collectContinuouslyLock.lock();
2316     
2317     // Wait for all collections to finish.
2318     waitForCollector(
2319         [&] (const LockHolder&) -> bool {
2320             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2321             return m_lastServedTicket == m_lastGrantedTicket;
2322         });
2323     
2324     // Now a collection can only start if this thread starts it.
2325     RELEASE_ASSERT(!m_collectionScope);
2326 }
2327
2328 void Heap::allowCollection()
2329 {
2330     m_collectContinuouslyLock.unlock();
2331 }
2332
2333 template<typename Func>
2334 void Heap::forEachSlotVisitor(const Func& func)
2335 {
2336     auto locker = holdLock(m_parallelSlotVisitorLock);
2337     func(*m_collectorSlotVisitor);
2338     for (auto& slotVisitor : m_parallelSlotVisitors)
2339         func(*slotVisitor);
2340 }
2341
2342 void Heap::writeBarrierOpaqueRootSlow(void* root)
2343 {
2344     ASSERT(mutatorShouldBeFenced());
2345     
2346     auto locker = holdLock(m_opaqueRootsMutex);
2347     m_opaqueRoots.add(root);
2348 }
2349
2350 void Heap::addMutatorShouldBeFencedCache(bool& cache)
2351 {
2352     ASSERT(hasHeapAccess());
2353     cache = m_mutatorShouldBeFenced;
2354     m_mutatorShouldBeFencedCaches.append(&cache);
2355 }
2356
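// Switching mutator fencing also switches the write-barrier threshold: tautologicalThreshold
// effectively routes every barrier through the slow path (where writeBarrierSlowPath() re-checks
// after a fence), while blackThreshold restores the usual fast-path filtering. All registered
// per-client caches are updated in place.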
2357 void Heap::setMutatorShouldBeFenced(bool value)
2358 {
2359     m_mutatorShouldBeFenced = value;
2360     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2361     for (bool* cache : m_mutatorShouldBeFencedCaches)
2362         *cache = value;
2363 }
2364     
2365 } // namespace JSC