Make opaque root scanning truly constraint-based
Source/JavaScriptCore/heap/Heap.cpp
1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "ConservativeRoots.h"
27 #include "DFGWorklistInlines.h"
28 #include "EdenGCActivityCallback.h"
29 #include "Exception.h"
30 #include "FullGCActivityCallback.h"
31 #include "GCActivityCallback.h"
32 #include "GCIncomingRefCountedSetInlines.h"
33 #include "GCSegmentedArrayInlines.h"
34 #include "GCTypeMap.h"
35 #include "HasOwnPropertyCache.h"
36 #include "HeapHelperPool.h"
37 #include "HeapIterationScope.h"
38 #include "HeapProfiler.h"
39 #include "HeapSnapshot.h"
40 #include "HeapStatistics.h"
41 #include "HeapVerifier.h"
42 #include "HelpingGCScope.h"
43 #include "IncrementalSweeper.h"
44 #include "Interpreter.h"
45 #include "JITStubRoutineSet.h"
46 #include "JITWorklist.h"
47 #include "JSCInlines.h"
48 #include "JSGlobalObject.h"
49 #include "JSLock.h"
50 #include "JSVirtualMachineInternal.h"
51 #include "MarkedSpaceInlines.h"
52 #include "MarkingConstraintSet.h"
53 #include "PreventCollectionScope.h"
54 #include "SamplingProfiler.h"
55 #include "ShadowChicken.h"
56 #include "SpaceTimeMutatorScheduler.h"
57 #include "SuperSampler.h"
58 #include "StopIfNecessaryTimer.h"
59 #include "SynchronousStopTheWorldMutatorScheduler.h"
60 #include "TypeProfilerLog.h"
61 #include "UnlinkedCodeBlock.h"
62 #include "VM.h"
63 #include "WeakSetInlines.h"
64 #include <algorithm>
65 #include <wtf/CurrentTime.h>
66 #include <wtf/MainThread.h>
67 #include <wtf/ParallelVectorIterator.h>
68 #include <wtf/ProcessID.h>
69 #include <wtf/RAMSize.h>
70 #include <wtf/SimpleStats.h>
71
72 #if USE(FOUNDATION)
73 #if __has_include(<objc/objc-internal.h>)
74 #include <objc/objc-internal.h>
75 #else
76 extern "C" void* objc_autoreleasePoolPush(void);
77 extern "C" void objc_autoreleasePoolPop(void *context);
78 #endif
79 #endif // USE(FOUNDATION)
80
81 using namespace std;
82
83 namespace JSC {
84
85 namespace {
86
87 bool verboseStop = false;
88
89 double maxPauseMS(double thisPauseMS)
90 {
91     static double maxPauseMS;
92     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
93     return maxPauseMS;
94 }
95
96 size_t minHeapSize(HeapType heapType, size_t ramSize)
97 {
98     if (heapType == LargeHeap) {
99         double result = min(
100             static_cast<double>(Options::largeHeapSize()),
101             ramSize * Options::smallHeapRAMFraction());
102         return static_cast<size_t>(result);
103     }
104     return Options::smallHeapSize();
105 }
106
107 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
108 {
109     if (heapSize < ramSize * Options::smallHeapRAMFraction())
110         return Options::smallHeapGrowthFactor() * heapSize;
111     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
112         return Options::mediumHeapGrowthFactor() * heapSize;
113     return Options::largeHeapGrowthFactor() * heapSize;
114 }
115
116 bool isValidSharedInstanceThreadState(VM* vm)
117 {
118     return vm->currentThreadIsHoldingAPILock();
119 }
120
121 bool isValidThreadState(VM* vm)
122 {
123     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
124         return false;
125
126     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
127         return false;
128
129     return true;
130 }
131
132 void recordType(TypeCountSet& set, JSCell* cell)
133 {
134     const char* typeName = "[unknown]";
135     const ClassInfo* info = cell->classInfo();
136     if (info && info->className)
137         typeName = info->className;
138     set.add(typeName);
139 }
140
141 bool measurePhaseTiming()
142 {
143     return false;
144 }
145
146 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
147 {
148     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
149     static std::once_flag once;
150     std::call_once(
151         once,
152         [] {
153             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
154         });
155     return *result;
156 }
157
158 SimpleStats& timingStats(const char* name, CollectionScope scope)
159 {
160     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
161 }
162
163 class TimingScope {
164 public:
165     TimingScope(std::optional<CollectionScope> scope, const char* name)
166         : m_scope(scope)
167         , m_name(name)
168     {
169         if (measurePhaseTiming())
170             m_before = monotonicallyIncreasingTimeMS();
171     }
172     
173     TimingScope(Heap& heap, const char* name)
174         : TimingScope(heap.collectionScope(), name)
175     {
176     }
177     
178     void setScope(std::optional<CollectionScope> scope)
179     {
180         m_scope = scope;
181     }
182     
183     void setScope(Heap& heap)
184     {
185         setScope(heap.collectionScope());
186     }
187     
188     ~TimingScope()
189     {
190         if (measurePhaseTiming()) {
191             double after = monotonicallyIncreasingTimeMS();
192             double timing = after - m_before;
193             SimpleStats& stats = timingStats(m_name, *m_scope);
194             stats.add(timing);
195             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
196         }
197     }
198 private:
199     std::optional<CollectionScope> m_scope;
200     double m_before;
201     const char* m_name;
202 };
203
204 } // anonymous namespace
205
206 class Heap::Thread : public AutomaticThread {
207 public:
208     Thread(const LockHolder& locker, Heap& heap)
209         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
210         , m_heap(heap)
211     {
212     }
213     
214 protected:
215     PollResult poll(const LockHolder& locker) override
216     {
217         if (m_heap.m_threadShouldStop) {
218             m_heap.notifyThreadStopping(locker);
219             return PollResult::Stop;
220         }
221         if (m_heap.shouldCollectInThread(locker))
222             return PollResult::Work;
223         return PollResult::Wait;
224     }
225     
226     WorkResult work() override
227     {
228         m_heap.collectInThread();
229         return WorkResult::Continue;
230     }
231     
232     void threadDidStart() override
233     {
234         WTF::registerGCThread(GCThreadType::Main);
235     }
236
237 private:
238     Heap& m_heap;
239 };
240
241 Heap::Heap(VM* vm, HeapType heapType)
242     : m_heapType(heapType)
243     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
244     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
245     , m_sizeAfterLastCollect(0)
246     , m_sizeAfterLastFullCollect(0)
247     , m_sizeBeforeLastFullCollect(0)
248     , m_sizeAfterLastEdenCollect(0)
249     , m_sizeBeforeLastEdenCollect(0)
250     , m_bytesAllocatedThisCycle(0)
251     , m_bytesAbandonedSinceLastFullCollect(0)
252     , m_maxEdenSize(m_minBytesPerCycle)
253     , m_maxHeapSize(m_minBytesPerCycle)
254     , m_shouldDoFullCollection(false)
255     , m_totalBytesVisited(0)
256     , m_objectSpace(this)
257     , m_extraMemorySize(0)
258     , m_deprecatedExtraMemorySize(0)
259     , m_machineThreads(this)
260     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this))
261     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
262     , m_raceMarkStack(std::make_unique<MarkStackArray>())
263     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
264     , m_handleSet(vm)
265     , m_codeBlocks(std::make_unique<CodeBlockSet>())
266     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
267     , m_isSafeToCollect(false)
268     , m_vm(vm)
269     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
270     // schedule the timer if we've never done a collection.
271     , m_lastFullGCLength(0.01)
272     , m_lastEdenGCLength(0.01)
273 #if USE(CF)
274     , m_runLoop(CFRunLoopGetCurrent())
275 #endif // USE(CF)
276     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
277     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
278     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
279     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
280     , m_deferralDepth(0)
281 #if USE(FOUNDATION)
282     , m_delayedReleaseRecursionCount(0)
283 #endif
284     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
285     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
286     , m_helperClient(&heapHelperPool())
287     , m_scheduler(std::make_unique<SpaceTimeMutatorScheduler>(*this))
288     , m_threadLock(Box<Lock>::create())
289     , m_threadCondition(AutomaticThreadCondition::create())
290 {
291     m_worldState.store(0);
292     
293     if (Options::useConcurrentGC())
294         m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
295     else {
296         // We simulate turning off concurrent GC by making the scheduler say that the world
297         // should always be stopped when the collector is running.
298         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
299     }
300     
301     if (Options::verifyHeap())
302         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
303     
304     m_collectorSlotVisitor->optimizeForStoppedMutator();
305     
306     LockHolder locker(*m_threadLock);
307     m_thread = adoptRef(new Thread(locker, *this));
308 }
309
310 Heap::~Heap()
311 {
312     for (auto& slotVisitor : m_parallelSlotVisitors)
313         slotVisitor->clearMarkStacks();
314     m_collectorSlotVisitor->clearMarkStacks();
315     m_mutatorMarkStack->clear();
316     m_raceMarkStack->clear();
317     
318     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
319         WeakBlock::destroy(*this, block);
320 }
321
322 bool Heap::isPagedOut(double deadline)
323 {
324     return m_objectSpace.isPagedOut(deadline);
325 }
326
327 // The VM is being destroyed and the collector will never run again.
328 // Run all pending finalizers now because we won't get another chance.
329 void Heap::lastChanceToFinalize()
330 {
331     RELEASE_ASSERT(!m_vm->entryScope);
332     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
333     
334     if (m_collectContinuouslyThread) {
335         {
336             LockHolder locker(m_collectContinuouslyLock);
337             m_shouldStopCollectingContinuously = true;
338             m_collectContinuouslyCondition.notifyOne();
339         }
340         waitForThreadCompletion(m_collectContinuouslyThread);
341     }
342     
343     // Carefully bring the thread down. We need to use waitForCollector() until we know that there
344     // won't be any other collections.
345     bool stopped = false;
346     {
347         LockHolder locker(*m_threadLock);
348         stopped = m_thread->tryStop(locker);
349         if (!stopped) {
350             m_threadShouldStop = true;
351             m_threadCondition->notifyOne(locker);
352         }
353     }
354     if (!stopped) {
355         waitForCollector(
356             [&] (const LockHolder&) -> bool {
357                 return m_threadIsStopping;
358             });
359         // It's now safe to join the thread, since we know that there will not be any more collections.
360         m_thread->join();
361     }
362     
363     m_arrayBuffers.lastChanceToFinalize();
364     m_codeBlocks->lastChanceToFinalize();
365     m_objectSpace.stopAllocating();
366     m_objectSpace.lastChanceToFinalize();
367     releaseDelayedReleasedObjects();
368
369     sweepAllLogicallyEmptyWeakBlocks();
370 }
371
372 void Heap::releaseDelayedReleasedObjects()
373 {
374 #if USE(FOUNDATION)
375     // We need to guard against the case that releasing an object can create more objects due to the
376     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
377     // back here and could try to recursively release objects. We guard that with a recursive entry
378     // count. Only the initial call will release objects; recursive calls simply return and let the
379     // initial call to the function take care of any objects created during release time.
380     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
381     // and use a temp Vector for the actual releasing.
382     if (!m_delayedReleaseRecursionCount++) {
383         while (!m_delayedReleaseObjects.isEmpty()) {
384             ASSERT(m_vm->currentThreadIsHoldingAPILock());
385
386             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
387
388             {
389                 // We need to drop locks before calling out to arbitrary code.
390                 JSLock::DropAllLocks dropAllLocks(m_vm);
391
392                 void* context = objc_autoreleasePoolPush();
393                 objectsToRelease.clear();
394                 objc_autoreleasePoolPop(context);
395             }
396         }
397     }
398     m_delayedReleaseRecursionCount--;
399 #endif
400 }
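// Note on the pattern above: the recursion counter plus the WTFMove of the pending vector into a
// local is what makes re-entrant releases safe. Any objects queued by JS that runs during a release
// land in a fresh m_delayedReleaseObjects vector, which the outermost call drains on its next pass
// around the while loop.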
401
402 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
403 {
404     didAllocate(size);
405     collectIfNecessaryOrDefer();
406 }
407
408 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
409 {
410     m_deprecatedExtraMemorySize += size;
411     reportExtraMemoryAllocatedSlowCase(size);
412 }
413
414 void Heap::reportAbandonedObjectGraph()
415 {
416     // Our clients don't know exactly how much memory they
417     // are abandoning so we just guess for them.
418     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
419
420     // We want to accelerate the next collection. Because memory has just 
421     // been abandoned, the next collection has the potential to 
422     // be more profitable. Since allocation is the trigger for collection, 
423     // we hasten the next collection by pretending that we've allocated more memory. 
424     if (m_fullActivityCallback) {
425         m_fullActivityCallback->didAllocate(
426             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
427     }
428     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
429 }
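// For example, with a 100 MB capacity() this records roughly 10 MB as abandoned, and the full-GC
// timer is credited with the heap's growth since the last full collection plus this cycle's
// allocations and previously abandoned bytes, so the next full collection is scheduled sooner.
// (Illustrative numbers only.)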
430
431 void Heap::protect(JSValue k)
432 {
433     ASSERT(k);
434     ASSERT(m_vm->currentThreadIsHoldingAPILock());
435
436     if (!k.isCell())
437         return;
438
439     m_protectedValues.add(k.asCell());
440 }
441
442 bool Heap::unprotect(JSValue k)
443 {
444     ASSERT(k);
445     ASSERT(m_vm->currentThreadIsHoldingAPILock());
446
447     if (!k.isCell())
448         return false;
449
450     return m_protectedValues.remove(k.asCell());
451 }
452
453 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
454 {
455     if (m_arrayBuffers.addReference(cell, buffer)) {
456         collectIfNecessaryOrDefer();
457         didAllocate(buffer->gcSizeEstimateInBytes());
458     }
459 }
460
461 void Heap::finalizeUnconditionalFinalizers()
462 {
463     while (m_unconditionalFinalizers.hasNext()) {
464         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
465         finalizer->finalizeUnconditionally();
466     }
467 }
468
469 void Heap::willStartIterating()
470 {
471     m_objectSpace.willStartIterating();
472 }
473
474 void Heap::didFinishIterating()
475 {
476     m_objectSpace.didFinishIterating();
477 }
478
479 void Heap::completeAllJITPlans()
480 {
481 #if ENABLE(JIT)
482     JITWorklist::instance()->completeAllForVM(*m_vm);
483 #endif // ENABLE(JIT)
484     DFG::completeAllPlansForVM(*m_vm);
485 }
486
487 template<typename Func>
488 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
489 {
490     m_codeBlocks->iterateCurrentlyExecuting(func);
491     DFG::iterateCodeBlocksForGC(*m_vm, func);
492 }
493
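// The "WithoutHoldingLocks" variant below copies the code blocks into a local vector first and only
// then invokes func, so func runs after the CodeBlockSet / DFG worklist iteration has finished
// rather than from inside it. That lets func do work that would be risky while those iterations
// hold their locks.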
494 template<typename Func>
495 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
496 {
497     Vector<CodeBlock*, 256> codeBlocks;
498     iterateExecutingAndCompilingCodeBlocks(
499         [&] (CodeBlock* codeBlock) {
500             codeBlocks.append(codeBlock);
501         });
502     for (CodeBlock* codeBlock : codeBlocks)
503         func(codeBlock);
504 }
505
506 void Heap::assertSharedMarkStacksEmpty()
507 {
508     bool ok = true;
509     
510     if (!m_sharedCollectorMarkStack->isEmpty()) {
511         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
512         ok = false;
513     }
514     
515     if (!m_sharedMutatorMarkStack->isEmpty()) {
516         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
517         ok = false;
518     }
519     
520     RELEASE_ASSERT(ok);
521 }
522
523 void Heap::markToFixpoint(double gcStartTime)
524 {
525     TimingScope markToFixpointTimingScope(*this, "Heap::markToFixpoint");
526     
527     if (m_collectionScope == CollectionScope::Full) {
528         m_opaqueRoots.clear();
529         m_collectorSlotVisitor->clearMarkStacks();
530         m_mutatorMarkStack->clear();
531     }
532
533     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
534
535     beginMarking();
536
537     m_parallelMarkersShouldExit = false;
538
539     m_helperClient.setFunction(
540         [this] () {
541             SlotVisitor* slotVisitor;
542             {
543                 LockHolder locker(m_parallelSlotVisitorLock);
544                 if (m_availableParallelSlotVisitors.isEmpty()) {
545                     std::unique_ptr<SlotVisitor> newVisitor =
546                         std::make_unique<SlotVisitor>(*this);
547                     
548                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
549                         newVisitor->optimizeForStoppedMutator();
550                     
551                     slotVisitor = newVisitor.get();
552                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
553                 } else
554                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
555             }
556
557             WTF::registerGCThread(GCThreadType::Helper);
558
559             {
560                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
561                 slotVisitor->didStartMarking();
562                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
563             }
564
565             {
566                 LockHolder locker(m_parallelSlotVisitorLock);
567                 m_availableParallelSlotVisitors.append(slotVisitor);
568             }
569         });
570
571     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
572     slotVisitor.didStartMarking();
573
574     m_constraintSet->resetStats();
575     
576     m_scheduler->beginCollection();
577     if (Options::logGC())
578         m_scheduler->log();
579     
580     // Wondering what m_constraintSet->executeXYZ does? It's running the constraints created by
581     // Heap::buildConstraintSet().
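    // Each constraint is, roughly, a named callback over a SlotVisitor that the fixpoint loop below
    // re-executes until no constraint produces new marking work; with this change, opaque root
    // scanning is expressed as such a constraint rather than as a hard-coded phase. A registration
    // might look roughly like this (illustrative shape only, not the actual buildConstraintSet()
    // code):
    //
    //     m_constraintSet->add(
    //         "Or", "Opaque Roots",
    //         [this] (SlotVisitor& visitor) {
    //             // Ask clients to report their opaque roots to the visitor.
    //         });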
582     
583     m_constraintSet->executeBootstrap(slotVisitor, MonotonicTime::infinity());
584     m_scheduler->didExecuteConstraints();
585
586     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
587     // checks because bootstrap would have put things into the visitor. So, we should fall
588     // through to draining.
589     
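    // Each pass of the loop below does, in order: (1) if the collector visitor has reached
    // termination, merge the parallel visitors and re-run the convergence constraints, breaking out
    // once the constraints converge with an empty visitor; (2) drain the mark stacks in parallel
    // while the world is stopped; (3) when the scheduler says it is time, resume the mutator, keep
    // draining passively (concurrently), then stop the world again and let the scheduler re-plan.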
590     unsigned iteration = 1;
591     for (;;) {
592         if (Options::logGC())
593             dataLog("v=", bytesVisited() / 1024, "kb o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
594         
595         if (slotVisitor.didReachTermination()) {
596             if (Options::logGC())
597                 dataLog("i#", iteration, " ");
598         
599             assertSharedMarkStacksEmpty();
600             
601             slotVisitor.mergeIfNecessary();
602             for (auto& parallelVisitor : m_parallelSlotVisitors)
603                 parallelVisitor->mergeIfNecessary();
604             
605             // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
606             // we don't have to execute root constraints again unless the mutator did run. At a
607             // minimum, we could use this for work estimates - but it's probably more than just an
608             // estimate.
609             // https://bugs.webkit.org/show_bug.cgi?id=166828
610             
611             // FIXME: We should take advantage of the fact that we could timeout. This only comes
612             // into play if we're executing constraints for the first time. But that will matter
613             // when we have deep stacks or a lot of DOM stuff.
614             // https://bugs.webkit.org/show_bug.cgi?id=166831
615             
616             bool converged =
617                 m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
618             if (converged && slotVisitor.isEmpty()) {
619                 assertSharedMarkStacksEmpty();
620                 break;
621             }
622             
623             m_scheduler->didExecuteConstraints();
624             iteration++;
625         }
626         
627         if (Options::logGC())
628             dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
629         
630         {
631             ParallelModeEnabler enabler(slotVisitor);
632             slotVisitor.drainInParallel(m_scheduler->timeToResume());
633         }
634         
635         if (!m_scheduler->shouldResume())
636             continue;
637         
638         m_scheduler->willResume();
639         
640         if (Options::logGC()) {
641             double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
642             dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
643         }
644         
645         resumeTheWorld();
646         
647         {
648             ParallelModeEnabler enabler(slotVisitor);
649             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
650         }
651
652         stopTheWorld();
653         
654         if (Options::logGC())
655             dataLog("[GC: ");
656         
657         m_scheduler->didStop();
658         
659         if (Options::logGC())
660             m_scheduler->log();
661     }
662     
663     m_scheduler->endCollection();
664
665     {
666         std::lock_guard<Lock> lock(m_markingMutex);
667         m_parallelMarkersShouldExit = true;
668         m_markingConditionVariable.notifyAll();
669     }
670     m_helperClient.finish();
671
672     iterateExecutingAndCompilingCodeBlocks(
673         [&] (CodeBlock* codeBlock) {
674             writeBarrier(codeBlock);
675         });
676         
677     updateObjectCounts(gcStartTime);
678     endMarking();
679 }
680
681 void Heap::gatherStackRoots(ConservativeRoots& roots)
682 {
683     m_machineThreads.gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
684 }
685
686 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
687 {
688 #if !ENABLE(JIT)
689     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
690 #else
691     UNUSED_PARAM(roots);
692 #endif
693 }
694
695 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
696 {
697 #if ENABLE(DFG_JIT)
698     m_vm->gatherConservativeRoots(roots);
699 #else
700     UNUSED_PARAM(roots);
701 #endif
702 }
703
704 void Heap::beginMarking()
705 {
706     TimingScope timingScope(*this, "Heap::beginMarking");
707     if (m_collectionScope == CollectionScope::Full)
708         m_codeBlocks->clearMarksForFullCollection();
709     m_jitStubRoutines->clearMarks();
710     m_objectSpace.beginMarking();
711     setMutatorShouldBeFenced(true);
712 }
713
714 void Heap::removeDeadCompilerWorklistEntries()
715 {
716 #if ENABLE(DFG_JIT)
717     for (unsigned i = DFG::numberOfWorklists(); i--;)
718         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
719 #endif
720 }
721
722 bool Heap::isHeapSnapshotting() const
723 {
724     HeapProfiler* heapProfiler = m_vm->heapProfiler();
725     if (UNLIKELY(heapProfiler))
726         return heapProfiler->activeSnapshotBuilder();
727     return false;
728 }
729
730 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
731     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
732         : m_builder(builder)
733     {
734     }
735
736     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
737     {
738         if (kind == HeapCell::JSCell) {
739             JSCell* cell = static_cast<JSCell*>(heapCell);
740             cell->methodTable()->heapSnapshot(cell, m_builder);
741         }
742         return IterationStatus::Continue;
743     }
744
745     HeapSnapshotBuilder& m_builder;
746 };
747
748 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
749 {
750     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
751         HeapIterationScope heapIterationScope(*this);
752         GatherHeapSnapshotData functor(*builder);
753         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
754     }
755 }
756
757 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
758     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
759         : m_snapshot(snapshot)
760     {
761     }
762
763     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
764     {
765         if (kind == HeapCell::JSCell)
766             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
767         return IterationStatus::Continue;
768     }
769
770     HeapSnapshot& m_snapshot;
771 };
772
773 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
774 {
775     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
776         HeapIterationScope heapIterationScope(*this);
777         RemoveDeadHeapSnapshotNodes functor(*snapshot);
778         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
779         snapshot->shrinkToFit();
780     }
781 }
782
783 void Heap::updateObjectCounts(double gcStartTime)
784 {
785     if (Options::logGC() == GCLogging::Verbose) {
786         size_t visitCount = m_collectorSlotVisitor->visitCount();
787         visitCount += threadVisitCount();
788         dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
789     }
790     
791     if (m_collectionScope == CollectionScope::Full)
792         m_totalBytesVisited = 0;
793
794     m_totalBytesVisitedThisCycle = bytesVisited();
795     
796     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
797 }
798
799 void Heap::endMarking()
800 {
801     m_collectorSlotVisitor->reset();
802
803     for (auto& parallelVisitor : m_parallelSlotVisitors)
804         parallelVisitor->reset();
805
806     assertSharedMarkStacksEmpty();
807     m_weakReferenceHarvesters.removeAll();
808
809     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
810     
811     m_objectSpace.endMarking();
812     setMutatorShouldBeFenced(Options::forceFencedBarrier());
813 }
814
815 size_t Heap::objectCount()
816 {
817     return m_objectSpace.objectCount();
818 }
819
820 size_t Heap::extraMemorySize()
821 {
822     return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
823 }
824
825 size_t Heap::size()
826 {
827     return m_objectSpace.size() + extraMemorySize();
828 }
829
830 size_t Heap::capacity()
831 {
832     return m_objectSpace.capacity() + extraMemorySize();
833 }
834
835 size_t Heap::protectedGlobalObjectCount()
836 {
837     size_t result = 0;
838     forEachProtectedCell(
839         [&] (JSCell* cell) {
840             if (cell->isObject() && asObject(cell)->isGlobalObject())
841                 result++;
842         });
843     return result;
844 }
845
846 size_t Heap::globalObjectCount()
847 {
848     HeapIterationScope iterationScope(*this);
849     size_t result = 0;
850     m_objectSpace.forEachLiveCell(
851         iterationScope,
852         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
853             if (kind != HeapCell::JSCell)
854                 return IterationStatus::Continue;
855             JSCell* cell = static_cast<JSCell*>(heapCell);
856             if (cell->isObject() && asObject(cell)->isGlobalObject())
857                 result++;
858             return IterationStatus::Continue;
859         });
860     return result;
861 }
862
863 size_t Heap::protectedObjectCount()
864 {
865     size_t result = 0;
866     forEachProtectedCell(
867         [&] (JSCell*) {
868             result++;
869         });
870     return result;
871 }
872
873 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
874 {
875     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
876     forEachProtectedCell(
877         [&] (JSCell* cell) {
878             recordType(*result, cell);
879         });
880     return result;
881 }
882
883 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
884 {
885     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
886     HeapIterationScope iterationScope(*this);
887     m_objectSpace.forEachLiveCell(
888         iterationScope,
889         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
890             if (kind == HeapCell::JSCell)
891                 recordType(*result, static_cast<JSCell*>(cell));
892             return IterationStatus::Continue;
893         });
894     return result;
895 }
896
897 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
898 {
899     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
900         return;
901     
902     PreventCollectionScope preventCollectionScope(*this);
903     
904     // If JavaScript is running, it's not safe to delete all JavaScript code, since
905     // we'll end up returning to deleted code.
906     RELEASE_ASSERT(!m_vm->entryScope);
907     RELEASE_ASSERT(!m_collectionScope);
908
909     completeAllJITPlans();
910
911     for (ExecutableBase* executable : m_executables)
912         executable->clearCode();
913 }
914
915 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
916 {
917     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
918         return;
919     
920     PreventCollectionScope preventCollectionScope(*this);
921
922     RELEASE_ASSERT(!m_collectionScope);
923     
924     for (ExecutableBase* current : m_executables) {
925         if (!current->isFunctionExecutable())
926             continue;
927         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
928     }
929 }
930
931 void Heap::clearUnmarkedExecutables()
932 {
933     for (unsigned i = m_executables.size(); i--;) {
934         ExecutableBase* current = m_executables[i];
935         if (isMarked(current))
936             continue;
937
938         // Eagerly dereference the Executable's JITCode in order to run watchpoint
939         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
940         current->clearCode();
941         std::swap(m_executables[i], m_executables.last());
942         m_executables.removeLast();
943     }
944
945     m_executables.shrinkToFit();
946 }
947
948 void Heap::deleteUnmarkedCompiledCode()
949 {
950     clearUnmarkedExecutables();
951     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_lastCollectionScope);
952     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
953 }
954
955 void Heap::addToRememberedSet(const JSCell* constCell)
956 {
957     JSCell* cell = const_cast<JSCell*>(constCell);
958     ASSERT(cell);
959     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
960     m_barriersExecuted++;
961     if (m_mutatorShouldBeFenced) {
962         WTF::loadLoadFence();
963         if (!isMarkedConcurrently(cell)) {
964             // During a full collection a store into an unmarked object that had survived past
965             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
966             // object gets marked at some time after this then it will go down the normal marking
967             // path. So, we don't have to remember this object. We could return here. But we go
968             // further and attempt to re-white the object.
969             
970             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
971             
972             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
973                 // Now we protect against this race:
974                 //
975                 //     1) Object starts out black + unmarked.
976                 //     --> We do isMarkedConcurrently here.
977                 //     2) Object is marked and greyed.
978                 //     3) Object is scanned and blacked.
979                 //     --> We do atomicCompareExchangeCellStateStrong here.
980                 //
981                 // In this case we would have made the object white again, even though it should
982                 // be black. This check lets us correct our mistake. This relies on the fact that
983                 // isMarkedConcurrently converges monotonically to true.
984                 if (isMarkedConcurrently(cell)) {
985                     // It's difficult to work out whether the object should be grey or black at
986                     // this point. We say black conservatively.
987                     cell->setCellState(CellState::PossiblyBlack);
988                 }
989                 
990                 // Either way, we can return. Most likely, the object was not marked, and so the
991                 // object is now labeled white. This means that future barrier executions will not
992                 // fire. In the unlikely event that the object had become marked, we can still
993                 // return anyway, since we proved that the object was not marked at the time that
994                 // we executed this slow path.
995             }
996             
997             return;
998         }
999     } else
1000         ASSERT(Heap::isMarkedConcurrently(cell));
1001     // It could be that the object was *just* marked. This means that the collector may set the
1002     // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
1003     // race with the collector here. If we win then this is accurate because the object _will_
1004     // get scanned again. If we lose then someone else will barrier the object again. That would
1005     // be unfortunate but not the end of the world.
1006     cell->setCellState(CellState::PossiblyGrey);
1007     m_mutatorMarkStack->append(cell);
1008 }
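// This is the slow path of the generational write barrier. The inline fast path (a simplified,
// illustrative sketch, not the exact inline code) only reaches this function for cells the
// collector may already have blackened:
//
//     ALWAYS_INLINE void writeBarrier(const JSCell* from)
//     {
//         if (from && from->cellState() == CellState::PossiblyBlack)
//             addToRememberedSet(from);
//     }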
1009
1010 void Heap::collectAllGarbage()
1011 {
1012     if (!m_isSafeToCollect)
1013         return;
1014     
1015     collectSync(CollectionScope::Full);
1016
1017     DeferGCForAWhile deferGC(*this);
1018     if (UNLIKELY(Options::useImmortalObjects()))
1019         sweeper()->willFinishSweeping();
1020     else {
1021         double before = 0;
1022         if (Options::logGC()) {
1023             dataLog("[Full sweep: ", capacity() / 1024, "kb ");
1024             before = currentTimeMS();
1025         }
1026         m_objectSpace.sweep();
1027         m_objectSpace.shrink();
1028         if (Options::logGC()) {
1029             double after = currentTimeMS();
1030             dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms]\n");
1031         }
1032     }
1033     m_objectSpace.assertNoUnswept();
1034
1035     sweepAllLogicallyEmptyWeakBlocks();
1036 }
1037
1038 void Heap::collectAsync(std::optional<CollectionScope> scope)
1039 {
1040     if (!m_isSafeToCollect)
1041         return;
1042
1043     bool alreadyRequested = false;
1044     {
1045         LockHolder locker(*m_threadLock);
1046         for (std::optional<CollectionScope> request : m_requests) {
1047             if (scope) {
1048                 if (scope == CollectionScope::Eden) {
1049                     alreadyRequested = true;
1050                     break;
1051                 } else {
1052                     RELEASE_ASSERT(scope == CollectionScope::Full);
1053                     if (request == CollectionScope::Full) {
1054                         alreadyRequested = true;
1055                         break;
1056                     }
1057                 }
1058             } else {
1059                 if (!request || request == CollectionScope::Full) {
1060                     alreadyRequested = true;
1061                     break;
1062                 }
1063             }
1064         }
1065     }
1066     if (alreadyRequested)
1067         return;
1068
1069     requestCollection(scope);
1070 }
1071
1072 void Heap::collectSync(std::optional<CollectionScope> scope)
1073 {
1074     if (!m_isSafeToCollect)
1075         return;
1076     
1077     waitForCollection(requestCollection(scope));
1078 }
1079
1080 bool Heap::shouldCollectInThread(const LockHolder&)
1081 {
1082     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1083     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1084     
1085     return !m_requests.isEmpty();
1086 }
1087
1088 void Heap::collectInThread()
1089 {
1090     m_currentGCStartTime = MonotonicTime::now();
1091     
1092     std::optional<CollectionScope> scope;
1093     {
1094         LockHolder locker(*m_threadLock);
1095         RELEASE_ASSERT(!m_requests.isEmpty());
1096         scope = m_requests.first();
1097     }
1098     
1099     SuperSamplerScope superSamplerScope(false);
1100     TimingScope collectImplTimingScope(scope, "Heap::collectInThread");
1101     
1102 #if ENABLE(ALLOCATION_LOGGING)
1103     dataLogF("JSC GC starting collection.\n");
1104 #endif
1105     
1106     stopTheWorld();
1107     
1108     if (false)
1109         dataLog("GC START!\n");
1110
1111     MonotonicTime before;
1112     if (Options::logGC()) {
1113         dataLog("[GC: START ", capacity() / 1024, "kb ");
1114         before = MonotonicTime::now();
1115     }
1116     
1117     double gcStartTime;
1118     
1119     ASSERT(m_isSafeToCollect);
1120     if (m_collectionScope) {
1121         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1122         RELEASE_ASSERT_NOT_REACHED();
1123     }
1124     
1125     willStartCollection(scope);
1126     collectImplTimingScope.setScope(*this);
1127     
1128     gcStartTime = WTF::monotonicallyIncreasingTime();
1129     if (m_verifier) {
1130         // Verify that live objects from the last GC cycle haven't been corrupted by
1131         // mutators before we begin this new GC cycle.
1132         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1133             
1134         m_verifier->initializeGCCycle();
1135         m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1136     }
1137     
1138     prepareForMarking();
1139     
1140     markToFixpoint(gcStartTime);
1141     
1142     if (m_verifier) {
1143         m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1144         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1145     }
1146         
1147     if (vm()->typeProfiler())
1148         vm()->typeProfiler()->invalidateTypeSetCache();
1149         
1150     reapWeakHandles();
1151     pruneStaleEntriesFromWeakGCMaps();
1152     sweepArrayBuffers();
1153     snapshotUnswept();
1154     finalizeUnconditionalFinalizers();
1155     removeDeadCompilerWorklistEntries();
1156     notifyIncrementalSweeper();
1157     
1158     m_codeBlocks->iterateCurrentlyExecuting(
1159         [&] (CodeBlock* codeBlock) {
1160             writeBarrier(codeBlock);
1161         });
1162     m_codeBlocks->clearCurrentlyExecuting();
1163         
1164     m_objectSpace.prepareForAllocation();
1165     updateAllocationLimits();
1166
1167     didFinishCollection(gcStartTime);
1168     
1169     if (m_verifier) {
1170         m_verifier->trimDeadObjects();
1171         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1172     }
1173
1174     if (false) {
1175         dataLog("Heap state after GC:\n");
1176         m_objectSpace.dumpBits();
1177     }
1178     
1179     if (Options::logGC()) {
1180         MonotonicTime after = MonotonicTime::now();
1181         double thisPauseMS = (after - m_stopTime).milliseconds();
1182         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (after - before).milliseconds(), "ms END]\n");
1183     }
1184     
1185     {
1186         LockHolder locker(*m_threadLock);
1187         m_requests.removeFirst();
1188         m_lastServedTicket++;
1189         clearMutatorWaiting();
1190     }
1191     ParkingLot::unparkAll(&m_worldState);
1192
1193     if (false)
1194         dataLog("GC END!\n");
1195
1196     setNeedFinalize();
1197     resumeTheWorld();
1198     
1199     m_lastGCStartTime = m_currentGCStartTime;
1200     m_lastGCEndTime = MonotonicTime::now();
1201 }
1202
1203 void Heap::stopTheWorld()
1204 {
1205     RELEASE_ASSERT(!m_collectorBelievesThatTheWorldIsStopped);
1206     waitWhileNeedFinalize();
1207     stopTheMutator();
1208     
1209     if (m_mutatorDidRun)
1210         m_mutatorExecutionVersion++;
1211     
1212     m_mutatorDidRun = false;
1213     
1214     suspendCompilerThreads();
1215     m_collectorBelievesThatTheWorldIsStopped = true;
1216
1217     forEachSlotVisitor(
1218         [&] (SlotVisitor& slotVisitor) {
1219             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1220         });
1221
1222 #if ENABLE(JIT)
1223     {
1224         DeferGCForAWhile awhile(*this);
1225         if (JITWorklist::instance()->completeAllForVM(*m_vm))
1226             setGCDidJIT();
1227     }
1228 #endif // ENABLE(JIT)
1229     
1230     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1231     
1232     m_structureIDTable.flushOldTables();
1233     m_objectSpace.stopAllocating();
1234     
1235     m_stopTime = MonotonicTime::now();
1236 }
1237
1238 void Heap::resumeTheWorld()
1239 {
1240     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1241     // collection cycle or this is just a concurrent phase within a collection cycle:
1242     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1243     //   last active block.
1244     // - During collection cycle: it reinstates the last active block.
1245     m_objectSpace.resumeAllocating();
1246     
1247     m_barriersExecuted = 0;
1248     
1249     RELEASE_ASSERT(m_collectorBelievesThatTheWorldIsStopped);
1250     m_collectorBelievesThatTheWorldIsStopped = false;
1251     
1252     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1253     // become available. We basically want a lockAny() method that will lock whatever lock is available
1254     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1255     // queues at once, which is totally achievable - it would just require memory allocation, which is
1256     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1257     // with a DLG-style handshake mechanism, but that seems not as general.
1258     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1259
1260     forEachSlotVisitor(
1261         [&] (SlotVisitor& slotVisitor) {
1262             slotVisitorsToUpdate.append(&slotVisitor);
1263         });
1264     
1265     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1266         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1267             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1268             bool remove = false;
1269             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1270                 remove = true;
1271             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1272                 slotVisitor.updateMutatorIsStopped(locker);
1273                 remove = true;
1274             }
1275             if (remove) {
1276                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1277                 slotVisitorsToUpdate.takeLast();
1278             }
1279         }
1280         std::this_thread::yield();
1281     }
1282     
1283     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1284         slotVisitor->updateMutatorIsStopped();
1285     
1286     resumeCompilerThreads();
1287     resumeTheMutator();
1288 }
1289
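// The mutator/collector handshake below is driven by bits in m_worldState, as used throughout this
// file: hasAccessBit (the mutator currently has heap access), shouldStopBit (the collector wants
// the mutator stopped), stoppedBit (the mutator really is stopped), mutatorWaitingBit (the mutator
// is parked waiting for a collection ticket to be served), gcDidJITBit (the mutator must run a
// cross-modifying-code fence before executing newly JITed code), and needFinalizeBit (the mutator
// should call finalize()). Transitions use compare-and-swap loops, and threads park on
// &m_worldState via ParkingLot.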
1290 void Heap::stopTheMutator()
1291 {
1292     for (;;) {
1293         unsigned oldState = m_worldState.load();
1294         if ((oldState & stoppedBit)
1295             && (oldState & shouldStopBit))
1296             return;
1297         
1298         // Note: We could just have the mutator stop in-place like we do when !hasAccessBit. We could
1299         // switch to that if it turned out to be less confusing, but then it would not give the
1300         // mutator the opportunity to react to the world being stopped.
1301         if (oldState & mutatorWaitingBit) {
1302             if (m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorWaitingBit))
1303                 ParkingLot::unparkAll(&m_worldState);
1304             continue;
1305         }
1306         
1307         if (!(oldState & hasAccessBit)
1308             || (oldState & stoppedBit)) {
1309             // We can stop the world instantly.
1310             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit | shouldStopBit))
1311                 return;
1312             continue;
1313         }
1314         
1315         RELEASE_ASSERT(oldState & hasAccessBit);
1316         RELEASE_ASSERT(!(oldState & stoppedBit));
1317         m_worldState.compareExchangeStrong(oldState, oldState | shouldStopBit);
1318         m_stopIfNecessaryTimer->scheduleSoon();
1319         ParkingLot::compareAndPark(&m_worldState, oldState | shouldStopBit);
1320     }
1321 }
1322
1323 void Heap::resumeTheMutator()
1324 {
1325     for (;;) {
1326         unsigned oldState = m_worldState.load();
1327         RELEASE_ASSERT(oldState & shouldStopBit);
1328         
1329         if (!(oldState & hasAccessBit)) {
1330             // We can resume the world instantly.
1331             if (m_worldState.compareExchangeWeak(oldState, oldState & ~(stoppedBit | shouldStopBit))) {
1332                 ParkingLot::unparkAll(&m_worldState);
1333                 return;
1334             }
1335             continue;
1336         }
1337         
1338         // We can tell the world to resume.
1339         if (m_worldState.compareExchangeWeak(oldState, oldState & ~shouldStopBit)) {
1340             ParkingLot::unparkAll(&m_worldState);
1341             return;
1342         }
1343     }
1344 }
1345
1346 void Heap::stopIfNecessarySlow()
1347 {
1348     while (stopIfNecessarySlow(m_worldState.load())) { }
1349     
1350     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1351     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1352     
1353     handleGCDidJIT();
1354     handleNeedFinalize();
1355     m_mutatorDidRun = true;
1356 }
1357
1358 bool Heap::stopIfNecessarySlow(unsigned oldState)
1359 {
1360     RELEASE_ASSERT(oldState & hasAccessBit);
1361     
1362     // It's possible for us to wake up with finalization already requested but the world not yet
1363     // resumed. If that happens, we can't run finalization yet.
1364     if (!(oldState & stoppedBit)
1365         && handleNeedFinalize(oldState))
1366         return true;
1367     
1368     if (!(oldState & shouldStopBit) && !m_scheduler->shouldStop()) {
1369         if (!(oldState & stoppedBit))
1370             return false;
1371         m_worldState.compareExchangeStrong(oldState, oldState & ~stoppedBit);
1372         return true;
1373     }
1374     
1375     sanitizeStackForVM(m_vm);
1376
1377     if (verboseStop) {
1378         dataLog("Stopping!\n");
1379         WTFReportBacktrace();
1380     }
1381     m_worldState.compareExchangeStrong(oldState, oldState | stoppedBit);
1382     ParkingLot::unparkAll(&m_worldState);
1383     ParkingLot::compareAndPark(&m_worldState, oldState | stoppedBit);
1384     return true;
1385 }
1386
1387 template<typename Func>
1388 void Heap::waitForCollector(const Func& func)
1389 {
1390     for (;;) {
1391         bool done;
1392         {
1393             LockHolder locker(*m_threadLock);
1394             done = func(locker);
1395             if (!done) {
1396                 setMutatorWaiting();
1397                 // At this point, the collector knows that we intend to wait, and it will clear the
1398                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1399                 // prevents us from parking except when a stop-the-world is also requested. Unparking
1400                 // after clearing means that if the clearing happens after we park, then we will unpark.
1401             }
1402         }
1403
1404         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1405         unsigned oldState = m_worldState.load();
1406         if (stopIfNecessarySlow(oldState))
1407             continue;
1408         
1409         if (done) {
1410             clearMutatorWaiting(); // Clean up just in case.
1411             return;
1412         }
1413         
1414         // If mutatorWaitingBit is still set then we want to wait.
1415         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1416     }
1417 }
1418
1419 void Heap::acquireAccessSlow()
1420 {
1421     for (;;) {
1422         unsigned oldState = m_worldState.load();
1423         RELEASE_ASSERT(!(oldState & hasAccessBit));
1424         
1425         if (oldState & shouldStopBit) {
1426             RELEASE_ASSERT(oldState & stoppedBit);
1427             if (verboseStop) {
1428                 dataLog("Stopping in acquireAccess!\n");
1429                 WTFReportBacktrace();
1430             }
1431             // Wait until we're not stopped anymore.
1432             ParkingLot::compareAndPark(&m_worldState, oldState);
1433             continue;
1434         }
1435         
1436         RELEASE_ASSERT(!(oldState & stoppedBit));
1437         unsigned newState = oldState | hasAccessBit;
1438         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1439             handleGCDidJIT();
1440             handleNeedFinalize();
1441             m_mutatorDidRun = true;
1442             return;
1443         }
1444     }
1445 }
1446
1447 void Heap::releaseAccessSlow()
1448 {
1449     for (;;) {
1450         unsigned oldState = m_worldState.load();
1451         RELEASE_ASSERT(oldState & hasAccessBit);
1452         RELEASE_ASSERT(!(oldState & stoppedBit));
1453         
1454         if (handleNeedFinalize(oldState))
1455             continue;
1456         
1457         if (oldState & shouldStopBit) {
1458             unsigned newState = (oldState & ~hasAccessBit) | stoppedBit;
1459             if (m_worldState.compareExchangeWeak(oldState, newState)) {
1460                 ParkingLot::unparkAll(&m_worldState);
1461                 return;
1462             }
1463             continue;
1464         }
1465         
1466         RELEASE_ASSERT(!(oldState & shouldStopBit));
1467         
1468         if (m_worldState.compareExchangeWeak(oldState, oldState & ~hasAccessBit))
1469             return;
1470     }
1471 }
1472
1473 bool Heap::handleGCDidJIT(unsigned oldState)
1474 {
1475     RELEASE_ASSERT(oldState & hasAccessBit);
1476     if (!(oldState & gcDidJITBit))
1477         return false;
1478     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1479         WTF::crossModifyingCodeFence();
1480         return true;
1481     }
1482     return true;
1483 }
1484
1485 bool Heap::handleNeedFinalize(unsigned oldState)
1486 {
1487     RELEASE_ASSERT(oldState & hasAccessBit);
1488     RELEASE_ASSERT(!(oldState & stoppedBit));
1489     
1490     if (!(oldState & needFinalizeBit))
1491         return false;
1492     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1493         finalize();
1494         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1495         // which case they would be waiting for us to release heap access.
1496         ParkingLot::unparkAll(&m_worldState);
1497         return true;
1498     }
1499     return true;
1500 }
1501
1502 void Heap::handleGCDidJIT()
1503 {
1504     while (handleGCDidJIT(m_worldState.load())) { }
1505 }
1506
1507 void Heap::handleNeedFinalize()
1508 {
1509     while (handleNeedFinalize(m_worldState.load())) { }
1510 }
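// Note that the one-argument helpers above return true both when they performed the transition and
// when their compareExchangeWeak lost a race, so these while loops keep reloading m_worldState
// until the relevant bit is observed to be clear.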
1511
1512 void Heap::setGCDidJIT()
1513 {
1514     m_worldState.transaction(
1515         [&] (unsigned& state) {
1516             RELEASE_ASSERT(state & stoppedBit);
1517             state |= gcDidJITBit;
1518         });
1519 }
1520
1521 void Heap::setNeedFinalize()
1522 {
1523     m_worldState.exchangeOr(needFinalizeBit);
1524     ParkingLot::unparkAll(&m_worldState);
1525     m_stopIfNecessaryTimer->scheduleSoon();
1526 }
1527
1528 void Heap::waitWhileNeedFinalize()
1529 {
1530     for (;;) {
1531         unsigned oldState = m_worldState.load();
1532         if (!(oldState & needFinalizeBit)) {
1533             // This means that either there was no finalize request or the main thread will finalize
1534             // with heap access, so a subsequent call to stopTheWorld() will return only when
1535             // finalize finishes.
1536             return;
1537         }
1538         ParkingLot::compareAndPark(&m_worldState, oldState);
1539     }
1540 }
1541
1542 void Heap::setMutatorWaiting()
1543 {
1544     m_worldState.exchangeOr(mutatorWaitingBit);
1545 }
1546
1547 void Heap::clearMutatorWaiting()
1548 {
1549     m_worldState.exchangeAnd(~mutatorWaitingBit);
1550 }
1551
1552 void Heap::notifyThreadStopping(const LockHolder&)
1553 {
1554     m_threadIsStopping = true;
1555     clearMutatorWaiting();
1556     ParkingLot::unparkAll(&m_worldState);
1557 }
1558
1559 void Heap::finalize()
1560 {
1561     {
1562         HelpingGCScope helpingGCScope(*this);
1563         deleteUnmarkedCompiledCode();
1564         deleteSourceProviderCaches();
1565         sweepLargeAllocations();
1566     }
1567     
1568     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1569         cache->clear();
1570 }
1571
1572 Heap::Ticket Heap::requestCollection(std::optional<CollectionScope> scope)
1573 {
1574     stopIfNecessary();
1575     
1576     ASSERT(vm()->currentThreadIsHoldingAPILock());
1577     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1578     
1579     LockHolder locker(*m_threadLock);
1580     m_requests.append(scope);
1581     m_lastGrantedTicket++;
1582     m_threadCondition->notifyOne(locker);
1583     return m_lastGrantedTicket;
1584 }
1585
1586 void Heap::waitForCollection(Ticket ticket)
1587 {
1588     waitForCollector(
1589         [&] (const LockHolder&) -> bool {
1590             return m_lastServedTicket >= ticket;
1591         });
1592 }
1593
1594 void Heap::sweepLargeAllocations()
1595 {
1596     m_objectSpace.sweepLargeAllocations();
1597 }
1598
1599 void Heap::suspendCompilerThreads()
1600 {
1601 #if ENABLE(DFG_JIT)
1602     // We ensure the worklists so that it's not possible for the mutator to start a new worklist
1603     // after we have suspended the ones that it had started before. That's not very expensive since
1604     // the worklists use AutomaticThreads anyway.
1605     for (unsigned i = DFG::numberOfWorklists(); i--;)
1606         DFG::ensureWorklistForIndex(i).suspendAllThreads();
1607 #endif
1608 }
1609
1610 void Heap::willStartCollection(std::optional<CollectionScope> scope)
1611 {
1612     if (Options::logGC())
1613         dataLog("=> ");
1614     
1615     if (shouldDoFullCollection(scope)) {
1616         m_collectionScope = CollectionScope::Full;
1617         m_shouldDoFullCollection = false;
1618         if (Options::logGC())
1619             dataLog("FullCollection, ");
1620         if (false)
1621             dataLog("Full collection!\n");
1622     } else {
1623         m_collectionScope = CollectionScope::Eden;
1624         if (Options::logGC())
1625             dataLog("EdenCollection, ");
1626         if (false)
1627             dataLog("Eden collection!\n");
1628     }
1629     if (m_collectionScope == CollectionScope::Full) {
1630         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1631         m_extraMemorySize = 0;
1632         m_deprecatedExtraMemorySize = 0;
1633 #if ENABLE(RESOURCE_USAGE)
1634         m_externalMemorySize = 0;
1635 #endif
1636
1637         if (m_fullActivityCallback)
1638             m_fullActivityCallback->willCollect();
1639     } else {
1640         ASSERT(m_collectionScope == CollectionScope::Eden);
1641         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1642     }
1643
1644     if (m_edenActivityCallback)
1645         m_edenActivityCallback->willCollect();
1646
1647     for (auto* observer : m_observers)
1648         observer->willGarbageCollect();
1649 }
1650
1651 void Heap::prepareForMarking()
1652 {
1653     m_objectSpace.prepareForMarking();
1654 }
1655
1656 void Heap::reapWeakHandles()
1657 {
1658     m_objectSpace.reapWeakSets();
1659 }
1660
1661 void Heap::pruneStaleEntriesFromWeakGCMaps()
1662 {
1663     if (m_collectionScope != CollectionScope::Full)
1664         return;
1665     for (auto& pruneCallback : m_weakGCMaps.values())
1666         pruneCallback();
1667 }
1668
1669 void Heap::sweepArrayBuffers()
1670 {
1671     m_arrayBuffers.sweep();
1672 }
1673
1674 void Heap::snapshotUnswept()
1675 {
1676     TimingScope timingScope(*this, "Heap::snapshotUnswept");
1677     m_objectSpace.snapshotUnswept();
1678 }
1679
1680 void Heap::deleteSourceProviderCaches()
1681 {
1682     m_vm->clearSourceProviderCaches();
1683 }
1684
1685 void Heap::notifyIncrementalSweeper()
1686 {
1687     if (m_collectionScope == CollectionScope::Full) {
1688         if (!m_logicallyEmptyWeakBlocks.isEmpty())
1689             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1690     }
1691
1692     m_sweeper->startSweeping();
1693 }
1694
1695 NEVER_INLINE void Heap::didExceedMaxLiveSize()
1696 {
1697     CRASH();
1698 }
1699
1700 void Heap::updateAllocationLimits()
1701 {
1702     static const bool verbose = false;
1703     
1704     if (verbose) {
1705         dataLog("\n");
1706         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
1707     }
1708     
1709     // Calculate our current heap size threshold for the purpose of figuring out when we should
1710     // run another collection. This isn't the same as either size() or capacity(), though it should
1711     // be somewhere between the two. The key is to match the size calculations involved in calls to
1712     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
1713     // fragmentation, size() may be much smaller than capacity().
1714     size_t currentHeapSize = 0;
1715
1716     // For marked space, we use the total number of bytes visited. This matches the logic for
1717     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
1718     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
1719     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
1720     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
1721     currentHeapSize += m_totalBytesVisited;
1722     if (verbose)
1723         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
1724
1725     // It's up to the user to ensure that extraMemorySize() ends up corresponding to the extra
1726     // memory reported at allocation time.
1727     currentHeapSize += extraMemorySize();
1728
1729     if (verbose)
1730         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
1731
1732     if (m_maxLiveSize && currentHeapSize > m_maxLiveSize)
1733         didExceedMaxLiveSize();
1734     
1735     if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1736         HeapStatistics::exitWithFailure();
1737
1738     if (m_collectionScope == CollectionScope::Full) {
1739         // To avoid pathological GC churn in very small and very large heaps, we set
1740         // the new allocation limit based on the current size of the heap, with a
1741         // fixed minimum.
1742         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1743         if (verbose)
1744             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
1745         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1746         if (verbose)
1747             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
1748         m_sizeAfterLastFullCollect = currentHeapSize;
1749         if (verbose)
1750             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
1751         m_bytesAbandonedSinceLastFullCollect = 0;
1752         if (verbose)
1753             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
1754     } else {
1755         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1756         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
1757         // But we are sloppy, so we have to defend against the overflow.
1758         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
1759         if (verbose)
1760             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1761         m_sizeAfterLastEdenCollect = currentHeapSize;
1762         if (verbose)
1763             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
1764         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1765         double minEdenToOldGenerationRatio = 1.0 / 3.0;
1766         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1767             m_shouldDoFullCollection = true;
1768         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
1769         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1770         if (verbose)
1771             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
1772         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1773         if (verbose)
1774             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
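        // Worked example with illustrative (made-up) numbers: suppose the previous cycle left
        // m_maxHeapSize = 100MB and m_sizeAfterLastCollect = 60MB, i.e. a 40MB nursery, and this eden
        // collection measures currentHeapSize = 65MB. The first computation above gives
        // m_maxEdenSize = 100 - 65 = 35MB (ratio 0.35, so no full collection is forced). Growing
        // m_maxHeapSize by the 5MB of newly retained memory gives 105MB, so m_maxEdenSize becomes
        // 105 - 65 = 40MB: the nursery is back at its original size.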
1775         if (m_fullActivityCallback) {
1776             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1777             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1778         }
1779     }
1780
1781     m_sizeAfterLastCollect = currentHeapSize;
1782     if (verbose)
1783         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
1784     m_bytesAllocatedThisCycle = 0;
1785
1786     if (Options::logGC())
1787         dataLog("=> ", currentHeapSize / 1024, "kb, ");
1788 }
1789
1790 void Heap::didFinishCollection(double gcStartTime)
1791 {
1792     double gcEndTime = WTF::monotonicallyIncreasingTime();
1793     CollectionScope scope = *m_collectionScope;
1794     if (scope == CollectionScope::Full)
1795         m_lastFullGCLength = gcEndTime - gcStartTime;
1796     else
1797         m_lastEdenGCLength = gcEndTime - gcStartTime;
1798
1799 #if ENABLE(RESOURCE_USAGE)
1800     ASSERT(externalMemorySize() <= extraMemorySize());
1801 #endif
1802
1803     if (Options::recordGCPauseTimes())
1804         HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1805
1806     if (Options::useZombieMode())
1807         zombifyDeadObjects();
1808
1809     if (Options::dumpObjectStatistics())
1810         HeapStatistics::dumpObjectStatistics(this);
1811
1812     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
1813         gatherExtraHeapSnapshotData(*heapProfiler);
1814         removeDeadHeapSnapshotNodes(*heapProfiler);
1815     }
1816
1817     RELEASE_ASSERT(m_collectionScope);
1818     m_lastCollectionScope = m_collectionScope;
1819     m_collectionScope = std::nullopt;
1820
1821     for (auto* observer : m_observers)
1822         observer->didGarbageCollect(scope);
1823 }
1824
1825 void Heap::resumeCompilerThreads()
1826 {
1827 #if ENABLE(DFG_JIT)
1828     for (unsigned i = DFG::numberOfWorklists(); i--;)
1829         DFG::existingWorklistForIndex(i).resumeAllThreads();
1830 #endif
1831 }
1832
1833 GCActivityCallback* Heap::fullActivityCallback()
1834 {
1835     return m_fullActivityCallback.get();
1836 }
1837
1838 GCActivityCallback* Heap::edenActivityCallback()
1839 {
1840     return m_edenActivityCallback.get();
1841 }
1842
1843 IncrementalSweeper* Heap::sweeper()
1844 {
1845     return m_sweeper.get();
1846 }
1847
1848 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1849 {
1850     if (m_fullActivityCallback)
1851         m_fullActivityCallback->setEnabled(enable);
1852     if (m_edenActivityCallback)
1853         m_edenActivityCallback->setEnabled(enable);
1854 }
1855
1856 void Heap::didAllocate(size_t bytes)
1857 {
1858     if (m_edenActivityCallback)
1859         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1860     m_bytesAllocatedThisCycle += bytes;
1861 }
1862
1863 bool Heap::isValidAllocation(size_t)
1864 {
1865     if (!isValidThreadState(m_vm))
1866         return false;
1867
1868     if (isCurrentThreadBusy())
1869         return false;
1870     
1871     return true;
1872 }
1873
1874 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1875 {
1876     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1877 }
1878
1879 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1880 {
1881     HandleSlot slot = handle.slot();
1882     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1883     finalizer(slot->asCell());
1884     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1885 }
1886
1887 void Heap::addExecutable(ExecutableBase* executable)
1888 {
1889     m_executables.append(executable);
1890 }
1891
1892 void Heap::collectAllGarbageIfNotDoneRecently()
1893 {
1894     if (!m_fullActivityCallback) {
1895         collectAllGarbage();
1896         return;
1897     }
1898
1899     if (m_fullActivityCallback->didSyncGCRecently()) {
1900         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1901         reportAbandonedObjectGraph();
1902         return;
1903     }
1904
1905     m_fullActivityCallback->setDidSyncGCRecently();
1906     collectAllGarbage();
1907 }
1908
1909 class Zombify : public MarkedBlock::VoidFunctor {
1910 public:
1911     inline void visit(HeapCell* cell) const
1912     {
1913         void** current = reinterpret_cast_ptr<void**>(cell);
1914
1915         // We want to maintain zapped-ness because that's how we know if we've called 
1916         // the destructor.
1917         if (cell->isZapped())
1918             current++;
1919
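        // Overwrite the rest of the cell with zombifiedBits so that any later use of this dead object
        // is easy to recognize (and likely to crash) when zombie mode is enabled.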
1920         void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + cell->cellSize());
1921         for (; current < limit; current++)
1922             *current = zombifiedBits;
1923     }
1924     IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
1925     {
1926         visit(cell);
1927         return IterationStatus::Continue;
1928     }
1929 };
1930
1931 void Heap::zombifyDeadObjects()
1932 {
1933     // Sweep now, because running destructors would crash once the objects are zombified.
1934     m_objectSpace.sweep();
1935     HeapIterationScope iterationScope(*this);
1936     m_objectSpace.forEachDeadCell(iterationScope, Zombify());
1937 }
1938
1939 bool Heap::shouldDoFullCollection(std::optional<CollectionScope> scope) const
1940 {
1941     if (!Options::useGenerationalGC())
1942         return true;
1943
1944     if (!scope)
1945         return m_shouldDoFullCollection;
1946     return *scope == CollectionScope::Full;
1947 }
1948
1949 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1950 {
1951     m_logicallyEmptyWeakBlocks.append(block);
1952 }
1953
1954 void Heap::sweepAllLogicallyEmptyWeakBlocks()
1955 {
1956     if (m_logicallyEmptyWeakBlocks.isEmpty())
1957         return;
1958
1959     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1960     while (sweepNextLogicallyEmptyWeakBlock()) { }
1961 }
1962
1963 bool Heap::sweepNextLogicallyEmptyWeakBlock()
1964 {
1965     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1966         return false;
1967
1968     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1969
1970     block->sweep();
1971     if (block->isEmpty()) {
1972         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1973         m_logicallyEmptyWeakBlocks.removeLast();
1974         WeakBlock::destroy(*this, block);
1975     } else
1976         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1977
1978     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1979         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1980         return false;
1981     }
1982
1983     return true;
1984 }
1985
1986 size_t Heap::threadVisitCount()
1987 {
1988     size_t result = 0;
1989     for (auto& parallelVisitor : m_parallelSlotVisitors)
1990         result += parallelVisitor->visitCount();
1991     return result;
1992 }
1993
1994 size_t Heap::bytesVisited()
1995 {
1996     return m_collectorSlotVisitor->bytesVisited() + threadBytesVisited();
1997 }
1998
1999 size_t Heap::threadBytesVisited()
2000 {
2001     size_t result = 0;
2002     for (auto& parallelVisitor : m_parallelSlotVisitors)
2003         result += parallelVisitor->bytesVisited();
2004     return result;
2005 }
2006
2007 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2008 {
2009     // We don't know the full set of CodeBlocks until compilation has terminated.
2010     completeAllJITPlans();
2011
2012     return m_codeBlocks->iterate(func);
2013 }
2014
2015 void Heap::writeBarrierSlowPath(const JSCell* from)
2016 {
2017     if (UNLIKELY(mutatorShouldBeFenced())) {
2018         // In this case, the barrierThreshold is the tautological threshold, so from may still be
2019         // non-black. But we can't know for sure until we execute a fence.
2020         WTF::storeLoadFence();
2021         if (from->cellState() != CellState::PossiblyBlack)
2022             return;
2023     }
2024     
2025     addToRememberedSet(from);
2026 }
2027
2028 bool Heap::isCurrentThreadBusy()
2029 {
2030     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2031 }
2032
2033 void Heap::reportExtraMemoryVisited(size_t size)
2034 {
2035     size_t* counter = &m_extraMemorySize;
2036     
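    // CAS loop: retry until our addition lands, since parallel visitor threads may be racing to bump
    // the same counter.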
2037     for (;;) {
2038         size_t oldSize = *counter;
2039         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2040             return;
2041     }
2042 }
2043
2044 #if ENABLE(RESOURCE_USAGE)
2045 void Heap::reportExternalMemoryVisited(size_t size)
2046 {
2047     size_t* counter = &m_externalMemorySize;
2048
2049     for (;;) {
2050         size_t oldSize = *counter;
2051         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2052             return;
2053     }
2054 }
2055 #endif
2056
2057 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2058 {
2059     ASSERT(!DisallowGC::isGCDisallowedOnCurrentThread());
2060
2061     if (!m_isSafeToCollect)
2062         return;
2063     if (mutatorState() == MutatorState::HelpingGC)
2064         return;
2065     if (!Options::useGC())
2066         return;
2067     
2068     if (mayNeedToStop()) {
2069         if (deferralContext)
2070             deferralContext->m_shouldGC = true;
2071         else if (isDeferred())
2072             m_didDeferGCWork = true;
2073         else {
2074             stopIfNecessary();
2075             // FIXME: Check if the scheduler wants us to stop.
2076             // https://bugs.webkit.org/show_bug.cgi?id=166827
2077         }
2078     }
2079     
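    // Only request a collection once this cycle's allocations exceed the budget: the gcMaxHeapSize
    // option if it is set, otherwise the eden size computed by updateAllocationLimits().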
2080     if (UNLIKELY(Options::gcMaxHeapSize())) {
2081         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2082             return;
2083     } else {
2084         if (m_bytesAllocatedThisCycle <= m_maxEdenSize)
2085             return;
2086     }
2087
2088     if (deferralContext)
2089         deferralContext->m_shouldGC = true;
2090     else if (isDeferred())
2091         m_didDeferGCWork = true;
2092     else
2093         collectAsync();
2094 }
2095
2096 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2097 {
2098     // Can't do anything if we're still deferred.
2099     if (m_deferralDepth)
2100         return;
2101     
2102     ASSERT(!isDeferred());
2103     
2104     m_didDeferGCWork = false;
2105     // FIXME: Bring back something like the DeferGCProbability mode.
2106     // https://bugs.webkit.org/show_bug.cgi?id=166627
2107     collectIfNecessaryOrDefer();
2108 }
2109
2110 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2111 {
2112     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2113 }
2114
2115 void Heap::unregisterWeakGCMap(void* weakGCMap)
2116 {
2117     m_weakGCMaps.remove(weakGCMap);
2118 }
2119
2120 void Heap::didAllocateBlock(size_t capacity)
2121 {
2122 #if ENABLE(RESOURCE_USAGE)
2123     m_blockBytesAllocated += capacity;
2124 #else
2125     UNUSED_PARAM(capacity);
2126 #endif
2127 }
2128
2129 void Heap::didFreeBlock(size_t capacity)
2130 {
2131 #if ENABLE(RESOURCE_USAGE)
2132     m_blockBytesAllocated -= capacity;
2133 #else
2134     UNUSED_PARAM(capacity);
2135 #endif
2136 }
2137
2138 #if USE(CF)
2139 void Heap::setRunLoop(CFRunLoopRef runLoop)
2140 {
2141     m_runLoop = runLoop;
2142     m_fullActivityCallback->setRunLoop(runLoop);
2143     m_edenActivityCallback->setRunLoop(runLoop);
2144     m_sweeper->setRunLoop(runLoop);
2145 }
2146 #endif // USE(CF)
2147
2148 void Heap::addCoreConstraints()
2149 {
2150     m_constraintSet->add(
2151         "Cs", "Conservative Scan",
2152         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2153             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2154             m_objectSpace.prepareForConservativeScan();
2155             ConservativeRoots conservativeRoots(*this);
2156             SuperSamplerScope superSamplerScope(false);
2157             gatherStackRoots(conservativeRoots);
2158             gatherJSStackRoots(conservativeRoots);
2159             gatherScratchBufferRoots(conservativeRoots);
2160             slotVisitor.append(conservativeRoots);
2161         },
2162         ConstraintVolatility::GreyedByExecution);
2163     
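    // Roughly speaking, ConstraintVolatility tells the fixpoint how often a constraint can produce
    // new work: GreyedByExecution constraints may be re-greyed whenever the mutator runs,
    // GreyedByMarking constraints produce more work as marking itself progresses, and SeldomGreyed
    // constraints rarely produce anything and are best revisited late.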
2164     m_constraintSet->add(
2165         "Msr", "Misc Small Roots",
2166         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2167 #if JSC_OBJC_API_ENABLED
2168             scanExternalRememberedSet(*m_vm, slotVisitor);
2169 #endif
2170
2171             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2172                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2173             
2174             for (auto& pair : m_protectedValues)
2175                 slotVisitor.appendUnbarriered(pair.key);
2176             
2177             if (m_markListSet && m_markListSet->size())
2178                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2179             
2180             slotVisitor.appendUnbarriered(m_vm->exception());
2181             slotVisitor.appendUnbarriered(m_vm->lastException());
2182         },
2183         ConstraintVolatility::GreyedByExecution);
2184     
2185     m_constraintSet->add(
2186         "Sh", "Strong Handles",
2187         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2188             m_handleSet.visitStrongHandles(slotVisitor);
2189             m_handleStack.visit(slotVisitor);
2190         },
2191         ConstraintVolatility::GreyedByExecution);
2192     
2193     m_constraintSet->add(
2194         "D", "Debugger",
2195         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2196 #if ENABLE(SAMPLING_PROFILER)
2197             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2198                 LockHolder locker(samplingProfiler->getLock());
2199                 samplingProfiler->processUnverifiedStackTraces();
2200                 samplingProfiler->visit(slotVisitor);
2201                 if (Options::logGC() == GCLogging::Verbose)
2202                     dataLog("Sampling Profiler data:\n", slotVisitor);
2203             }
2204 #endif // ENABLE(SAMPLING_PROFILER)
2205             
2206             if (m_vm->typeProfiler())
2207                 m_vm->typeProfilerLog()->visit(slotVisitor);
2208             
2209             m_vm->shadowChicken().visitChildren(slotVisitor);
2210         },
2211         ConstraintVolatility::GreyedByExecution);
2212     
2213     m_constraintSet->add(
2214         "Jsr", "JIT Stub Routines",
2215         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2216             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2217         },
2218         ConstraintVolatility::GreyedByExecution);
2219     
2220     m_constraintSet->add(
2221         "Ws", "Weak Sets",
2222         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2223             m_objectSpace.visitWeakSets(slotVisitor);
2224         },
2225         ConstraintVolatility::GreyedByMarking);
2226     
2227     m_constraintSet->add(
2228         "Wrh", "Weak Reference Harvesters",
2229         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2230             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2231                 current->visitWeakReferences(slotVisitor);
2232         },
2233         ConstraintVolatility::GreyedByMarking);
2234     
2235 #if ENABLE(DFG_JIT)
2236     m_constraintSet->add(
2237         "Dw", "DFG Worklists",
2238         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2239             for (unsigned i = DFG::numberOfWorklists(); i--;)
2240                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2241             
2242             // FIXME: This is almost certainly unnecessary.
2243             // https://bugs.webkit.org/show_bug.cgi?id=166829
2244             DFG::iterateCodeBlocksForGC(
2245                 *m_vm,
2246                 [&] (CodeBlock* codeBlock) {
2247                     slotVisitor.appendUnbarriered(codeBlock);
2248                 });
2249             
2250             if (Options::logGC() == GCLogging::Verbose)
2251                 dataLog("DFG Worklists:\n", slotVisitor);
2252         },
2253         ConstraintVolatility::GreyedByMarking);
2254 #endif
2255     
2256     m_constraintSet->add(
2257         "Cb", "CodeBlocks",
2258         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2259             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2260                 [&] (CodeBlock* codeBlock) {
2261                     // Visit the CodeBlock as a constraint only if it's black.
2262                     if (Heap::isMarked(codeBlock)
2263                         && codeBlock->cellState() == CellState::PossiblyBlack)
2264                         slotVisitor.visitAsConstraint(codeBlock);
2265                 });
2266         },
2267         ConstraintVolatility::SeldomGreyed);
2268     
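    // The "Mrms" constraint below drains whatever the mutator barriers queued on the mutator and race
    // mark stacks into the collector's visitor. The second lambda appears to give the solver a cheap
    // estimate of how much work this constraint would add, without actually executing it.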
2269     m_constraintSet->add(
2270         "Mrms", "Mutator+Race Mark Stack",
2271         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2272             // Indicate to the fixpoint that we introduced work!
2273             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2274             slotVisitor.addToVisitCount(size);
2275             
2276             if (Options::logGC())
2277                 dataLog("(", size, ")");
2278             
2279             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2280             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2281         },
2282         [this] (SlotVisitor&) -> double {
2283             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2284         },
2285         ConstraintVolatility::GreyedByExecution);
2286 }
2287
2288 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2289 {
2290     PreventCollectionScope preventCollectionScope(*this);
2291     m_constraintSet->add(WTFMove(constraint));
2292 }
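// Sketch of how an embedder-defined constraint could plug in through addMarkingConstraint(). The
// lambda-based shape mirrors the m_constraintSet->add() calls in addCoreConstraints() above, but the
// factory name and exact constructor here are hypothetical, not the actual MarkingConstraint API:
//
//     heap.addMarkingConstraint(makeLambdaMarkingConstraint(
//         "Or", "Opaque Roots",
//         [&] (SlotVisitor& visitor, const VisitingTimeout&) {
//             // Re-visit anything whose liveness depends on an opaque root that is now marked.
//         },
//         ConstraintVolatility::GreyedByMarking));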
2293
2294 void Heap::notifyIsSafeToCollect()
2295 {
2296     addCoreConstraints();
2297     
2298     m_isSafeToCollect = true;
2299     
2300     if (Options::collectContinuously()) {
2301         m_collectContinuouslyThread = createThread(
2302             "JSC DEBUG Continuous GC",
2303             [this] () {
2304                 MonotonicTime initialTime = MonotonicTime::now();
2305                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2306                 while (!m_shouldStopCollectingContinuously) {
2307                     {
2308                         LockHolder locker(*m_threadLock);
2309                         if (m_requests.isEmpty()) {
2310                             m_requests.append(std::nullopt);
2311                             m_lastGrantedTicket++;
2312                             m_threadCondition->notifyOne(locker);
2313                         }
2314                     }
2315                     
2316                     {
2317                         LockHolder locker(m_collectContinuouslyLock);
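                        // Round the wake-up time up to the next multiple of the collection period
                        // (measured from initialTime), so requests are issued at a steady cadence.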
2318                         Seconds elapsed = MonotonicTime::now() - initialTime;
2319                         Seconds elapsedInPeriod = elapsed % period;
2320                         MonotonicTime timeToWakeUp =
2321                             initialTime + elapsed - elapsedInPeriod + period;
2322                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2323                             m_collectContinuouslyCondition.waitUntil(
2324                                 m_collectContinuouslyLock, timeToWakeUp);
2325                         }
2326                     }
2327                 }
2328             });
2329     }
2330 }
2331
2332 void Heap::preventCollection()
2333 {
2334     if (!m_isSafeToCollect)
2335         return;
2336     
2337     // This prevents the collectContinuously thread from starting a collection.
2338     m_collectContinuouslyLock.lock();
2339     
2340     // Wait for all collections to finish.
2341     waitForCollector(
2342         [&] (const LockHolder&) -> bool {
2343             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2344             return m_lastServedTicket == m_lastGrantedTicket;
2345         });
2346     
2347     // Now a collection can only start if this thread starts it.
2348     RELEASE_ASSERT(!m_collectionScope);
2349 }
2350
2351 void Heap::allowCollection()
2352 {
2353     if (!m_isSafeToCollect)
2354         return;
2355     
2356     m_collectContinuouslyLock.unlock();
2357 }
2358
2359 template<typename Func>
2360 void Heap::forEachSlotVisitor(const Func& func)
2361 {
2362     auto locker = holdLock(m_parallelSlotVisitorLock);
2363     func(*m_collectorSlotVisitor);
2364     for (auto& slotVisitor : m_parallelSlotVisitors)
2365         func(*slotVisitor);
2366 }
2367
2368 void Heap::setMutatorShouldBeFenced(bool value)
2369 {
2370     m_mutatorShouldBeFenced = value;
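    // With the tautological threshold the barrier's fast-path check never filters anything out, so
    // every barrier reaches writeBarrierSlowPath(), which re-checks the cell state after a fence.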
2371     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2372 }
2373     
2374 } // namespace JSC