1 /*
2  *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSetInlines.h"
26 #include "CollectingScope.h"
27 #include "ConservativeRoots.h"
28 #include "DFGWorklistInlines.h"
29 #include "EdenGCActivityCallback.h"
30 #include "Exception.h"
31 #include "FullGCActivityCallback.h"
32 #include "GCActivityCallback.h"
33 #include "GCIncomingRefCountedSetInlines.h"
34 #include "GCSegmentedArrayInlines.h"
35 #include "GCTypeMap.h"
36 #include "HasOwnPropertyCache.h"
37 #include "HeapHelperPool.h"
38 #include "HeapIterationScope.h"
39 #include "HeapProfiler.h"
40 #include "HeapSnapshot.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "JSWebAssemblyCodeBlock.h"
51 #include "MachineStackMarker.h"
52 #include "MarkedAllocatorInlines.h"
53 #include "MarkedSpaceInlines.h"
54 #include "MarkingConstraintSet.h"
55 #include "PreventCollectionScope.h"
56 #include "SamplingProfiler.h"
57 #include "ShadowChicken.h"
58 #include "SpaceTimeMutatorScheduler.h"
59 #include "SubspaceInlines.h"
60 #include "SuperSampler.h"
61 #include "StochasticSpaceTimeMutatorScheduler.h"
62 #include "StopIfNecessaryTimer.h"
63 #include "SweepingScope.h"
64 #include "SynchronousStopTheWorldMutatorScheduler.h"
65 #include "TypeProfiler.h"
66 #include "TypeProfilerLog.h"
67 #include "UnlinkedCodeBlock.h"
68 #include "VM.h"
69 #include "WasmMemory.h"
70 #include "WeakSetInlines.h"
71 #include <algorithm>
72 #if PLATFORM(IOS)
73 #include <bmalloc/bmalloc.h>
74 #include <sys/sysctl.h>
75 #endif
76 #include <wtf/CurrentTime.h>
77 #include <wtf/ListDump.h>
78 #include <wtf/MainThread.h>
79 #include <wtf/ParallelVectorIterator.h>
80 #include <wtf/ProcessID.h>
81 #include <wtf/RAMSize.h>
82 #include <wtf/SimpleStats.h>
83 #include <wtf/Threading.h>
84
85 #if USE(FOUNDATION)
86 #if __has_include(<objc/objc-internal.h>)
87 #include <objc/objc-internal.h>
88 #else
89 extern "C" void* objc_autoreleasePoolPush(void);
90 extern "C" void objc_autoreleasePoolPop(void *context);
91 #endif
92 #endif // USE(FOUNDATION)
93
94 using namespace std;
95
96 namespace JSC {
97
98 namespace {
99
100 bool verboseStop = false;
101
102 double maxPauseMS(double thisPauseMS)
103 {
104     static double maxPauseMS;
105     maxPauseMS = std::max(thisPauseMS, maxPauseMS);
106     return maxPauseMS;
107 }
108
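// Returns the smallest heap size (and thus the initial collection threshold) for this heap type:
// for a LargeHeap it is Options::largeHeapSize() capped at a fraction of physical RAM; for a
// SmallHeap it is simply Options::smallHeapSize().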
109 size_t minHeapSize(HeapType heapType, size_t ramSize)
110 {
111     if (heapType == LargeHeap) {
112         double result = min(
113             static_cast<double>(Options::largeHeapSize()),
114             ramSize * Options::smallHeapRAMFraction());
115         return static_cast<size_t>(result);
116     }
117     return Options::smallHeapSize();
118 }
119
120 #if PLATFORM(IOS)
121 static bool useAggressiveGCTrigger()
122 {
123     static bool useAggressiveGCTrigger;
124     static std::once_flag once;
125     std::call_once(once, [] {
126         useAggressiveGCTrigger = false;
127
128         if (Options::forceAggressiveGCTrigger()) {
129             useAggressiveGCTrigger = true;
130             return;
131         }
132
133         uint64_t memSizeInBytes;
134         size_t sizeofMemSize = sizeof(memSizeInBytes);
135         if (sysctlbyname("hw.memsize", &memSizeInBytes, &sizeofMemSize, nullptr, 0))
136             return;
137         useAggressiveGCTrigger = memSizeInBytes <= 1 * GB;
138     });
139
140     return useAggressiveGCTrigger;
141 }
142 #endif
143
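// Given the heap size measured at the end of a collection, computes the heap size that will trigger
// the next collection. The growth factor applied depends on how the heap size (or, on iOS, the
// process memory footprint) compares to fractions of physical RAM.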
144 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
145 {
146 #if PLATFORM(IOS)
147     if (useAggressiveGCTrigger()) {
148         double memoryUsed = bmalloc::api::percentAvailableMemoryInUse();
149         double result = ((1 - memoryUsed) / Options::aggressiveGCTriggerScalingValue()) + 1;
150         return heapSize * std::max(std::min(result, Options::aggressiveGCTriggerMaxMultiplier()), Options::aggressiveGCTriggerMinMultiplier());
151     }
152
153     size_t memoryFootprint = bmalloc::api::memoryFootprint();
154     if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
155         return Options::smallHeapGrowthFactor() * heapSize;
156     if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
157         return Options::mediumHeapGrowthFactor() * heapSize;
158 #else
159     if (heapSize < ramSize * Options::smallHeapRAMFraction())
160         return Options::smallHeapGrowthFactor() * heapSize;
161     if (heapSize < ramSize * Options::mediumHeapRAMFraction())
162         return Options::mediumHeapGrowthFactor() * heapSize;
163 #endif
164     return Options::largeHeapGrowthFactor() * heapSize;
165 }
166
167 bool isValidSharedInstanceThreadState(VM* vm)
168 {
169     return vm->currentThreadIsHoldingAPILock();
170 }
171
172 bool isValidThreadState(VM* vm)
173 {
174     if (vm->atomicStringTable() != WTF::Thread::current().atomicStringTable())
175         return false;
176
177     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
178         return false;
179
180     return true;
181 }
182
183 void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
184 {
185     const char* typeName = "[unknown]";
186     const ClassInfo* info = cell->classInfo(vm);
187     if (info && info->className)
188         typeName = info->className;
189     set.add(typeName);
190 }
191
192 bool measurePhaseTiming()
193 {
194     return false;
195 }
196
197 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
198 {
199     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
200     static std::once_flag once;
201     std::call_once(
202         once,
203         [] {
204             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
205         });
206     return *result;
207 }
208
209 SimpleStats& timingStats(const char* name, CollectionScope scope)
210 {
211     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
212 }
213
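// RAII helper used for ad-hoc profiling of GC phases. When measurePhaseTiming() is enabled, it
// records how long the enclosing phase took and logs a running average per collection scope.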
214 class TimingScope {
215 public:
216     TimingScope(std::optional<CollectionScope> scope, const char* name)
217         : m_scope(scope)
218         , m_name(name)
219     {
220         if (measurePhaseTiming())
221             m_before = monotonicallyIncreasingTimeMS();
222     }
223     
224     TimingScope(Heap& heap, const char* name)
225         : TimingScope(heap.collectionScope(), name)
226     {
227     }
228     
229     void setScope(std::optional<CollectionScope> scope)
230     {
231         m_scope = scope;
232     }
233     
234     void setScope(Heap& heap)
235     {
236         setScope(heap.collectionScope());
237     }
238     
239     ~TimingScope()
240     {
241         if (measurePhaseTiming()) {
242             double after = monotonicallyIncreasingTimeMS();
243             double timing = after - m_before;
244             SimpleStats& stats = timingStats(m_name, *m_scope);
245             stats.add(timing);
246             dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing, "ms (average ", stats.mean(), "ms).\n");
247         }
248     }
249 private:
250     std::optional<CollectionScope> m_scope;
251     double m_before;
252     const char* m_name;
253 };
254
255 } // anonymous namespace
256
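// The dedicated collector thread. It sleeps on the heap's thread condition and wakes up to run
// collectInCollectorThread() whenever a collection has been requested and the mutator does not
// hold the conn.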
257 class Heap::Thread : public AutomaticThread {
258 public:
259     Thread(const AbstractLocker& locker, Heap& heap)
260         : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition)
261         , m_heap(heap)
262     {
263     }
264     
265 protected:
266     PollResult poll(const AbstractLocker& locker) override
267     {
268         if (m_heap.m_threadShouldStop) {
269             m_heap.notifyThreadStopping(locker);
270             return PollResult::Stop;
271         }
272         if (m_heap.shouldCollectInCollectorThread(locker))
273             return PollResult::Work;
274         return PollResult::Wait;
275     }
276     
277     WorkResult work() override
278     {
279         m_heap.collectInCollectorThread();
280         return WorkResult::Continue;
281     }
282     
283     void threadDidStart() override
284     {
285         WTF::registerGCThread(GCThreadType::Main);
286     }
287
288 private:
289     Heap& m_heap;
290 };
291
292 Heap::Heap(VM* vm, HeapType heapType)
293     : m_heapType(heapType)
294     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
295     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
296     , m_sizeAfterLastCollect(0)
297     , m_sizeAfterLastFullCollect(0)
298     , m_sizeBeforeLastFullCollect(0)
299     , m_sizeAfterLastEdenCollect(0)
300     , m_sizeBeforeLastEdenCollect(0)
301     , m_bytesAllocatedThisCycle(0)
302     , m_bytesAbandonedSinceLastFullCollect(0)
303     , m_maxEdenSize(m_minBytesPerCycle)
304     , m_maxHeapSize(m_minBytesPerCycle)
305     , m_shouldDoFullCollection(false)
306     , m_totalBytesVisited(0)
307     , m_objectSpace(this)
308     , m_extraMemorySize(0)
309     , m_deprecatedExtraMemorySize(0)
310     , m_machineThreads(std::make_unique<MachineThreads>())
311     , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
312     , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
313     , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
314     , m_raceMarkStack(std::make_unique<MarkStackArray>())
315     , m_constraintSet(std::make_unique<MarkingConstraintSet>())
316     , m_handleSet(vm)
317     , m_codeBlocks(std::make_unique<CodeBlockSet>())
318     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
319     , m_isSafeToCollect(false)
320     , m_vm(vm)
321     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
322     // schedule the timer if we've never done a collection.
323     , m_lastFullGCLength(0.01)
324     , m_lastEdenGCLength(0.01)
325     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
326     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
327     , m_sweeper(adoptRef(new IncrementalSweeper(this)))
328     , m_stopIfNecessaryTimer(adoptRef(new StopIfNecessaryTimer(vm)))
329     , m_deferralDepth(0)
330 #if USE(FOUNDATION)
331     , m_delayedReleaseRecursionCount(0)
332 #endif
333     , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
334     , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
335     , m_helperClient(&heapHelperPool())
336     , m_threadLock(Box<Lock>::create())
337     , m_threadCondition(AutomaticThreadCondition::create())
338 {
339     m_worldState.store(0);
340     
341     if (Options::useConcurrentGC()) {
342         if (Options::useStochasticMutatorScheduler())
343             m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
344         else
345             m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
346     } else {
347         // We simulate turning off concurrent GC by making the scheduler say that the world
348         // should always be stopped when the collector is running.
349         m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
350     }
351     
352     if (Options::verifyHeap())
353         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
354     
355     m_collectorSlotVisitor->optimizeForStoppedMutator();
356
357     // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
358     size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
359     m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
360
361     LockHolder locker(*m_threadLock);
362     m_thread = adoptRef(new Thread(locker, *this));
363 }
364
365 Heap::~Heap()
366 {
367     forEachSlotVisitor(
368         [&] (SlotVisitor& visitor) {
369             visitor.clearMarkStacks();
370         });
371     m_mutatorMarkStack->clear();
372     m_raceMarkStack->clear();
373     
374     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
375         WeakBlock::destroy(*this, block);
376 }
377
378 bool Heap::isPagedOut(double deadline)
379 {
380     return m_objectSpace.isPagedOut(deadline);
381 }
382
383 // The VM is being destroyed and the collector will never run again.
384 // Run all pending finalizers now because we won't get another chance.
385 void Heap::lastChanceToFinalize()
386 {
387     MonotonicTime before;
388     if (Options::logGC()) {
389         before = MonotonicTime::now();
390         dataLog("[GC<", RawPointer(this), ">: shutdown ");
391     }
392     
393     RELEASE_ASSERT(!m_vm->entryScope);
394     RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
395     
396     if (m_collectContinuouslyThread) {
397         {
398             LockHolder locker(m_collectContinuouslyLock);
399             m_shouldStopCollectingContinuously = true;
400             m_collectContinuouslyCondition.notifyOne();
401         }
402         m_collectContinuouslyThread->waitForCompletion();
403     }
404     
405     if (Options::logGC())
406         dataLog("1");
407     
408     // Prevent new collections from being started. This is probably not even necessary, since we're not
409     // going to call into anything that starts collections. Still, this makes the algorithm more
410     // obviously sound.
411     m_isSafeToCollect = false;
412     
413     if (Options::logGC())
414         dataLog("2");
415
416     bool isCollecting;
417     {
418         auto locker = holdLock(*m_threadLock);
419         RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
420         isCollecting = m_lastServedTicket < m_lastGrantedTicket;
421     }
422     if (isCollecting) {
423         if (Options::logGC())
424             dataLog("...]\n");
425         
426         // Wait for the current collection to finish.
427         waitForCollector(
428             [&] (const AbstractLocker&) -> bool {
429                 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
430                 return m_lastServedTicket == m_lastGrantedTicket;
431             });
432         
433         if (Options::logGC())
434             dataLog("[GC<", RawPointer(this), ">: shutdown ");
435     }
436     if (Options::logGC())
437         dataLog("3");
438
439     RELEASE_ASSERT(m_requests.isEmpty());
440     RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
441     
442     // Carefully bring the thread down.
443     bool stopped = false;
444     {
445         LockHolder locker(*m_threadLock);
446         stopped = m_thread->tryStop(locker);
447         m_threadShouldStop = true;
448         if (!stopped)
449             m_threadCondition->notifyOne(locker);
450     }
451
452     if (Options::logGC())
453         dataLog("4");
454     
455     if (!stopped)
456         m_thread->join();
457     
458     if (Options::logGC())
459         dataLog("5 ");
460     
461     m_arrayBuffers.lastChanceToFinalize();
462     m_codeBlocks->lastChanceToFinalize(*m_vm);
463     m_objectSpace.stopAllocating();
464     m_objectSpace.lastChanceToFinalize();
465     releaseDelayedReleasedObjects();
466
467     sweepAllLogicallyEmptyWeakBlocks();
468     
469     m_objectSpace.freeMemory();
470     
471     if (Options::logGC())
472         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
473 }
474
475 void Heap::releaseDelayedReleasedObjects()
476 {
477 #if USE(FOUNDATION)
478     // We need to guard against the case that releasing an object can create more objects due to the
479     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
480     // back here and could try to recursively release objects. We guard that with a recursive entry
481     // count. Only the initial call will release objects; recursive calls simply return and let the
482     // initial call to the function take care of any objects created during release time.
483     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
484     // and use a temp Vector for the actual releasing.
485     if (!m_delayedReleaseRecursionCount++) {
486         while (!m_delayedReleaseObjects.isEmpty()) {
487             ASSERT(m_vm->currentThreadIsHoldingAPILock());
488
489             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
490
491             {
492                 // We need to drop locks before calling out to arbitrary code.
493                 JSLock::DropAllLocks dropAllLocks(m_vm);
494
495                 void* context = objc_autoreleasePoolPush();
496                 objectsToRelease.clear();
497                 objc_autoreleasePoolPop(context);
498             }
499         }
500     }
501     m_delayedReleaseRecursionCount--;
502 #endif
503 }
504
505 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
506 {
507     didAllocate(size);
508     collectIfNecessaryOrDefer();
509 }
510
511 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
512 {
513     // FIXME: Change this to use SaturatedArithmetic when available.
514     // https://bugs.webkit.org/show_bug.cgi?id=170411
515     Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
516     checkedNewSize += size;
517     m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
518     reportExtraMemoryAllocatedSlowCase(size);
519 }
520
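// On iOS this caches whether the process is above the critical memory threshold; the bmalloc query
// is refreshed on a Direct call or once every 100 Cached calls. On other platforms it is always false.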
521 bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
522 {
523 #if PLATFORM(IOS)
524     if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
525         m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
526         m_precentAvailableMemoryCachedCallCount = 0;
527     }
528
529     return m_overCriticalMemoryThreshold;
530 #else
531     UNUSED_PARAM(memoryThresholdCallType);
532     return false;
533 #endif
534 }
535
536 void Heap::reportAbandonedObjectGraph()
537 {
538     // Our clients don't know exactly how much memory they
539     // are abandoning so we just guess for them.
540     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
541
542     // We want to accelerate the next collection. Because memory has just 
543     // been abandoned, the next collection has the potential to 
544     // be more profitable. Since allocation is the trigger for collection, 
545     // we hasten the next collection by pretending that we've allocated more memory. 
546     if (m_fullActivityCallback) {
547         m_fullActivityCallback->didAllocate(
548             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
549     }
550     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
551 }
552
553 void Heap::protect(JSValue k)
554 {
555     ASSERT(k);
556     ASSERT(m_vm->currentThreadIsHoldingAPILock());
557
558     if (!k.isCell())
559         return;
560
561     m_protectedValues.add(k.asCell());
562 }
563
564 bool Heap::unprotect(JSValue k)
565 {
566     ASSERT(k);
567     ASSERT(m_vm->currentThreadIsHoldingAPILock());
568
569     if (!k.isCell())
570         return false;
571
572     return m_protectedValues.remove(k.asCell());
573 }
574
575 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
576 {
577     if (m_arrayBuffers.addReference(cell, buffer)) {
578         collectIfNecessaryOrDefer();
579         didAllocate(buffer->gcSizeEstimateInBytes());
580     }
581 }
582
583 void Heap::finalizeUnconditionalFinalizers()
584 {
585     while (m_unconditionalFinalizers.hasNext()) {
586         UnconditionalFinalizer* finalizer = m_unconditionalFinalizers.removeNext();
587         finalizer->finalizeUnconditionally();
588     }
589 }
590
591 void Heap::willStartIterating()
592 {
593     m_objectSpace.willStartIterating();
594 }
595
596 void Heap::didFinishIterating()
597 {
598     m_objectSpace.didFinishIterating();
599 }
600
601 void Heap::completeAllJITPlans()
602 {
603 #if ENABLE(JIT)
604     JITWorklist::instance()->completeAllForVM(*m_vm);
605 #endif // ENABLE(JIT)
606     DFG::completeAllPlansForVM(*m_vm);
607 }
608
609 template<typename Func>
610 void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
611 {
612     m_codeBlocks->iterateCurrentlyExecuting(func);
613     DFG::iterateCodeBlocksForGC(*m_vm, func);
614 }
615
616 template<typename Func>
617 void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
618 {
619     Vector<CodeBlock*, 256> codeBlocks;
620     iterateExecutingAndCompilingCodeBlocks(
621         [&] (CodeBlock* codeBlock) {
622             codeBlocks.append(codeBlock);
623         });
624     for (CodeBlock* codeBlock : codeBlocks)
625         func(codeBlock);
626 }
627
628 void Heap::assertSharedMarkStacksEmpty()
629 {
630     bool ok = true;
631     
632     if (!m_sharedCollectorMarkStack->isEmpty()) {
633         dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
634         ok = false;
635     }
636     
637     if (!m_sharedMutatorMarkStack->isEmpty()) {
638         dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
639         ok = false;
640     }
641     
642     RELEASE_ASSERT(ok);
643 }
644
645 void Heap::gatherStackRoots(ConservativeRoots& roots)
646 {
647     m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState);
648 }
649
650 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
651 {
652 #if !ENABLE(JIT)
653     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
654 #else
655     UNUSED_PARAM(roots);
656 #endif
657 }
658
659 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
660 {
661 #if ENABLE(DFG_JIT)
662     m_vm->gatherConservativeRoots(roots);
663 #else
664     UNUSED_PARAM(roots);
665 #endif
666 }
667
668 void Heap::beginMarking()
669 {
670     TimingScope timingScope(*this, "Heap::beginMarking");
671     if (m_collectionScope == CollectionScope::Full)
672         m_codeBlocks->clearMarksForFullCollection();
673     m_jitStubRoutines->clearMarks();
674     m_objectSpace.beginMarking();
675     setMutatorShouldBeFenced(true);
676 }
677
678 void Heap::removeDeadCompilerWorklistEntries()
679 {
680 #if ENABLE(DFG_JIT)
681     for (unsigned i = DFG::numberOfWorklists(); i--;)
682         DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
683 #endif
684 }
685
686 bool Heap::isHeapSnapshotting() const
687 {
688     HeapProfiler* heapProfiler = m_vm->heapProfiler();
689     if (UNLIKELY(heapProfiler))
690         return heapProfiler->activeSnapshotBuilder();
691     return false;
692 }
693
694 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
695     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
696         : m_builder(builder)
697     {
698     }
699
700     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
701     {
702         if (kind == HeapCell::JSCell) {
703             JSCell* cell = static_cast<JSCell*>(heapCell);
704             cell->methodTable()->heapSnapshot(cell, m_builder);
705         }
706         return IterationStatus::Continue;
707     }
708
709     HeapSnapshotBuilder& m_builder;
710 };
711
712 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
713 {
714     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
715         HeapIterationScope heapIterationScope(*this);
716         GatherHeapSnapshotData functor(*builder);
717         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
718     }
719 }
720
721 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
722     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
723         : m_snapshot(snapshot)
724     {
725     }
726
727     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
728     {
729         if (kind == HeapCell::JSCell)
730             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
731         return IterationStatus::Continue;
732     }
733
734     HeapSnapshot& m_snapshot;
735 };
736
737 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
738 {
739     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
740         HeapIterationScope heapIterationScope(*this);
741         RemoveDeadHeapSnapshotNodes functor(*snapshot);
742         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
743         snapshot->shrinkToFit();
744     }
745 }
746
747 void Heap::updateObjectCounts()
748 {
749     if (m_collectionScope == CollectionScope::Full)
750         m_totalBytesVisited = 0;
751
752     m_totalBytesVisitedThisCycle = bytesVisited();
753     
754     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
755 }
756
757 void Heap::endMarking()
758 {
759     forEachSlotVisitor(
760         [&] (SlotVisitor& visitor) {
761             visitor.reset();
762         });
763
764     assertSharedMarkStacksEmpty();
765     m_weakReferenceHarvesters.removeAll();
766
767     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
768     
769     m_objectSpace.endMarking();
770     setMutatorShouldBeFenced(Options::forceFencedBarrier());
771 }
772
773 size_t Heap::objectCount()
774 {
775     return m_objectSpace.objectCount();
776 }
777
778 size_t Heap::extraMemorySize()
779 {
780     // FIXME: Change this to use SaturatedArithmetic when available.
781     // https://bugs.webkit.org/show_bug.cgi?id=170411
782     Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
783     checkedTotal += m_deprecatedExtraMemorySize;
784     checkedTotal += m_arrayBuffers.size();
785     size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
786
787     ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
788     return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
789 }
790
791 size_t Heap::size()
792 {
793     return m_objectSpace.size() + extraMemorySize();
794 }
795
796 size_t Heap::capacity()
797 {
798     return m_objectSpace.capacity() + extraMemorySize();
799 }
800
801 size_t Heap::protectedGlobalObjectCount()
802 {
803     size_t result = 0;
804     forEachProtectedCell(
805         [&] (JSCell* cell) {
806             if (cell->isObject() && asObject(cell)->isGlobalObject())
807                 result++;
808         });
809     return result;
810 }
811
812 size_t Heap::globalObjectCount()
813 {
814     HeapIterationScope iterationScope(*this);
815     size_t result = 0;
816     m_objectSpace.forEachLiveCell(
817         iterationScope,
818         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
819             if (kind != HeapCell::JSCell)
820                 return IterationStatus::Continue;
821             JSCell* cell = static_cast<JSCell*>(heapCell);
822             if (cell->isObject() && asObject(cell)->isGlobalObject())
823                 result++;
824             return IterationStatus::Continue;
825         });
826     return result;
827 }
828
829 size_t Heap::protectedObjectCount()
830 {
831     size_t result = 0;
832     forEachProtectedCell(
833         [&] (JSCell*) {
834             result++;
835         });
836     return result;
837 }
838
839 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
840 {
841     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
842     forEachProtectedCell(
843         [&] (JSCell* cell) {
844             recordType(*vm(), *result, cell);
845         });
846     return result;
847 }
848
849 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
850 {
851     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
852     HeapIterationScope iterationScope(*this);
853     m_objectSpace.forEachLiveCell(
854         iterationScope,
855         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
856             if (kind == HeapCell::JSCell)
857                 recordType(*vm(), *result, static_cast<JSCell*>(cell));
858             return IterationStatus::Continue;
859         });
860     return result;
861 }
862
863 void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
864 {
865     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
866         return;
867     
868     PreventCollectionScope preventCollectionScope(*this);
869     
870     // If JavaScript is running, it's not safe to delete all JavaScript code, since
871     // we'll end up returning to deleted code.
872     RELEASE_ASSERT(!m_vm->entryScope);
873     RELEASE_ASSERT(!m_collectionScope);
874
875     completeAllJITPlans();
876
877     for (ExecutableBase* executable : m_executables)
878         executable->clearCode();
879
880 #if ENABLE(WEBASSEMBLY)
881     {
882         // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
883         // have no idea that we cleared the code from all of the Executables in the
884         // VM. This could leave Wasm in an inconsistent state where it has an IC that
885         // points into a CodeBlock that could be dead. The IC will still succeed because
886         // it uses a callee check, but then it will call into dead code.
887         HeapIterationScope heapIterationScope(*this);
888         m_vm->webAssemblyCodeBlockSpace.forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
889             ASSERT_UNUSED(kind, kind == HeapCell::Kind::JSCell);
890             JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
891             codeBlock->clearJSCallICs(*m_vm);
892         });
893     }
894 #endif
895 }
896
897 void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
898 {
899     if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
900         return;
901     
902     PreventCollectionScope preventCollectionScope(*this);
903
904     RELEASE_ASSERT(!m_collectionScope);
905     
906     for (ExecutableBase* current : m_executables) {
907         if (!current->isFunctionExecutable())
908             continue;
909         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
910     }
911 }
912
913 void Heap::clearUnmarkedExecutables()
914 {
915     for (unsigned i = m_executables.size(); i--;) {
916         ExecutableBase* current = m_executables[i];
917         if (isMarked(current))
918             continue;
919
920         // Eagerly dereference the Executable's JITCode in order to run watchpoint
921         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
922         current->clearCode();
923         std::swap(m_executables[i], m_executables.last());
924         m_executables.removeLast();
925     }
926
927     m_executables.shrinkToFit();
928 }
929
930 void Heap::deleteUnmarkedCompiledCode()
931 {
932     clearUnmarkedExecutables();
933     m_codeBlocks->deleteUnmarkedAndUnreferenced(*m_vm, *m_lastCollectionScope);
934     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
935 }
936
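// Write-barrier slow path. If the stored-to object has already been blackened by the collector, we
// grey it again and push it onto the mutator mark stack so the collector will rescan it. The fenced
// path also handles the full-collection race where the object turns out not to be marked at all.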
937 void Heap::addToRememberedSet(const JSCell* constCell)
938 {
939     JSCell* cell = const_cast<JSCell*>(constCell);
940     ASSERT(cell);
941     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
942     m_barriersExecuted++;
943     if (m_mutatorShouldBeFenced) {
944         WTF::loadLoadFence();
945         if (!isMarkedConcurrently(cell)) {
946             // During a full collection a store into an unmarked object that had survived past
947             // collections will manifest as a store to an unmarked PossiblyBlack object. If the
948             // object gets marked at some time after this then it will go down the normal marking
949             // path. So, we don't have to remember this object. We could return here. But we go
950             // further and attempt to re-white the object.
951             
952             RELEASE_ASSERT(m_collectionScope == CollectionScope::Full);
953             
954             if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
955                 // Now we protect against this race:
956                 //
957                 //     1) Object starts out black + unmarked.
958                 //     --> We do isMarkedConcurrently here.
959                 //     2) Object is marked and greyed.
960                 //     3) Object is scanned and blacked.
961                 //     --> We do atomicCompareExchangeCellStateStrong here.
962                 //
963                 // In this case we would have made the object white again, even though it should
964                 // be black. This check lets us correct our mistake. This relies on the fact that
965                 // isMarkedConcurrently converges monotonically to true.
966                 if (isMarkedConcurrently(cell)) {
967                     // It's difficult to work out whether the object should be grey or black at
968                     // this point. We say black conservatively.
969                     cell->setCellState(CellState::PossiblyBlack);
970                 }
971                 
972                 // Either way, we can return. Most likely, the object was not marked, and so the
973                 // object is now labeled white. This means that future barrier executions will not
974                 // fire. In the unlikely event that the object had become marked, we can still
975                 // return anyway, since we proved that the object was not marked at the time that
976                 // we executed this slow path.
977             }
978             
979             return;
980         }
981     } else
982         ASSERT(Heap::isMarkedConcurrently(cell));
983     // It could be that the object was *just* marked. This means that the collector may set the
984     // state to PossiblyGrey and then to PossiblyBlack at any time. It's OK for us to
985     // race with the collector here. If we win then this is accurate because the object _will_
986     // get scanned again. If we lose then someone else will barrier the object again. That would
987     // be unfortunate but not the end of the world.
988     cell->setCellState(CellState::PossiblyGrey);
989     m_mutatorMarkStack->append(cell);
990 }
991
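// Eagerly sweeps and shrinks the entire MarkedSpace on the calling thread, instead of leaving the
// work to the incremental sweeper.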
992 void Heap::sweepSynchronously()
993 {
994     double before = 0;
995     if (Options::logGC()) {
996         dataLog("Full sweep: ", capacity() / 1024, "kb ");
997         before = currentTimeMS();
998     }
999     m_objectSpace.sweep();
1000     m_objectSpace.shrink();
1001     if (Options::logGC()) {
1002         double after = currentTimeMS();
1003         dataLog("=> ", capacity() / 1024, "kb, ", after - before, "ms");
1004     }
1005 }
1006
1007 void Heap::collect(Synchronousness synchronousness, GCRequest request)
1008 {
1009     switch (synchronousness) {
1010     case Async:
1011         collectAsync(request);
1012         return;
1013     case Sync:
1014         collectSync(request);
1015         return;
1016     }
1017     RELEASE_ASSERT_NOT_REACHED();
1018 }
1019
1020 void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1021 {
1022     switch (synchronousness) {
1023     case Async: {
1024         collectAsync(request);
1025         stopIfNecessary();
1026         return;
1027     }
1028         
1029     case Sync: {
1030         collectSync(request);
1031         
1032         DeferGCForAWhile deferGC(*this);
1033         if (UNLIKELY(Options::useImmortalObjects()))
1034             sweeper().stopSweeping();
1035         
1036         bool alreadySweptInCollectSync = Options::sweepSynchronously();
1037         if (!alreadySweptInCollectSync) {
1038             if (Options::logGC())
1039                 dataLog("[GC<", RawPointer(this), ">: ");
1040             sweepSynchronously();
1041             if (Options::logGC())
1042                 dataLog("]\n");
1043         }
1044         m_objectSpace.assertNoUnswept();
1045         
1046         sweepAllLogicallyEmptyWeakBlocks();
1047         return;
1048     } }
1049     RELEASE_ASSERT_NOT_REACHED();
1050 }
1051
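// Posts a GC request to the collector without waiting for it, unless an equivalent (subsuming)
// request is already queued.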
1052 void Heap::collectAsync(GCRequest request)
1053 {
1054     if (!m_isSafeToCollect)
1055         return;
1056
1057     bool alreadyRequested = false;
1058     {
1059         LockHolder locker(*m_threadLock);
1060         for (const GCRequest& previousRequest : m_requests) {
1061             if (request.subsumedBy(previousRequest)) {
1062                 alreadyRequested = true;
1063                 break;
1064             }
1065         }
1066     }
1067     if (alreadyRequested)
1068         return;
1069
1070     requestCollection(request);
1071 }
1072
1073 void Heap::collectSync(GCRequest request)
1074 {
1075     if (!m_isSafeToCollect)
1076         return;
1077     
1078     waitForCollection(requestCollection(request));
1079 }
1080
1081 bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1082 {
1083     RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1084     RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1085     
1086     if (false)
1087         dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1088     
1089     return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1090 }
1091
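// Drives the collector state machine from the collector thread until the current collection either
// finishes or hands the conn back to the mutator.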
1092 void Heap::collectInCollectorThread()
1093 {
1094     for (;;) {
1095         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1096         switch (result) {
1097         case RunCurrentPhaseResult::Finished:
1098             return;
1099         case RunCurrentPhaseResult::Continue:
1100             break;
1101         case RunCurrentPhaseResult::NeedCurrentThreadState:
1102             RELEASE_ASSERT_NOT_REACHED();
1103             break;
1104         }
1105     }
1106 }
1107
1108 void Heap::checkConn(GCConductor conn)
1109 {
1110     switch (conn) {
1111     case GCConductor::Mutator:
1112         RELEASE_ASSERT(m_worldState.load() & mutatorHasConnBit);
1113         return;
1114     case GCConductor::Collector:
1115         RELEASE_ASSERT(!(m_worldState.load() & mutatorHasConnBit));
1116         return;
1117     }
1118     RELEASE_ASSERT_NOT_REACHED();
1119 }
1120
1121 auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1122 {
1123     checkConn(conn);
1124     m_currentThreadState = currentThreadState;
1125     
1126     // If the collector transfers the conn to the mutator, it leaves us in between phases.
1127     if (!finishChangingPhase(conn)) {
1128         // A mischievous mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1129         // this, but it's probably not the end of the world if it did happen.
1130         if (false)
1131             dataLog("Conn bounce-back.\n");
1132         return RunCurrentPhaseResult::Finished;
1133     }
1134     
1135     bool result = false;
1136     switch (m_currentPhase) {
1137     case CollectorPhase::NotRunning:
1138         result = runNotRunningPhase(conn);
1139         break;
1140         
1141     case CollectorPhase::Begin:
1142         result = runBeginPhase(conn);
1143         break;
1144         
1145     case CollectorPhase::Fixpoint:
1146         if (!currentThreadState && conn == GCConductor::Mutator)
1147             return RunCurrentPhaseResult::NeedCurrentThreadState;
1148         
1149         result = runFixpointPhase(conn);
1150         break;
1151         
1152     case CollectorPhase::Concurrent:
1153         result = runConcurrentPhase(conn);
1154         break;
1155         
1156     case CollectorPhase::Reloop:
1157         result = runReloopPhase(conn);
1158         break;
1159         
1160     case CollectorPhase::End:
1161         result = runEndPhase(conn);
1162         break;
1163     }
1164
1165     return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1166 }
1167
1168 NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1169 {
1170     // Check m_requests since the mutator calls this to poll what's going on.
1171     {
1172         auto locker = holdLock(*m_threadLock);
1173         if (m_requests.isEmpty())
1174             return false;
1175     }
1176     
1177     return changePhase(conn, CollectorPhase::Begin);
1178 }
1179
1180 NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1181 {
1182     m_currentGCStartTime = MonotonicTime::now();
1183     
1184     {
1185         LockHolder locker(*m_threadLock);
1186         RELEASE_ASSERT(!m_requests.isEmpty());
1187         m_currentRequest = m_requests.first();
1188     }
1189         
1190     if (Options::logGC())
1191         dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1192
1193     m_beforeGC = MonotonicTime::now();
1194
1195     if (m_collectionScope) {
1196         dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1197         RELEASE_ASSERT_NOT_REACHED();
1198     }
1199     
1200     willStartCollection();
1201         
1202     if (UNLIKELY(m_verifier)) {
1203         // Verify that live objects from the last GC cycle haven't been corrupted by
1204         // mutators before we begin this new GC cycle.
1205         m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1206             
1207         m_verifier->startGC();
1208         m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1209     }
1210         
1211     prepareForMarking();
1212         
1213     if (m_collectionScope == CollectionScope::Full) {
1214         m_opaqueRoots.clear();
1215         m_collectorSlotVisitor->clearMarkStacks();
1216         m_mutatorMarkStack->clear();
1217     }
1218
1219     RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1220
1221     beginMarking();
1222
1223     forEachSlotVisitor(
1224         [&] (SlotVisitor& visitor) {
1225             visitor.didStartMarking();
1226         });
1227
1228     m_parallelMarkersShouldExit = false;
1229
1230     m_helperClient.setFunction(
1231         [this] () {
1232             SlotVisitor* slotVisitor;
1233             {
1234                 LockHolder locker(m_parallelSlotVisitorLock);
1235                 if (m_availableParallelSlotVisitors.isEmpty()) {
1236                     std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1237                         *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1238                     
1239                     if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1240                         newVisitor->optimizeForStoppedMutator();
1241                     
1242                     newVisitor->didStartMarking();
1243                     
1244                     slotVisitor = newVisitor.get();
1245                     m_parallelSlotVisitors.append(WTFMove(newVisitor));
1246                 } else
1247                     slotVisitor = m_availableParallelSlotVisitors.takeLast();
1248             }
1249
1250             WTF::registerGCThread(GCThreadType::Helper);
1251
1252             {
1253                 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1254                 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1255             }
1256
1257             {
1258                 LockHolder locker(m_parallelSlotVisitorLock);
1259                 m_availableParallelSlotVisitors.append(slotVisitor);
1260             }
1261         });
1262
1263     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1264
1265     m_constraintSet->didStartMarking();
1266     
1267     m_scheduler->beginCollection();
1268     if (Options::logGC())
1269         m_scheduler->log();
1270     
1271     // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1272     // checks because bootstrap would have put things into the visitor. So, we should fall
1273     // through to draining.
1274     
1275     if (!slotVisitor.didReachTermination()) {
1276         dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1277         dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1278         dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1279         dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1280         dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1281         dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1282         dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1283         dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1284         RELEASE_ASSERT_NOT_REACHED();
1285     }
1286         
1287     return changePhase(conn, CollectorPhase::Fixpoint);
1288 }
1289
1290 NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1291 {
1292     RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1293     
1294     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1295     
1296     if (Options::logGC()) {
1297         HashMap<const char*, size_t> visitMap;
1298         forEachSlotVisitor(
1299             [&] (SlotVisitor& slotVisitor) {
1300                 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1301             });
1302         
1303         auto perVisitorDump = sortedMapDump(
1304             visitMap,
1305             [] (const char* a, const char* b) -> bool {
1306                 return strcmp(a, b) < 0;
1307             },
1308             ":", " ");
1309         
1310         dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1311     }
1312         
1313     if (slotVisitor.didReachTermination()) {
1314         m_scheduler->didReachTermination();
1315             
1316         assertSharedMarkStacksEmpty();
1317             
1318         slotVisitor.mergeIfNecessary();
1319         for (auto& parallelVisitor : m_parallelSlotVisitors)
1320             parallelVisitor->mergeIfNecessary();
1321             
1322         // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1323         // we don't have to execute root constraints again unless the mutator did run. At a
1324         // minimum, we could use this for work estimates - but it's probably more than just an
1325         // estimate.
1326         // https://bugs.webkit.org/show_bug.cgi?id=166828
1327             
1328         // FIXME: We should take advantage of the fact that we could timeout. This only comes
1329         // into play if we're executing constraints for the first time. But that will matter
1330         // when we have deep stacks or a lot of DOM stuff.
1331         // https://bugs.webkit.org/show_bug.cgi?id=166831
1332             
1333         // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1334         // add their own using Heap::addMarkingConstraint().
1335         bool converged =
1336             m_constraintSet->executeConvergence(slotVisitor, MonotonicTime::infinity());
1337         if (converged && slotVisitor.isEmpty()) {
1338             assertSharedMarkStacksEmpty();
1339             return changePhase(conn, CollectorPhase::End);
1340         }
1341             
1342         m_scheduler->didExecuteConstraints();
1343     }
1344         
1345     if (Options::logGC())
1346         dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1347         
1348     {
1349         ParallelModeEnabler enabler(slotVisitor);
1350         slotVisitor.drainInParallel(m_scheduler->timeToResume());
1351     }
1352         
1353     m_scheduler->synchronousDrainingDidStall();
1354
1355     if (slotVisitor.didReachTermination())
1356         return true; // This is like relooping to the top of runFixpointPhase().
1357         
1358     if (!m_scheduler->shouldResume())
1359         return true;
1360
1361     m_scheduler->willResume();
1362         
1363     if (Options::logGC()) {
1364         double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1365         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1366     }
1367
1368     // Forgive the mutator for its past failures to keep up.
1369     // FIXME: Figure out if moving this to different places results in perf changes.
1370     m_incrementBalance = 0;
1371         
1372     return changePhase(conn, CollectorPhase::Concurrent);
1373 }
1374
1375 NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1376 {
1377     SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1378
1379     switch (conn) {
1380     case GCConductor::Mutator: {
1381         // When the mutator has the conn, we poll runConcurrentPhase() every time someone calls
1382         // stopIfNecessary(), i.e. on every allocation slow path. When that happens we check whether it's
1383         // time to stop and do some work.
1384         if (slotVisitor.didReachTermination()
1385             || m_scheduler->shouldStop())
1386             return changePhase(conn, CollectorPhase::Reloop);
1387         
1388         // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1389         // everything. This is super cheap if the SlotVisitor is already empty.
1390         slotVisitor.donateAll();
1391         return false;
1392     }
1393     case GCConductor::Collector: {
1394         {
1395             ParallelModeEnabler enabler(slotVisitor);
1396             slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1397         }
1398         return changePhase(conn, CollectorPhase::Reloop);
1399     } }
1400     
1401     RELEASE_ASSERT_NOT_REACHED();
1402     return false;
1403 }
1404
1405 NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1406 {
1407     if (Options::logGC())
1408         dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1409     
1410     m_scheduler->didStop();
1411     
1412     if (Options::logGC())
1413         m_scheduler->log();
1414     
1415     return changePhase(conn, CollectorPhase::Fixpoint);
1416 }
1417
1418 NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1419 {
1420     m_scheduler->endCollection();
1421         
1422     {
1423         auto locker = holdLock(m_markingMutex);
1424         m_parallelMarkersShouldExit = true;
1425         m_markingConditionVariable.notifyAll();
1426     }
1427     m_helperClient.finish();
1428     
1429     iterateExecutingAndCompilingCodeBlocks(
1430         [&] (CodeBlock* codeBlock) {
1431             writeBarrier(codeBlock);
1432         });
1433         
1434     updateObjectCounts();
1435     endMarking();
1436         
1437     if (UNLIKELY(m_verifier)) {
1438         m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1439         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1440     }
1441         
1442     if (vm()->typeProfiler())
1443         vm()->typeProfiler()->invalidateTypeSetCache();
1444         
1445     reapWeakHandles();
1446     pruneStaleEntriesFromWeakGCMaps();
1447     sweepArrayBuffers();
1448     snapshotUnswept();
1449     finalizeUnconditionalFinalizers();
1450     removeDeadCompilerWorklistEntries();
1451     notifyIncrementalSweeper();
1452     
1453     m_codeBlocks->iterateCurrentlyExecuting(
1454         [&] (CodeBlock* codeBlock) {
1455             writeBarrier(codeBlock);
1456         });
1457     m_codeBlocks->clearCurrentlyExecuting();
1458         
1459     m_objectSpace.prepareForAllocation();
1460     updateAllocationLimits();
1461
1462     if (UNLIKELY(m_verifier)) {
1463         m_verifier->trimDeadCells();
1464         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1465     }
1466
1467     didFinishCollection();
1468     
1469     if (m_currentRequest.didFinishEndPhase)
1470         m_currentRequest.didFinishEndPhase->run();
1471     
1472     if (false) {
1473         dataLog("Heap state after GC:\n");
1474         m_objectSpace.dumpBits();
1475     }
1476     
1477     if (Options::logGC()) {
1478         double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1479         dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1480     }
1481     
1482     {
1483         auto locker = holdLock(*m_threadLock);
1484         m_requests.removeFirst();
1485         m_lastServedTicket++;
1486         clearMutatorWaiting();
1487     }
1488     ParkingLot::unparkAll(&m_worldState);
1489
1490     if (false)
1491         dataLog("GC END!\n");
1492
1493     setNeedFinalize();
1494
1495     m_lastGCStartTime = m_currentGCStartTime;
1496     m_lastGCEndTime = MonotonicTime::now();
1497         
1498     return changePhase(conn, CollectorPhase::NotRunning);
1499 }
1500
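// Requests a transition to nextPhase and tries to complete it. Returns false if the transition could
// not be finished because the conn was relinquished to the other side (see finishChangingPhase).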
1501 bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1502 {
1503     checkConn(conn);
1504
1505     m_nextPhase = nextPhase;
1506
1507     return finishChangingPhase(conn);
1508 }
1509
1510 NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1511 {
1512     checkConn(conn);
1513     
1514     if (m_nextPhase == m_currentPhase)
1515         return true;
1516
1517     if (false)
1518         dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1519     
1520     bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1521     bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1522     
1523     if (suspendedBefore != suspendedAfter) {
1524         if (suspendedBefore) {
1525             RELEASE_ASSERT(!suspendedAfter);
1526             
1527             resumeThePeriphery();
1528             if (conn == GCConductor::Collector)
1529                 resumeTheMutator();
1530             else
1531                 handleNeedFinalize();
1532         } else {
1533             RELEASE_ASSERT(!suspendedBefore);
1534             RELEASE_ASSERT(suspendedAfter);
1535             
1536             if (conn == GCConductor::Collector) {
1537                 waitWhileNeedFinalize();
1538                 if (!stopTheMutator()) {
1539                     if (false)
1540                         dataLog("Returning false.\n");
1541                     return false;
1542                 }
1543             } else {
1544                 sanitizeStackForVM(m_vm);
1545                 handleNeedFinalize();
1546             }
1547             stopThePeriphery(conn);
1548         }
1549     }
1550     
1551     m_currentPhase = m_nextPhase;
1552     return true;
1553 }
1554
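// Stops everything except the conn holder: suspends compiler threads, tells each SlotVisitor that the
// mutator is stopped, completes pending JIT worklist plans, flushes old structure ID tables, and stops
// allocation.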
1555 void Heap::stopThePeriphery(GCConductor conn)
1556 {
1557     if (m_collectorBelievesThatTheWorldIsStopped) {
1558         dataLog("FATAL: world already stopped.\n");
1559         RELEASE_ASSERT_NOT_REACHED();
1560     }
1561     
1562     if (m_mutatorDidRun)
1563         m_mutatorExecutionVersion++;
1564     
1565     m_mutatorDidRun = false;
1566
1567     suspendCompilerThreads();
1568     m_collectorBelievesThatTheWorldIsStopped = true;
1569
1570     forEachSlotVisitor(
1571         [&] (SlotVisitor& slotVisitor) {
1572             slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1573         });
1574
1575 #if ENABLE(JIT)
1576     {
1577         DeferGCForAWhile awhile(*this);
1578         if (JITWorklist::instance()->completeAllForVM(*m_vm)
1579             && conn == GCConductor::Collector)
1580             setGCDidJIT();
1581     }
1582 #else
1583     UNUSED_PARAM(conn);
1584 #endif // ENABLE(JIT)
1585     
1586     vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1587     
1588     m_structureIDTable.flushOldTables();
1589     m_objectSpace.stopAllocating();
1590     
1591     m_stopTime = MonotonicTime::now();
1592 }
1593
1594 NEVER_INLINE void Heap::resumeThePeriphery()
1595 {
1596     // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1597     // collection cycle or this is just a concurrent phase within a collection cycle:
1598     // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1599     //   last active block.
1600     // - During collection cycle: it reinstates the last active block.
1601     m_objectSpace.resumeAllocating();
1602     
1603     m_barriersExecuted = 0;
1604     
1605     if (!m_collectorBelievesThatTheWorldIsStopped) {
1606         dataLog("Fatal: collector does not believe that the world is stopped.\n");
1607         RELEASE_ASSERT_NOT_REACHED();
1608     }
1609     m_collectorBelievesThatTheWorldIsStopped = false;
1610     
1611     // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1612     // become available. We basically want a lockAny() method that will lock whatever lock is available
1613     // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1614     // queues at once, which is totally achievable - it would just require memory allocation, which is
1615     // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1616     // with a DLG-style handshake mechanism, but that seems not as general.
1617     Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1618
1619     forEachSlotVisitor(
1620         [&] (SlotVisitor& slotVisitor) {
1621             slotVisitorsToUpdate.append(&slotVisitor);
1622         });
1623     
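    // Make a bounded number of passes (yielding between them) that try to update each visitor without
    // blocking: a visitor is dropped from the list once it has acknowledged that the mutator resumed, or
    // once we managed to briefly grab its rightToRun lock. Any stragglers are handled by the blocking
    // updateMutatorIsStopped() call after the loop.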
1624     for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1625         for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1626             SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1627             bool remove = false;
1628             if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1629                 remove = true;
1630             else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1631                 slotVisitor.updateMutatorIsStopped(locker);
1632                 remove = true;
1633             }
1634             if (remove) {
1635                 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1636                 slotVisitorsToUpdate.takeLast();
1637             }
1638         }
1639         WTF::Thread::yield();
1640     }
1641     
1642     for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1643         slotVisitor->updateMutatorIsStopped();
1644     
1645     resumeCompilerThreads();
1646 }
1647
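// The mutator/collector handshake is a small state machine over m_worldState, whose bits include
// hasAccessBit (the mutator holds heap access), stoppedBit (the mutator is stopped), mutatorWaitingBit,
// mutatorHasConnBit (the mutator drives the collection), gcDidJITBit, and needFinalizeBit. This function
// returns true once the mutator is stopped: either it was already stopped, or it had no heap access and
// we could set stoppedBit directly. It returns false when the conn is (or gets) handed to the mutator,
// in which case the mutator will run the collector phases itself via stopIfNecessary().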
1648 bool Heap::stopTheMutator()
1649 {
1650     for (;;) {
1651         unsigned oldState = m_worldState.load();
1652         if (oldState & stoppedBit) {
1653             RELEASE_ASSERT(!(oldState & hasAccessBit));
1654             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1655             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1656             return true;
1657         }
1658         
1659         if (oldState & mutatorHasConnBit) {
1660             RELEASE_ASSERT(!(oldState & hasAccessBit));
1661             RELEASE_ASSERT(!(oldState & stoppedBit));
1662             return false;
1663         }
1664
1665         if (!(oldState & hasAccessBit)) {
1666             RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1667             RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1668             // We can stop the world instantly.
1669             if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1670                 return true;
1671             continue;
1672         }
1673         
1674         // Transfer the conn to the mutator and bail.
1675         RELEASE_ASSERT(oldState & hasAccessBit);
1676         RELEASE_ASSERT(!(oldState & stoppedBit));
1677         unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1678         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1679             if (false)
1680                 dataLog("Handed off the conn.\n");
1681             m_stopIfNecessaryTimer->scheduleSoon();
1682             ParkingLot::unparkAll(&m_worldState);
1683             return false;
1684         }
1685     }
1686 }
1687
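// Sanity-checks that the world state is consistent (the mutator either runs with heap access or is
// stopped without it, and does not hold the conn), then clears stoppedBit if it is set and unparks
// anyone parked on m_worldState.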
1688 NEVER_INLINE void Heap::resumeTheMutator()
1689 {
1690     if (false)
1691         dataLog("Resuming the mutator.\n");
1692     for (;;) {
1693         unsigned oldState = m_worldState.load();
1694         if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1695             dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1696             RELEASE_ASSERT_NOT_REACHED();
1697         }
1698         if (oldState & mutatorHasConnBit) {
1699             dataLog("Fatal: mutator has the conn.\n");
1700             RELEASE_ASSERT_NOT_REACHED();
1701         }
1702         
1703         if (!(oldState & stoppedBit)) {
1704             if (false)
1705                 dataLog("Returning because not stopped.\n");
1706             return;
1707         }
1708         
1709         if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1710             if (false)
1711                 dataLog("CASing and returning.\n");
1712             ParkingLot::unparkAll(&m_worldState);
1713             return;
1714         }
1715     }
1716 }
1717
1718 void Heap::stopIfNecessarySlow()
1719 {
1720     while (stopIfNecessarySlow(m_worldState.load())) { }
1721     
1722     RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1723     RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1724     
1725     handleGCDidJIT();
1726     handleNeedFinalize();
1727     m_mutatorDidRun = true;
1728 }
1729
1730 bool Heap::stopIfNecessarySlow(unsigned oldState)
1731 {
1732     RELEASE_ASSERT(oldState & hasAccessBit);
1733     RELEASE_ASSERT(!(oldState & stoppedBit));
1734     
1735     // It's possible for us to wake up with finalization already requested but the world not yet
1736     // resumed. If that happens, we can't run finalization yet.
1737     if (handleNeedFinalize(oldState))
1738         return true;
1739
1740     // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1741     // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1742     // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1743     // and there would be some other bit indicating whether we were in some GC phase other than the
1744     // NotRunning or Concurrent ones.
1745     if (oldState & mutatorHasConnBit)
1746         collectInMutatorThread();
1747     
1748     return false;
1749 }
1750
1751 NEVER_INLINE void Heap::collectInMutatorThread()
1752 {
1753     CollectingScope collectingScope(*this);
1754     for (;;) {
1755         RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1756         switch (result) {
1757         case RunCurrentPhaseResult::Finished:
1758             return;
1759         case RunCurrentPhaseResult::Continue:
1760             break;
1761         case RunCurrentPhaseResult::NeedCurrentThreadState:
1762             sanitizeStackForVM(m_vm);
1763             auto lambda = [&] (CurrentThreadState& state) {
1764                 for (;;) {
1765                     RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1766                     switch (result) {
1767                     case RunCurrentPhaseResult::Finished:
1768                         return;
1769                     case RunCurrentPhaseResult::Continue:
1770                         break;
1771                     case RunCurrentPhaseResult::NeedCurrentThreadState:
1772                         RELEASE_ASSERT_NOT_REACHED();
1773                         break;
1774                     }
1775                 }
1776             };
1777             callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1778             return;
1779         }
1780     }
1781 }
1782
1783 template<typename Func>
1784 void Heap::waitForCollector(const Func& func)
1785 {
1786     for (;;) {
1787         bool done;
1788         {
1789             LockHolder locker(*m_threadLock);
1790             done = func(locker);
1791             if (!done) {
1792                 setMutatorWaiting();
1793                 
1794                 // At this point, the collector knows that we intend to wait, and it will clear the
1795                 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1796                 // prevents us from parking except when a stop-the-world is also in effect. Unparking
1797                 // after clearing means that even if the clearing happens after we park, we will unpark.
1798             }
1799         }
1800         
1801         // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1802         unsigned oldState = m_worldState.load();
1803         if (stopIfNecessarySlow(oldState))
1804             continue;
1805         
1806         // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1807         // do the collection.
1808         relinquishConn();
1809         
1810         if (done) {
1811             clearMutatorWaiting(); // Clean up just in case.
1812             return;
1813         }
1814         
1815         // If mutatorWaitingBit is still set then we want to wait.
1816         ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1817     }
1818 }
1819
1820 void Heap::acquireAccessSlow()
1821 {
1822     for (;;) {
1823         unsigned oldState = m_worldState.load();
1824         RELEASE_ASSERT(!(oldState & hasAccessBit));
1825         
1826         if (oldState & stoppedBit) {
1827             if (verboseStop) {
1828                 dataLog("Stopping in acquireAccess!\n");
1829                 WTFReportBacktrace();
1830             }
1831             // Wait until we're not stopped anymore.
1832             ParkingLot::compareAndPark(&m_worldState, oldState);
1833             continue;
1834         }
1835         
1836         RELEASE_ASSERT(!(oldState & stoppedBit));
1837         unsigned newState = oldState | hasAccessBit;
1838         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1839             handleGCDidJIT();
1840             handleNeedFinalize();
1841             m_mutatorDidRun = true;
1842             stopIfNecessary();
1843             return;
1844         }
1845     }
1846 }
1847
1848 void Heap::releaseAccessSlow()
1849 {
1850     for (;;) {
1851         unsigned oldState = m_worldState.load();
1852         if (!(oldState & hasAccessBit)) {
1853             dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1854             RELEASE_ASSERT_NOT_REACHED();
1855         }
1856         if (oldState & stoppedBit) {
1857             dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1858             RELEASE_ASSERT_NOT_REACHED();
1859         }
1860         
1861         if (handleNeedFinalize(oldState))
1862             continue;
1863         
1864         unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1865         
1866         if ((oldState & mutatorHasConnBit)
1867             && m_nextPhase != m_currentPhase) {
1868             // This means that the collector thread had given us the conn so that we would do something
1869             // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1870             // the meantime, since we're handing the conn over, the collector will be awoken and it is
1871             // sure to have work to do.
1872             newState |= stoppedBit;
1873         }
1874
1875         if (m_worldState.compareExchangeWeak(oldState, newState)) {
1876             if (oldState & mutatorHasConnBit)
1877                 finishRelinquishingConn();
1878             return;
1879         }
1880     }
1881 }
1882
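// Returns true to tell relinquishConn() below to retry with a freshly loaded m_worldState, and false once
// there is nothing left to do: either the mutator does not hold the conn, or the collector thread is
// shutting down.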
1883 bool Heap::relinquishConn(unsigned oldState)
1884 {
1885     RELEASE_ASSERT(oldState & hasAccessBit);
1886     RELEASE_ASSERT(!(oldState & stoppedBit));
1887     
1888     if (!(oldState & mutatorHasConnBit))
1889         return false; // Done.
1890     
1891     if (m_threadShouldStop)
1892         return false;
1893     
1894     if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1895         return true; // Loop around.
1896     
1897     finishRelinquishingConn();
1898     return true;
1899 }
1900
1901 void Heap::finishRelinquishingConn()
1902 {
1903     if (false)
1904         dataLog("Relinquished the conn.\n");
1905     
1906     sanitizeStackForVM(m_vm);
1907     
1908     auto locker = holdLock(*m_threadLock);
1909     if (!m_requests.isEmpty())
1910         m_threadCondition->notifyOne(locker);
1911     ParkingLot::unparkAll(&m_worldState);
1912 }
1913
1914 void Heap::relinquishConn()
1915 {
1916     while (relinquishConn(m_worldState.load())) { }
1917 }
1918
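// handleGCDidJIT() and handleNeedFinalize() follow the same convention as relinquishConn(): returning
// true asks the caller to reload m_worldState and call again; returning false means the relevant bit was
// observed clear. A successful compare-exchange still returns true so that the next iteration re-checks
// with fresh state.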
1919 bool Heap::handleGCDidJIT(unsigned oldState)
1920 {
1921     RELEASE_ASSERT(oldState & hasAccessBit);
1922     if (!(oldState & gcDidJITBit))
1923         return false;
1924     if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1925         WTF::crossModifyingCodeFence();
1926         return true;
1927     }
1928     return true;
1929 }
1930
1931 NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1932 {
1933     RELEASE_ASSERT(oldState & hasAccessBit);
1934     RELEASE_ASSERT(!(oldState & stoppedBit));
1935     
1936     if (!(oldState & needFinalizeBit))
1937         return false;
1938     if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
1939         finalize();
1940         // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
1941         // which case they would be waiting for us to release heap access.
1942         ParkingLot::unparkAll(&m_worldState);
1943         return true;
1944     }
1945     return true;
1946 }
1947
1948 void Heap::handleGCDidJIT()
1949 {
1950     while (handleGCDidJIT(m_worldState.load())) { }
1951 }
1952
1953 void Heap::handleNeedFinalize()
1954 {
1955     while (handleNeedFinalize(m_worldState.load())) { }
1956 }
1957
1958 void Heap::setGCDidJIT()
1959 {
1960     m_worldState.transaction(
1961         [&] (unsigned& state) -> bool {
1962             RELEASE_ASSERT(state & stoppedBit);
1963             state |= gcDidJITBit;
1964             return true;
1965         });
1966 }
1967
1968 void Heap::setNeedFinalize()
1969 {
1970     m_worldState.exchangeOr(needFinalizeBit);
1971     ParkingLot::unparkAll(&m_worldState);
1972     m_stopIfNecessaryTimer->scheduleSoon();
1973 }
1974
1975 void Heap::waitWhileNeedFinalize()
1976 {
1977     for (;;) {
1978         unsigned oldState = m_worldState.load();
1979         if (!(oldState & needFinalizeBit)) {
1980             // This means that either there was no finalize request or the main thread will finalize
1981             // with heap access, so a subsequent call to stopTheWorld() will return only when
1982             // finalize finishes.
1983             return;
1984         }
1985         ParkingLot::compareAndPark(&m_worldState, oldState);
1986     }
1987 }
1988
1989 void Heap::setMutatorWaiting()
1990 {
1991     m_worldState.exchangeOr(mutatorWaitingBit);
1992 }
1993
1994 void Heap::clearMutatorWaiting()
1995 {
1996     m_worldState.exchangeAnd(~mutatorWaitingBit);
1997 }
1998
1999 void Heap::notifyThreadStopping(const AbstractLocker&)
2000 {
2001     m_threadIsStopping = true;
2002     clearMutatorWaiting();
2003     ParkingLot::unparkAll(&m_worldState);
2004 }
2005
2006 void Heap::finalize()
2007 {
2008     MonotonicTime before;
2009     if (Options::logGC()) {
2010         before = MonotonicTime::now();
2011         dataLog("[GC<", RawPointer(this), ">: finalize ");
2012     }
2013     
2014     {
2015         SweepingScope sweepingScope(*this);
2016         deleteUnmarkedCompiledCode();
2017         deleteSourceProviderCaches();
2018         sweepInFinalize();
2019     }
2020     
2021     if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2022         cache->clear();
2023     
2024     for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2025         callback.run(*vm());
2026     
2027     if (Options::sweepSynchronously())
2028         sweepSynchronously();
2029
2030     if (Options::logGC()) {
2031         MonotonicTime after = MonotonicTime::now();
2032         dataLog((after - before).milliseconds(), "ms]\n");
2033     }
2034 }
2035
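// Every collection request is assigned a ticket; waitForCollection(ticket) blocks until the collector has
// served it (m_lastServedTicket >= ticket). If the collector is idle with no outstanding requests, the
// mutator takes the conn here, so the collection will be driven from the mutator thread the next time it
// calls stopIfNecessary() instead of waking the collector thread.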
2036 Heap::Ticket Heap::requestCollection(GCRequest request)
2037 {
2038     stopIfNecessary();
2039     
2040     ASSERT(vm()->currentThreadIsHoldingAPILock());
2041     RELEASE_ASSERT(vm()->atomicStringTable() == WTF::Thread::current().atomicStringTable());
2042     
2043     LockHolder locker(*m_threadLock);
2044     // We may be able to steal the conn. That only works if the collector is definitely not running
2045     // right now. This is an optimization that prevents the collector thread from ever starting in most
2046     // cases.
2047     ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2048     if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2049         if (false)
2050             dataLog("Taking the conn.\n");
2051         m_worldState.exchangeOr(mutatorHasConnBit);
2052     }
2053     
2054     m_requests.append(request);
2055     m_lastGrantedTicket++;
2056     if (!(m_worldState.load() & mutatorHasConnBit))
2057         m_threadCondition->notifyOne(locker);
2058     return m_lastGrantedTicket;
2059 }
2060
2061 void Heap::waitForCollection(Ticket ticket)
2062 {
2063     waitForCollector(
2064         [&] (const AbstractLocker&) -> bool {
2065             return m_lastServedTicket >= ticket;
2066         });
2067 }
2068
2069 void Heap::sweepInFinalize()
2070 {
2071     m_objectSpace.sweepLargeAllocations();
2072     
2073     auto sweepBlock = [&] (MarkedBlock::Handle* handle) {
2074         handle->sweep(nullptr);
2075     };
2076     
2077     vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock);
2078 }
2079
2080 void Heap::suspendCompilerThreads()
2081 {
2082 #if ENABLE(DFG_JIT)
2083     // We "ensure" (create if necessary) all of the worklists up front so that it's not possible for
2084     // the mutator to start a new worklist after we have suspended the ones that it had started before.
2085     // That's not very expensive since the worklists use AutomaticThreads anyway.
2086     for (unsigned i = DFG::numberOfWorklists(); i--;)
2087         DFG::ensureWorklistForIndex(i).suspendAllThreads();
2088 #endif
2089 }
2090
2091 void Heap::willStartCollection()
2092 {
2093     if (Options::logGC())
2094         dataLog("=> ");
2095     
2096     if (shouldDoFullCollection()) {
2097         m_collectionScope = CollectionScope::Full;
2098         m_shouldDoFullCollection = false;
2099         if (Options::logGC())
2100             dataLog("FullCollection, ");
2101         if (false)
2102             dataLog("Full collection!\n");
2103     } else {
2104         m_collectionScope = CollectionScope::Eden;
2105         if (Options::logGC())
2106             dataLog("EdenCollection, ");
2107         if (false)
2108             dataLog("Eden collection!\n");
2109     }
2110     if (m_collectionScope == CollectionScope::Full) {
2111         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2112         m_extraMemorySize = 0;
2113         m_deprecatedExtraMemorySize = 0;
2114 #if ENABLE(RESOURCE_USAGE)
2115         m_externalMemorySize = 0;
2116 #endif
2117
2118         if (m_fullActivityCallback)
2119             m_fullActivityCallback->willCollect();
2120     } else {
2121         ASSERT(m_collectionScope == CollectionScope::Eden);
2122         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2123     }
2124
2125     if (m_edenActivityCallback)
2126         m_edenActivityCallback->willCollect();
2127
2128     for (auto* observer : m_observers)
2129         observer->willGarbageCollect();
2130 }
2131
2132 void Heap::prepareForMarking()
2133 {
2134     m_objectSpace.prepareForMarking();
2135 }
2136
2137 void Heap::reapWeakHandles()
2138 {
2139     m_objectSpace.reapWeakSets();
2140 }
2141
2142 void Heap::pruneStaleEntriesFromWeakGCMaps()
2143 {
2144     if (m_collectionScope != CollectionScope::Full)
2145         return;
2146     for (auto& pruneCallback : m_weakGCMaps.values())
2147         pruneCallback();
2148 }
2149
2150 void Heap::sweepArrayBuffers()
2151 {
2152     m_arrayBuffers.sweep();
2153 }
2154
2155 void Heap::snapshotUnswept()
2156 {
2157     TimingScope timingScope(*this, "Heap::snapshotUnswept");
2158     m_objectSpace.snapshotUnswept();
2159 }
2160
2161 void Heap::deleteSourceProviderCaches()
2162 {
2163     if (*m_lastCollectionScope == CollectionScope::Full)
2164         m_vm->clearSourceProviderCaches();
2165 }
2166
2167 void Heap::notifyIncrementalSweeper()
2168 {
2169     if (m_collectionScope == CollectionScope::Full) {
2170         if (!m_logicallyEmptyWeakBlocks.isEmpty())
2171             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2172     }
2173
2174     m_sweeper->startSweeping();
2175 }
2176
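// Illustrative example (the numbers are made up): suppose a full collection ends with
// currentHeapSize = 100MB and proportionalHeapSize() yields m_maxHeapSize = 200MB, so m_maxEdenSize is
// 100MB. If a subsequent eden collection leaves currentHeapSize = 150MB, the eden branch below grows
// m_maxHeapSize by (150MB - m_sizeAfterLastCollect) = 50MB to 250MB, so m_maxEdenSize remains 100MB.
// That is what "ensure that the nursery size is fixed" means below.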
2177 void Heap::updateAllocationLimits()
2178 {
2179     static const bool verbose = false;
2180     
2181     if (verbose) {
2182         dataLog("\n");
2183         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2184     }
2185     
2186     // Calculate our current heap size threshold for the purpose of figuring out when we should
2187     // run another collection. This isn't the same as either size() or capacity(), though it should
2188     // be somewhere between the two. The key is to match the size calculations involved in calls to
2189     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2190     // fragmentation, we may have size() much smaller than capacity().
2191     size_t currentHeapSize = 0;
2192
2193     // For marked space, we use the total number of bytes visited. This matches the logic for
2194     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
2195     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2196     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2197     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2198     currentHeapSize += m_totalBytesVisited;
2199     if (verbose)
2200         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2201
2202     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2203     // extra memory reporting.
2204     currentHeapSize += extraMemorySize();
2205     if (!ASSERT_DISABLED) {
2206         Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2207         checkedCurrentHeapSize += extraMemorySize();
2208         ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2209     }
2210
2211     if (verbose)
2212         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2213     
2214     if (m_collectionScope == CollectionScope::Full) {
2215         // To avoid pathological GC churn in very small and very large heaps, we set
2216         // the new allocation limit based on the current size of the heap, with a
2217         // fixed minimum.
2218         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2219         if (verbose)
2220             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2221         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2222         if (verbose)
2223             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2224         m_sizeAfterLastFullCollect = currentHeapSize;
2225         if (verbose)
2226             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2227         m_bytesAbandonedSinceLastFullCollect = 0;
2228         if (verbose)
2229             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2230     } else {
2231         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2232         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2233         // But we are sloppy, so we have to defend against the subtraction underflowing.
2234         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2235         if (verbose)
2236             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2237         m_sizeAfterLastEdenCollect = currentHeapSize;
2238         if (verbose)
2239             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2240         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2241         double minEdenToOldGenerationRatio = 1.0 / 3.0;
2242         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2243             m_shouldDoFullCollection = true;
2244         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2245         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2246         if (verbose)
2247             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2248         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2249         if (verbose)
2250             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2251         if (m_fullActivityCallback) {
2252             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2253             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
2254         }
2255     }
2256
2257 #if PLATFORM(IOS)
2258     // Get critical memory threshold for next cycle.
2259     overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2260 #endif
2261
2262     m_sizeAfterLastCollect = currentHeapSize;
2263     if (verbose)
2264         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2265     m_bytesAllocatedThisCycle = 0;
2266
2267     if (Options::logGC())
2268         dataLog("=> ", currentHeapSize / 1024, "kb, ");
2269 }
2270
2271 void Heap::didFinishCollection()
2272 {
2273     m_afterGC = MonotonicTime::now();
2274     CollectionScope scope = *m_collectionScope;
2275     if (scope == CollectionScope::Full)
2276         m_lastFullGCLength = m_afterGC - m_beforeGC;
2277     else
2278         m_lastEdenGCLength = m_afterGC - m_beforeGC;
2279
2280 #if ENABLE(RESOURCE_USAGE)
2281     ASSERT(externalMemorySize() <= extraMemorySize());
2282 #endif
2283
2284     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2285         gatherExtraHeapSnapshotData(*heapProfiler);
2286         removeDeadHeapSnapshotNodes(*heapProfiler);
2287     }
2288
2289     if (UNLIKELY(m_verifier))
2290         m_verifier->endGC();
2291
2292     RELEASE_ASSERT(m_collectionScope);
2293     m_lastCollectionScope = m_collectionScope;
2294     m_collectionScope = std::nullopt;
2295
2296     for (auto* observer : m_observers)
2297         observer->didGarbageCollect(scope);
2298 }
2299
2300 void Heap::resumeCompilerThreads()
2301 {
2302 #if ENABLE(DFG_JIT)
2303     for (unsigned i = DFG::numberOfWorklists(); i--;)
2304         DFG::existingWorklistForIndex(i).resumeAllThreads();
2305 #endif
2306 }
2307
2308 GCActivityCallback* Heap::fullActivityCallback()
2309 {
2310     return m_fullActivityCallback.get();
2311 }
2312
2313 GCActivityCallback* Heap::edenActivityCallback()
2314 {
2315     return m_edenActivityCallback.get();
2316 }
2317
2318 IncrementalSweeper& Heap::sweeper()
2319 {
2320     return *m_sweeper;
2321 }
2322
2323 void Heap::setGarbageCollectionTimerEnabled(bool enable)
2324 {
2325     if (m_fullActivityCallback)
2326         m_fullActivityCallback->setEnabled(enable);
2327     if (m_edenActivityCallback)
2328         m_edenActivityCallback->setEnabled(enable);
2329 }
2330
2331 void Heap::didAllocate(size_t bytes)
2332 {
2333     if (m_edenActivityCallback)
2334         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2335     m_bytesAllocatedThisCycle += bytes;
2336     performIncrement(bytes);
2337 }
2338
2339 bool Heap::isValidAllocation(size_t)
2340 {
2341     if (!isValidThreadState(m_vm))
2342         return false;
2343
2344     if (isCurrentThreadBusy())
2345         return false;
2346     
2347     return true;
2348 }
2349
2350 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2351 {
2352     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2353 }
2354
2355 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2356 {
2357     HandleSlot slot = handle.slot();
2358     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2359     finalizer(slot->asCell());
2360     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2361 }
2362
2363 void Heap::addExecutable(ExecutableBase* executable)
2364 {
2365     m_executables.append(executable);
2366 }
2367
2368 void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2369 {
2370     if (!m_fullActivityCallback) {
2371         collectNow(synchronousness, CollectionScope::Full);
2372         return;
2373     }
2374
2375     if (m_fullActivityCallback->didGCRecently()) {
2376         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2377         reportAbandonedObjectGraph();
2378         return;
2379     }
2380
2381     m_fullActivityCallback->setDidGCRecently();
2382     collectNow(synchronousness, CollectionScope::Full);
2383 }
2384
2385 bool Heap::shouldDoFullCollection()
2386 {
2387     if (!Options::useGenerationalGC())
2388         return true;
2389
2390     if (!m_currentRequest.scope)
2391         return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2392     return *m_currentRequest.scope == CollectionScope::Full;
2393 }
2394
2395 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2396 {
2397     m_logicallyEmptyWeakBlocks.append(block);
2398 }
2399
2400 void Heap::sweepAllLogicallyEmptyWeakBlocks()
2401 {
2402     if (m_logicallyEmptyWeakBlocks.isEmpty())
2403         return;
2404
2405     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2406     while (sweepNextLogicallyEmptyWeakBlock()) { }
2407 }
2408
2409 bool Heap::sweepNextLogicallyEmptyWeakBlock()
2410 {
2411     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2412         return false;
2413
2414     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2415
2416     block->sweep();
2417     if (block->isEmpty()) {
2418         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2419         m_logicallyEmptyWeakBlocks.removeLast();
2420         WeakBlock::destroy(*this, block);
2421     } else
2422         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2423
2424     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2425         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2426         return false;
2427     }
2428
2429     return true;
2430 }
2431
2432 size_t Heap::visitCount()
2433 {
2434     size_t result = 0;
2435     forEachSlotVisitor(
2436         [&] (SlotVisitor& visitor) {
2437             result += visitor.visitCount();
2438         });
2439     return result;
2440 }
2441
2442 size_t Heap::bytesVisited()
2443 {
2444     size_t result = 0;
2445     forEachSlotVisitor(
2446         [&] (SlotVisitor& visitor) {
2447             result += visitor.bytesVisited();
2448         });
2449     return result;
2450 }
2451
2452 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
2453 {
2454     // We don't know the full set of CodeBlocks until compilation has terminated.
2455     completeAllJITPlans();
2456
2457     return m_codeBlocks->iterate(func);
2458 }
2459
2460 void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<bool(CodeBlock*)>& func)
2461 {
2462     return m_codeBlocks->iterate(locker, func);
2463 }
2464
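// Slow path of the store barrier, reached when the fast path's cellState-vs-m_barrierThreshold check did
// not filter the barrier out. When the mutator must be fenced (see setMutatorShouldBeFenced() below), the
// threshold is the tautological one and filters nothing, so we issue a store-load fence and re-check
// whether "from" is really black before adding it to the remembered set.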
2465 void Heap::writeBarrierSlowPath(const JSCell* from)
2466 {
2467     if (UNLIKELY(mutatorShouldBeFenced())) {
2468         // In this case, the barrierThreshold is the tautological threshold, so "from" may not actually
2469         // be black. We can't know for sure until we fire off a fence.
2470         WTF::storeLoadFence();
2471         if (from->cellState() != CellState::PossiblyBlack)
2472             return;
2473     }
2474     
2475     addToRememberedSet(from);
2476 }
2477
2478 bool Heap::isCurrentThreadBusy()
2479 {
2480     return mayBeGCThread() || mutatorState() != MutatorState::Running;
2481 }
2482
2483 void Heap::reportExtraMemoryVisited(size_t size)
2484 {
2485     size_t* counter = &m_extraMemorySize;
2486     
2487     for (;;) {
2488         size_t oldSize = *counter;
2489         // FIXME: Change this to use SaturatedArithmetic when available.
2490         // https://bugs.webkit.org/show_bug.cgi?id=170411
2491         Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2492         checkedNewSize += size;
2493         size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2494         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2495             return;
2496     }
2497 }
2498
2499 #if ENABLE(RESOURCE_USAGE)
2500 void Heap::reportExternalMemoryVisited(size_t size)
2501 {
2502     size_t* counter = &m_externalMemorySize;
2503
2504     for (;;) {
2505         size_t oldSize = *counter;
2506         if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2507             return;
2508     }
2509 }
2510 #endif
2511
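// Decides whether GC work is needed right now. If it is but GC is deferred, the need is recorded in the
// GCDeferralContext or in m_didDeferGCWork; otherwise we stop if the collector asked us to, and we kick
// off an asynchronous collection once m_bytesAllocatedThisCycle exceeds this cycle's allowance
// (Options::gcMaxHeapSize() if set, otherwise m_maxEdenSize, further clamped on iOS when over the
// critical memory threshold).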
2512 void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2513 {
2514     ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2515
2516     if (!m_isSafeToCollect)
2517         return;
2518     switch (mutatorState()) {
2519     case MutatorState::Running:
2520     case MutatorState::Allocating:
2521         break;
2522     case MutatorState::Sweeping:
2523     case MutatorState::Collecting:
2524         return;
2525     }
2526     if (!Options::useGC())
2527         return;
2528     
2529     if (mayNeedToStop()) {
2530         if (deferralContext)
2531             deferralContext->m_shouldGC = true;
2532         else if (isDeferred())
2533             m_didDeferGCWork = true;
2534         else
2535             stopIfNecessary();
2536     }
2537     
2538     if (UNLIKELY(Options::gcMaxHeapSize())) {
2539         if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2540             return;
2541     } else {
2542         size_t bytesAllowedThisCycle = m_maxEdenSize;
2543
2544 #if PLATFORM(IOS)
2545         if (overCriticalMemoryThreshold())
2546             bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2547 #endif
2548
2549         if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2550             return;
2551     }
2552
2553     if (deferralContext)
2554         deferralContext->m_shouldGC = true;
2555     else if (isDeferred())
2556         m_didDeferGCWork = true;
2557     else {
2558         collectAsync();
2559         stopIfNecessary(); // This will immediately start the collection if we have the conn.
2560     }
2561 }
2562
2563 void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2564 {
2565     // Can't do anything if we're still deferred.
2566     if (m_deferralDepth)
2567         return;
2568     
2569     ASSERT(!isDeferred());
2570     
2571     m_didDeferGCWork = false;
2572     // FIXME: Bring back something like the DeferGCProbability mode.
2573     // https://bugs.webkit.org/show_bug.cgi?id=166627
2574     collectIfNecessaryOrDefer();
2575 }
2576
2577 void Heap::registerWeakGCMap(void* weakGCMap, std::function<void()> pruningCallback)
2578 {
2579     m_weakGCMaps.add(weakGCMap, WTFMove(pruningCallback));
2580 }
2581
2582 void Heap::unregisterWeakGCMap(void* weakGCMap)
2583 {
2584     m_weakGCMaps.remove(weakGCMap);
2585 }
2586
2587 void Heap::didAllocateBlock(size_t capacity)
2588 {
2589 #if ENABLE(RESOURCE_USAGE)
2590     m_blockBytesAllocated += capacity;
2591 #else
2592     UNUSED_PARAM(capacity);
2593 #endif
2594 }
2595
2596 void Heap::didFreeBlock(size_t capacity)
2597 {
2598 #if ENABLE(RESOURCE_USAGE)
2599     m_blockBytesAllocated -= capacity;
2600 #else
2601     UNUSED_PARAM(capacity);
2602 #endif
2603 }
2604
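// Each core constraint is a named root-scanning callback that the marking fixpoint may execute repeatedly
// until no constraint produces new work. The volatility value is, roughly, a hint about when a constraint
// can produce new work: GreyedByExecution constraints depend on what the mutator has been doing,
// GreyedByMarking constraints depend on what marking has discovered so far, and SeldomGreyed constraints
// rarely produce anything new.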
2605 void Heap::addCoreConstraints()
2606 {
2607     m_constraintSet->add(
2608         "Cs", "Conservative Scan",
2609         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2610             TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2611             m_objectSpace.prepareForConservativeScan();
2612             ConservativeRoots conservativeRoots(*this);
2613             SuperSamplerScope superSamplerScope(false);
2614             gatherStackRoots(conservativeRoots);
2615             gatherJSStackRoots(conservativeRoots);
2616             gatherScratchBufferRoots(conservativeRoots);
2617             slotVisitor.append(conservativeRoots);
2618         },
2619         ConstraintVolatility::GreyedByExecution);
2620     
2621     m_constraintSet->add(
2622         "Msr", "Misc Small Roots",
2623         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2624 #if JSC_OBJC_API_ENABLED
2625             scanExternalRememberedSet(*m_vm, slotVisitor);
2626 #endif
2627
2628             if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope))
2629                 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2630             
2631             for (auto& pair : m_protectedValues)
2632                 slotVisitor.appendUnbarriered(pair.key);
2633             
2634             if (m_markListSet && m_markListSet->size())
2635                 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2636             
2637             slotVisitor.appendUnbarriered(m_vm->exception());
2638             slotVisitor.appendUnbarriered(m_vm->lastException());
2639         },
2640         ConstraintVolatility::GreyedByExecution);
2641     
2642     m_constraintSet->add(
2643         "Sh", "Strong Handles",
2644         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2645             m_handleSet.visitStrongHandles(slotVisitor);
2646             m_handleStack.visit(slotVisitor);
2647         },
2648         ConstraintVolatility::GreyedByExecution);
2649     
2650     m_constraintSet->add(
2651         "D", "Debugger",
2652         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2653 #if ENABLE(SAMPLING_PROFILER)
2654             if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2655                 LockHolder locker(samplingProfiler->getLock());
2656                 samplingProfiler->processUnverifiedStackTraces();
2657                 samplingProfiler->visit(slotVisitor);
2658                 if (Options::logGC() == GCLogging::Verbose)
2659                     dataLog("Sampling Profiler data:\n", slotVisitor);
2660             }
2661 #endif // ENABLE(SAMPLING_PROFILER)
2662             
2663             if (m_vm->typeProfiler())
2664                 m_vm->typeProfilerLog()->visit(slotVisitor);
2665             
2666             m_vm->shadowChicken().visitChildren(slotVisitor);
2667         },
2668         ConstraintVolatility::GreyedByExecution);
2669     
2670     m_constraintSet->add(
2671         "Jsr", "JIT Stub Routines",
2672         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2673             m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2674         },
2675         ConstraintVolatility::GreyedByExecution);
2676     
2677     m_constraintSet->add(
2678         "Ws", "Weak Sets",
2679         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2680             m_objectSpace.visitWeakSets(slotVisitor);
2681         },
2682         ConstraintVolatility::GreyedByMarking);
2683     
2684     m_constraintSet->add(
2685         "Wrh", "Weak Reference Harvesters",
2686         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2687             for (WeakReferenceHarvester* current = m_weakReferenceHarvesters.head(); current; current = current->next())
2688                 current->visitWeakReferences(slotVisitor);
2689         },
2690         ConstraintVolatility::GreyedByMarking);
2691     
2692 #if ENABLE(DFG_JIT)
2693     m_constraintSet->add(
2694         "Dw", "DFG Worklists",
2695         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2696             for (unsigned i = DFG::numberOfWorklists(); i--;)
2697                 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2698             
2699             // FIXME: This is almost certainly unnecessary.
2700             // https://bugs.webkit.org/show_bug.cgi?id=166829
2701             DFG::iterateCodeBlocksForGC(
2702                 *m_vm,
2703                 [&] (CodeBlock* codeBlock) {
2704                     slotVisitor.appendUnbarriered(codeBlock);
2705                 });
2706             
2707             if (Options::logGC() == GCLogging::Verbose)
2708                 dataLog("DFG Worklists:\n", slotVisitor);
2709         },
2710         ConstraintVolatility::GreyedByMarking);
2711 #endif
2712     
2713     m_constraintSet->add(
2714         "Cb", "CodeBlocks",
2715         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2716             iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2717                 [&] (CodeBlock* codeBlock) {
2718                     // Visit the CodeBlock as a constraint only if it's black.
2719                     if (Heap::isMarked(codeBlock)
2720                         && codeBlock->cellState() == CellState::PossiblyBlack)
2721                         slotVisitor.visitAsConstraint(codeBlock);
2722                 });
2723         },
2724         ConstraintVolatility::SeldomGreyed);
2725     
2726     m_constraintSet->add(
2727         "Mrms", "Mutator+Race Mark Stack",
2728         [this] (SlotVisitor& slotVisitor, const VisitingTimeout&) {
2729             // Indicate to the fixpoint that we introduced work!
2730             size_t size = m_mutatorMarkStack->size() + m_raceMarkStack->size();
2731             slotVisitor.addToVisitCount(size);
2732             
2733             if (Options::logGC())
2734                 dataLog("(", size, ")");
2735             
2736             m_mutatorMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2737             m_raceMarkStack->transferTo(slotVisitor.mutatorMarkStack());
2738         },
2739         [this] (SlotVisitor&) -> double {
2740             return m_mutatorMarkStack->size() + m_raceMarkStack->size();
2741         },
2742         ConstraintVolatility::GreyedByExecution);
2743 }
2744
2745 void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2746 {
2747     PreventCollectionScope preventCollectionScope(*this);
2748     m_constraintSet->add(WTFMove(constraint));
2749 }
2750
2751 void Heap::notifyIsSafeToCollect()
2752 {
2753     MonotonicTime before;
2754     if (Options::logGC()) {
2755         before = MonotonicTime::now();
2756         dataLog("[GC<", RawPointer(this), ">: starting ");
2757     }
2758     
2759     addCoreConstraints();
2760     
2761     m_isSafeToCollect = true;
2762     
2763     if (Options::collectContinuously()) {
2764         m_collectContinuouslyThread = WTF::Thread::create(
2765             "JSC DEBUG Continuous GC",
2766             [this] () {
2767                 MonotonicTime initialTime = MonotonicTime::now();
2768                 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2769                 while (!m_shouldStopCollectingContinuously) {
2770                     {
2771                         LockHolder locker(*m_threadLock);
2772                         if (m_requests.isEmpty()) {
2773                             m_requests.append(std::nullopt);
2774                             m_lastGrantedTicket++;
2775                             m_threadCondition->notifyOne(locker);
2776                         }
2777                     }
2778                     
2779                     {
2780                         LockHolder locker(m_collectContinuouslyLock);
2781                         Seconds elapsed = MonotonicTime::now() - initialTime;
2782                         Seconds elapsedInPeriod = elapsed % period;
2783                         MonotonicTime timeToWakeUp =
2784                             initialTime + elapsed - elapsedInPeriod + period;
2785                         while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2786                             m_collectContinuouslyCondition.waitUntil(
2787                                 m_collectContinuouslyLock, timeToWakeUp);
2788                         }
2789                     }
2790                 }
2791             });
2792     }
2793     
2794     if (Options::logGC())
2795         dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2796 }
2797
2798 void Heap::preventCollection()
2799 {
2800     if (!m_isSafeToCollect)
2801         return;
2802     
2803     // This prevents the collectContinuously thread from starting a collection.
2804     m_collectContinuouslyLock.lock();
2805     
2806     // Wait for all collections to finish.
2807     waitForCollector(
2808         [&] (const AbstractLocker&) -> bool {
2809             ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2810             return m_lastServedTicket == m_lastGrantedTicket;
2811         });
2812     
2813     // Now a collection can only start if this thread starts it.
2814     RELEASE_ASSERT(!m_collectionScope);
2815 }
2816
2817 void Heap::allowCollection()
2818 {
2819     if (!m_isSafeToCollect)
2820         return;
2821     
2822     m_collectContinuouslyLock.unlock();
2823 }
2824
2825 template<typename Func>
2826 void Heap::forEachSlotVisitor(const Func& func)
2827 {
2828     auto locker = holdLock(m_parallelSlotVisitorLock);
2829     func(*m_collectorSlotVisitor);
2830     func(*m_mutatorSlotVisitor);
2831     for (auto& slotVisitor : m_parallelSlotVisitors)
2832         func(*slotVisitor);
2833 }
2834
2835 void Heap::setMutatorShouldBeFenced(bool value)
2836 {
2837     m_mutatorShouldBeFenced = value;
2838     m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2839 }
2840
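// Illustrative arithmetic (the option values are made up): with Options::gcIncrementScale() == 2, a
// 1000-byte allocation adds 2000 to m_incrementBalance. Once the balance reaches
// Options::gcIncrementBytes(), the mutator's SlotVisitor drains up to
// min(balance, Options::gcIncrementMaxBytes()) bytes of marking work, and the balance is debited by the
// bytes actually visited, possibly going negative if the drain overshoots.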
2841 void Heap::performIncrement(size_t bytes)
2842 {
2843     if (!m_objectSpace.isMarking())
2844         return;
2845
2846     m_incrementBalance += bytes * Options::gcIncrementScale();
2847
2848     // Save ourselves from craziness: since this is just an optimization, it's OK to reset to any
2849     // consistent state if the double goes wild.
2850     if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2851         m_incrementBalance = 0;
2852     
2853     if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2854         return;
2855
2856     double targetBytes = m_incrementBalance;
2857     if (targetBytes <= 0)
2858         return;
2859     targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2860
2861     SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2862     ParallelModeEnabler parallelModeEnabler(slotVisitor);
2863     size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2864     // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2865     m_incrementBalance -= bytesVisited;
2866 }
2867
2868 void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2869 {
2870     m_heapFinalizerCallbacks.append(callback);
2871 }
2872
2873 void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2874 {
2875     m_heapFinalizerCallbacks.removeFirst(callback);
2876 }
2877
2878 } // namespace JSC