MarkedBlock should know what objects are live during marking
Source/JavaScriptCore/heap/Heap.cpp (WebKit)
1 /*
2  *  Copyright (C) 2003-2009, 2011, 2013-2016 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "CodeBlockSet.h"
26 #include "ConservativeRoots.h"
27 #include "DFGWorklist.h"
28 #include "EdenGCActivityCallback.h"
29 #include "FullGCActivityCallback.h"
30 #include "GCActivityCallback.h"
31 #include "GCIncomingRefCountedSetInlines.h"
32 #include "GCSegmentedArrayInlines.h"
33 #include "GCTypeMap.h"
34 #include "HasOwnPropertyCache.h"
35 #include "HeapHelperPool.h"
36 #include "HeapIterationScope.h"
37 #include "HeapProfiler.h"
38 #include "HeapRootVisitor.h"
39 #include "HeapSnapshot.h"
40 #include "HeapStatistics.h"
41 #include "HeapVerifier.h"
42 #include "IncrementalSweeper.h"
43 #include "Interpreter.h"
44 #include "JITStubRoutineSet.h"
45 #include "JITWorklist.h"
46 #include "JSCInlines.h"
47 #include "JSGlobalObject.h"
48 #include "JSLock.h"
49 #include "JSVirtualMachineInternal.h"
50 #include "MarkedSpaceInlines.h"
51 #include "SamplingProfiler.h"
52 #include "ShadowChicken.h"
53 #include "SuperSampler.h"
54 #include "TypeProfilerLog.h"
55 #include "UnlinkedCodeBlock.h"
56 #include "VM.h"
57 #include "WeakSetInlines.h"
58 #include <algorithm>
59 #include <wtf/CurrentTime.h>
60 #include <wtf/MainThread.h>
61 #include <wtf/ParallelVectorIterator.h>
62 #include <wtf/ProcessID.h>
63 #include <wtf/RAMSize.h>
64 #include <wtf/SimpleStats.h>
65
66 #if USE(FOUNDATION)
67 #if __has_include(<objc/objc-internal.h>)
68 #include <objc/objc-internal.h>
69 #else
70 extern "C" void* objc_autoreleasePoolPush(void);
71 extern "C" void objc_autoreleasePoolPop(void *context);
72 #endif
73 #endif // USE(FOUNDATION)
74
75 using namespace std;
76
77 namespace JSC {
78
79 namespace {
80
81 static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
82 const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
83
84 size_t minHeapSize(HeapType heapType, size_t ramSize)
85 {
86     if (heapType == LargeHeap)
87         return min(largeHeapSize, ramSize / 4);
88     return smallHeapSize;
89 }
90
91 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
92 {
93     // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
94     if (heapSize < ramSize / 4)
95         return 2 * heapSize;
96     if (heapSize < ramSize / 2)
97         return 1.5 * heapSize;
98     return 1.25 * heapSize;
99 }
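
// Worked example of the growth policy above (hypothetical numbers, 64-bit size_t):
// on a device with 8 GB of RAM, a 1 GB heap is allowed to double, a 3 GB heap may
// grow by 1.5x, and a 5 GB heap by 1.25x.
#if 0 // Illustrative sketch only; not compiled.
static void proportionalHeapSizeExample()
{
    const size_t GB = 1024ull * 1024 * 1024;
    const size_t ram = 8 * GB;
    ASSERT(proportionalHeapSize(1 * GB, ram) == 2 * GB); // heapSize < ram / 4: double.
    ASSERT(proportionalHeapSize(3 * GB, ram) == static_cast<size_t>(1.5 * (3 * GB))); // heapSize < ram / 2: grow by 1.5x.
    ASSERT(proportionalHeapSize(5 * GB, ram) == static_cast<size_t>(1.25 * (5 * GB))); // Otherwise: grow by 1.25x.
}
#endif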
100
101 bool isValidSharedInstanceThreadState(VM* vm)
102 {
103     return vm->currentThreadIsHoldingAPILock();
104 }
105
106 bool isValidThreadState(VM* vm)
107 {
108     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
109         return false;
110
111     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
112         return false;
113
114     return true;
115 }
116
117 void recordType(TypeCountSet& set, JSCell* cell)
118 {
119     const char* typeName = "[unknown]";
120     const ClassInfo* info = cell->classInfo();
121     if (info && info->className)
122         typeName = info->className;
123     set.add(typeName);
124 }
125
126 bool measurePhaseTiming()
127 {
128     return false;
129 }
130
131 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
132 {
133     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
134     static std::once_flag once;
135     std::call_once(
136         once,
137         [] {
138             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
139         });
140     return *result;
141 }
142
143 SimpleStats& timingStats(const char* name, HeapOperation operation)
144 {
145     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[operation];
146 }
147
148 class TimingScope {
149 public:
150     TimingScope(HeapOperation operation, const char* name)
151         : m_operation(operation)
152         , m_name(name)
153     {
154         if (measurePhaseTiming())
155             m_before = monotonicallyIncreasingTimeMS();
156     }
157     
158     TimingScope(Heap& heap, const char* name)
159         : TimingScope(heap.operationInProgress(), name)
160     {
161     }
162     
163     void setOperation(HeapOperation operation)
164     {
165         m_operation = operation;
166     }
167     
168     void setOperation(Heap& heap)
169     {
170         setOperation(heap.operationInProgress());
171     }
172     
173     ~TimingScope()
174     {
175         if (measurePhaseTiming()) {
176             double after = monotonicallyIncreasingTimeMS();
177             double timing = after - m_before;
178             SimpleStats& stats = timingStats(m_name, m_operation);
179             stats.add(timing);
180             dataLog("[GC:", m_operation, "] ", m_name, " took: ", timing, " ms (average ", stats.mean(), " ms).\n");
181         }
182     }
183 private:
184     HeapOperation m_operation;
185     double m_before;
186     const char* m_name;
187 };
188
189 } // anonymous namespace
190
191 Heap::Heap(VM* vm, HeapType heapType)
192     : m_heapType(heapType)
193     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
194     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
195     , m_sizeAfterLastCollect(0)
196     , m_sizeAfterLastFullCollect(0)
197     , m_sizeBeforeLastFullCollect(0)
198     , m_sizeAfterLastEdenCollect(0)
199     , m_sizeBeforeLastEdenCollect(0)
200     , m_bytesAllocatedThisCycle(0)
201     , m_bytesAbandonedSinceLastFullCollect(0)
202     , m_maxEdenSize(m_minBytesPerCycle)
203     , m_maxHeapSize(m_minBytesPerCycle)
204     , m_shouldDoFullCollection(false)
205     , m_totalBytesVisited(0)
206     , m_operationInProgress(NoOperation)
207     , m_objectSpace(this)
208     , m_extraMemorySize(0)
209     , m_deprecatedExtraMemorySize(0)
210     , m_machineThreads(this)
211     , m_slotVisitor(*this)
212     , m_handleSet(vm)
213     , m_codeBlocks(std::make_unique<CodeBlockSet>())
214     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
215     , m_isSafeToCollect(false)
216     , m_writeBarrierBuffer(256)
217     , m_vm(vm)
218     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
219     // schedule the timer if we've never done a collection.
220     , m_lastFullGCLength(0.01)
221     , m_lastEdenGCLength(0.01)
222     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
223     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
224 #if USE(CF)
225     , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
226 #else
227     , m_sweeper(std::make_unique<IncrementalSweeper>(this))
228 #endif
229     , m_deferralDepth(0)
230 #if USE(FOUNDATION)
231     , m_delayedReleaseRecursionCount(0)
232 #endif
233     , m_helperClient(&heapHelperPool())
234 {
235     if (Options::verifyHeap())
236         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
237 }
238
239 Heap::~Heap()
240 {
241     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
242         WeakBlock::destroy(*this, block);
243 }
244
245 bool Heap::isPagedOut(double deadline)
246 {
247     return m_objectSpace.isPagedOut(deadline);
248 }
249
250 // The VM is being destroyed and the collector will never run again.
251 // Run all pending finalizers now because we won't get another chance.
252 void Heap::lastChanceToFinalize()
253 {
254     RELEASE_ASSERT(!m_vm->entryScope);
255     RELEASE_ASSERT(m_operationInProgress == NoOperation);
256
257     m_arrayBuffers.lastChanceToFinalize();
258     m_codeBlocks->lastChanceToFinalize();
259     m_objectSpace.lastChanceToFinalize();
260     releaseDelayedReleasedObjects();
261
262     sweepAllLogicallyEmptyWeakBlocks();
263 }
264
265 void Heap::releaseDelayedReleasedObjects()
266 {
267 #if USE(FOUNDATION)
268     // We need to guard against the case that releasing an object can create more objects due to the
269     // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
270     // back here and could try to recursively release objects. We guard that with a recursive entry
271     // count. Only the initial call will release objects; recursive calls simply return and let
272     // the initial call to the function take care of any objects created during release time.
273     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
274     // and use a temp Vector for the actual releasing.
275     if (!m_delayedReleaseRecursionCount++) {
276         while (!m_delayedReleaseObjects.isEmpty()) {
277             ASSERT(m_vm->currentThreadIsHoldingAPILock());
278
279             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
280
281             {
282                 // We need to drop locks before calling out to arbitrary code.
283                 JSLock::DropAllLocks dropAllLocks(m_vm);
284
285                 void* context = objc_autoreleasePoolPush();
286                 objectsToRelease.clear();
287                 objc_autoreleasePoolPop(context);
288             }
289         }
290     }
291     m_delayedReleaseRecursionCount--;
292 #endif
293 }
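
// A minimal generic sketch of the guard-and-drain pattern described above: only the
// outermost call drains the queue; re-entrant calls (triggered while releasing) just
// return, and the outer loop picks up anything they queued. The names below are
// hypothetical and not part of JSC.
#if 0 // Illustrative sketch only; not compiled.
#include <functional>
#include <utility>
#include <vector>

class DelayedWorkQueue {
public:
    void append(std::function<void()> work) { m_pending.push_back(std::move(work)); }

    void drain()
    {
        if (m_recursionCount++) {
            // Re-entrant call: let the outermost call do the work.
            m_recursionCount--;
            return;
        }
        while (!m_pending.empty()) {
            // Move the queue aside so that work queued during releasing is handled by the next pass.
            std::vector<std::function<void()>> batch = std::move(m_pending);
            m_pending.clear();
            for (auto& work : batch)
                work(); // May re-enter drain(); the guard above makes that a no-op.
        }
        m_recursionCount--;
    }

private:
    std::vector<std::function<void()>> m_pending;
    unsigned m_recursionCount { 0 };
};
#endif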
294
295 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
296 {
297     didAllocate(size);
298     collectIfNecessaryOrDefer();
299 }
300
301 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
302 {
303     m_deprecatedExtraMemorySize += size;
304     reportExtraMemoryAllocatedSlowCase(size);
305 }
306
307 void Heap::reportAbandonedObjectGraph()
308 {
309     // Our clients don't know exactly how much memory they
310     // are abandoning so we just guess for them.
311     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
312
313     // We want to accelerate the next collection. Because memory has just 
314     // been abandoned, the next collection has the potential to 
315     // be more profitable. Since allocation is the trigger for collection, 
316     // we hasten the next collection by pretending that we've allocated more memory. 
317     if (m_fullActivityCallback) {
318         m_fullActivityCallback->didAllocate(
319             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
320     }
321     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
322 }
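
// Worked example (hypothetical numbers): with capacity() at 100 MB the guess is
// abandonedBytes = 10 MB. If the heap measured 60 MB after the last collection and
// 40 MB after the last full collection, 5 MB has been allocated this cycle, and
// nothing was previously abandoned, then the full-GC timer is told that
// 60 - 40 + 5 + 0 = 25 MB were "allocated", which pulls the next full collection
// earlier; the 10 MB guess is then added to m_bytesAbandonedSinceLastFullCollect.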
323
324 void Heap::protect(JSValue k)
325 {
326     ASSERT(k);
327     ASSERT(m_vm->currentThreadIsHoldingAPILock());
328
329     if (!k.isCell())
330         return;
331
332     m_protectedValues.add(k.asCell());
333 }
334
335 bool Heap::unprotect(JSValue k)
336 {
337     ASSERT(k);
338     ASSERT(m_vm->currentThreadIsHoldingAPILock());
339
340     if (!k.isCell())
341         return false;
342
343     return m_protectedValues.remove(k.asCell());
344 }
345
346 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
347 {
348     if (m_arrayBuffers.addReference(cell, buffer)) {
349         collectIfNecessaryOrDefer();
350         didAllocate(buffer->gcSizeEstimateInBytes());
351     }
352 }
353
354 void Heap::harvestWeakReferences()
355 {
356     m_slotVisitor.harvestWeakReferences();
357 }
358
359 void Heap::finalizeUnconditionalFinalizers()
360 {
361     m_slotVisitor.finalizeUnconditionalFinalizers();
362 }
363
364 void Heap::willStartIterating()
365 {
366     m_objectSpace.willStartIterating();
367 }
368
369 void Heap::didFinishIterating()
370 {
371     m_objectSpace.didFinishIterating();
372 }
373
374 void Heap::completeAllJITPlans()
375 {
376 #if ENABLE(JIT)
377     JITWorklist::instance()->completeAllForVM(*m_vm);
378 #endif // ENABLE(JIT)
379 #if ENABLE(DFG_JIT)
380     DFG::completeAllPlansForVM(*m_vm);
381 #endif
382 }
383
384 void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
385 {
386     TimingScope markRootsTimingScope(*this, "Heap::markRoots");
387     
388     ASSERT(isValidThreadState(m_vm));
389
390     HeapRootVisitor heapRootVisitor(m_slotVisitor);
391     
392     {
393         TimingScope preConvergenceTimingScope(*this, "Heap::markRoots before convergence");
394
395 #if ENABLE(DFG_JIT)
396         DFG::rememberCodeBlocks(*m_vm);
397 #endif
398
399 #if ENABLE(SAMPLING_PROFILER)
400         if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
401             // Note that we need to own the lock from now until we're done
402             // marking the SamplingProfiler's data because once we verify the
403             // SamplingProfiler's stack traces, we don't want it to accumulate
404             // more stack traces before we get the chance to mark it.
405             // This lock is released inside visitSamplingProfiler().
406             samplingProfiler->getLock().lock();
407             samplingProfiler->processUnverifiedStackTraces();
408         }
409 #endif // ENABLE(SAMPLING_PROFILER)
410
411         if (m_operationInProgress == FullCollection) {
412             m_opaqueRoots.clear();
413             m_slotVisitor.clearMarkStack();
414         }
415
416         beginMarking();
417
418         m_parallelMarkersShouldExit = false;
419
420         m_helperClient.setFunction(
421             [this] () {
422                 SlotVisitor* slotVisitor;
423                 {
424                     LockHolder locker(m_parallelSlotVisitorLock);
425                     if (m_availableParallelSlotVisitors.isEmpty()) {
426                         std::unique_ptr<SlotVisitor> newVisitor =
427                             std::make_unique<SlotVisitor>(*this);
428                         slotVisitor = newVisitor.get();
429                         m_parallelSlotVisitors.append(WTFMove(newVisitor));
430                     } else
431                         slotVisitor = m_availableParallelSlotVisitors.takeLast();
432                 }
433
434                 WTF::registerGCThread();
435
436                 {
437                     ParallelModeEnabler parallelModeEnabler(*slotVisitor);
438                     slotVisitor->didStartMarking();
439                     slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
440                 }
441
442                 {
443                     LockHolder locker(m_parallelSlotVisitorLock);
444                     m_availableParallelSlotVisitors.append(slotVisitor);
445                 }
446             });
447
448         m_slotVisitor.didStartMarking();
449     }
450     
451     {
452         SuperSamplerScope superSamplerScope(false);
453         TimingScope convergenceTimingScope(*this, "Heap::markRoots convergence");
454         ParallelModeEnabler enabler(m_slotVisitor);
455         
456         m_slotVisitor.donateAndDrain();
457
458         {
459             TimingScope preConvergenceTimingScope(*this, "Heap::markRoots conservative scan");
460             ConservativeRoots conservativeRoots(*this);
461             SuperSamplerScope superSamplerScope(false);
462             gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
463             gatherJSStackRoots(conservativeRoots);
464             gatherScratchBufferRoots(conservativeRoots);
465             visitConservativeRoots(conservativeRoots);
466             
467             // We want to do this to conservatively ensure that we rescan any code blocks that are
468             // running right now. However, we need to be sure to do it *after* we mark the code block
469             // so that we know for sure if it really needs a barrier.
470             m_codeBlocks->writeBarrierCurrentlyExecuting(this);
471         }
472
473         visitExternalRememberedSet();
474         visitSmallStrings();
475         visitProtectedObjects(heapRootVisitor);
476         visitArgumentBuffers(heapRootVisitor);
477         visitException(heapRootVisitor);
478         visitStrongHandles(heapRootVisitor);
479         visitHandleStack(heapRootVisitor);
480         visitSamplingProfiler();
481         visitShadowChicken();
482         traceCodeBlocksAndJITStubRoutines();
483         m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
484     }
485     
486     TimingScope postConvergenceTimingScope(*this, "Heap::markRoots after convergence");
487
488     // Weak references must be marked last because their liveness depends on
489     // the liveness of the rest of the object graph.
490     visitWeakHandles(heapRootVisitor);
491
492     {
493         std::lock_guard<Lock> lock(m_markingMutex);
494         m_parallelMarkersShouldExit = true;
495         m_markingConditionVariable.notifyAll();
496     }
497     m_helperClient.finish();
498     updateObjectCounts(gcStartTime);
499     endMarking();
500 }
501
502 void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
503 {
504     m_jitStubRoutines->clearMarks();
505     m_machineThreads.gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
506 }
507
508 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
509 {
510 #if !ENABLE(JIT)
511     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
512 #else
513     UNUSED_PARAM(roots);
514 #endif
515 }
516
517 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
518 {
519 #if ENABLE(DFG_JIT)
520     m_vm->gatherConservativeRoots(roots);
521 #else
522     UNUSED_PARAM(roots);
523 #endif
524 }
525
526 void Heap::beginMarking()
527 {
528     TimingScope timingScope(*this, "Heap::beginMarking");
529     if (m_operationInProgress == FullCollection)
530         m_codeBlocks->clearMarksForFullCollection();
531     
532     {
533         TimingScope clearMarksTimingScope(*this, "m_objectSpace.beginMarking");
534         m_objectSpace.beginMarking();
535     }
536 }
537
538 void Heap::visitExternalRememberedSet()
539 {
540 #if JSC_OBJC_API_ENABLED
541     scanExternalRememberedSet(*m_vm, m_slotVisitor);
542 #endif
543 }
544
545 void Heap::visitSmallStrings()
546 {
547     if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
548         return;
549
550     m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
551     if (Options::logGC() == GCLogging::Verbose)
552         dataLog("Small strings:\n", m_slotVisitor);
553     m_slotVisitor.donateAndDrain();
554 }
555
556 void Heap::visitConservativeRoots(ConservativeRoots& roots)
557 {
558     m_slotVisitor.append(roots);
559
560     if (Options::logGC() == GCLogging::Verbose)
561         dataLog("Conservative Roots:\n", m_slotVisitor);
562
563     m_slotVisitor.donateAndDrain();
564 }
565
566 void Heap::visitCompilerWorklistWeakReferences()
567 {
568 #if ENABLE(DFG_JIT)
569     for (auto worklist : m_suspendedCompilerWorklists)
570         worklist->visitWeakReferences(m_slotVisitor);
571
572     if (Options::logGC() == GCLogging::Verbose)
573         dataLog("DFG Worklists:\n", m_slotVisitor);
574 #endif
575 }
576
577 void Heap::removeDeadCompilerWorklistEntries()
578 {
579 #if ENABLE(DFG_JIT)
580     for (auto worklist : m_suspendedCompilerWorklists)
581         worklist->removeDeadPlans(*m_vm);
582 #endif
583 }
584
585 bool Heap::isHeapSnapshotting() const
586 {
587     HeapProfiler* heapProfiler = m_vm->heapProfiler();
588     if (UNLIKELY(heapProfiler))
589         return heapProfiler->activeSnapshotBuilder();
590     return false;
591 }
592
593 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
594     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
595         : m_builder(builder)
596     {
597     }
598
599     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
600     {
601         if (kind == HeapCell::JSCell) {
602             JSCell* cell = static_cast<JSCell*>(heapCell);
603             cell->methodTable()->heapSnapshot(cell, m_builder);
604         }
605         return IterationStatus::Continue;
606     }
607
608     HeapSnapshotBuilder& m_builder;
609 };
610
611 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
612 {
613     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
614         HeapIterationScope heapIterationScope(*this);
615         GatherHeapSnapshotData functor(*builder);
616         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
617     }
618 }
619
620 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
621     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
622         : m_snapshot(snapshot)
623     {
624     }
625
626     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
627     {
628         if (kind == HeapCell::JSCell)
629             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
630         return IterationStatus::Continue;
631     }
632
633     HeapSnapshot& m_snapshot;
634 };
635
636 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
637 {
638     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
639         HeapIterationScope heapIterationScope(*this);
640         RemoveDeadHeapSnapshotNodes functor(*snapshot);
641         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
642         snapshot->shrinkToFit();
643     }
644 }
645
646 void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
647 {
648     for (auto& pair : m_protectedValues)
649         heapRootVisitor.visit(&pair.key);
650
651     if (Options::logGC() == GCLogging::Verbose)
652         dataLog("Protected Objects:\n", m_slotVisitor);
653
654     m_slotVisitor.donateAndDrain();
655 }
656
657 void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
658 {
659     if (!m_markListSet || !m_markListSet->size())
660         return;
661
662     MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
663
664     if (Options::logGC() == GCLogging::Verbose)
665         dataLog("Argument Buffers:\n", m_slotVisitor);
666
667     m_slotVisitor.donateAndDrain();
668 }
669
670 void Heap::visitException(HeapRootVisitor& visitor)
671 {
672     if (!m_vm->exception() && !m_vm->lastException())
673         return;
674
675     visitor.visit(m_vm->addressOfException());
676     visitor.visit(m_vm->addressOfLastException());
677
678     if (Options::logGC() == GCLogging::Verbose)
679         dataLog("Exceptions:\n", m_slotVisitor);
680
681     m_slotVisitor.donateAndDrain();
682 }
683
684 void Heap::visitStrongHandles(HeapRootVisitor& visitor)
685 {
686     m_handleSet.visitStrongHandles(visitor);
687
688     if (Options::logGC() == GCLogging::Verbose)
689         dataLog("Strong Handles:\n", m_slotVisitor);
690
691     m_slotVisitor.donateAndDrain();
692 }
693
694 void Heap::visitHandleStack(HeapRootVisitor& visitor)
695 {
696     m_handleStack.visit(visitor);
697
698     if (Options::logGC() == GCLogging::Verbose)
699         dataLog("Handle Stack:\n", m_slotVisitor);
700
701     m_slotVisitor.donateAndDrain();
702 }
703
704 void Heap::visitSamplingProfiler()
705 {
706 #if ENABLE(SAMPLING_PROFILER)
707     if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
708         ASSERT(samplingProfiler->getLock().isLocked());
709         samplingProfiler->visit(m_slotVisitor);
710         if (Options::logGC() == GCLogging::Verbose)
711             dataLog("Sampling Profiler data:\n", m_slotVisitor);
712
713         m_slotVisitor.donateAndDrain();
714         samplingProfiler->getLock().unlock();
715     }
716 #endif // ENABLE(SAMPLING_PROFILER)
717 }
718
719 void Heap::visitShadowChicken()
720 {
721     m_vm->shadowChicken().visitChildren(m_slotVisitor);
722 }
723
724 void Heap::traceCodeBlocksAndJITStubRoutines()
725 {
726     m_jitStubRoutines->traceMarkedStubRoutines(m_slotVisitor);
727
728     if (Options::logGC() == GCLogging::Verbose)
729         dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
730
731     m_slotVisitor.donateAndDrain();
732 }
733
734 void Heap::visitWeakHandles(HeapRootVisitor& visitor)
735 {
736     TimingScope timingScope(*this, "Heap::visitWeakHandles");
737     while (true) {
738         {
739             TimingScope timingScope(*this, "m_objectSpace.visitWeakSets");
740             m_objectSpace.visitWeakSets(visitor);
741         }
742         harvestWeakReferences();
743         visitCompilerWorklistWeakReferences();
744         if (m_slotVisitor.isEmpty())
745             break;
746
747         if (Options::logGC() == GCLogging::Verbose)
748             dataLog("Live Weak Handles:\n", m_slotVisitor);
749
750         {
751             ParallelModeEnabler enabler(m_slotVisitor);
752             m_slotVisitor.donateAndDrain();
753             m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
754         }
755     }
756 }
757
758 void Heap::updateObjectCounts(double gcStartTime)
759 {
760     if (Options::logGC() == GCLogging::Verbose) {
761         size_t visitCount = m_slotVisitor.visitCount();
762         visitCount += threadVisitCount();
763         dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
764     }
765     
766     if (m_operationInProgress == FullCollection)
767         m_totalBytesVisited = 0;
768
769     m_totalBytesVisitedThisCycle = m_slotVisitor.bytesVisited() + threadBytesVisited();
770     
771     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
772 }
773
774 void Heap::endMarking()
775 {
776     m_slotVisitor.reset();
777
778     for (auto& parallelVisitor : m_parallelSlotVisitors)
779         parallelVisitor->reset();
780
781     ASSERT(m_sharedMarkStack.isEmpty());
782     m_weakReferenceHarvesters.removeAll();
783     
784     m_objectSpace.endMarking();
785 }
786
787 size_t Heap::objectCount()
788 {
789     return m_objectSpace.objectCount();
790 }
791
792 size_t Heap::extraMemorySize()
793 {
794     return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
795 }
796
797 size_t Heap::size()
798 {
799     return m_objectSpace.size() + extraMemorySize();
800 }
801
802 size_t Heap::capacity()
803 {
804     return m_objectSpace.capacity() + extraMemorySize();
805 }
806
807 size_t Heap::protectedGlobalObjectCount()
808 {
809     size_t result = 0;
810     forEachProtectedCell(
811         [&] (JSCell* cell) {
812             if (cell->isObject() && asObject(cell)->isGlobalObject())
813                 result++;
814         });
815     return result;
816 }
817
818 size_t Heap::globalObjectCount()
819 {
820     HeapIterationScope iterationScope(*this);
821     size_t result = 0;
822     m_objectSpace.forEachLiveCell(
823         iterationScope,
824         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
825             if (kind != HeapCell::JSCell)
826                 return IterationStatus::Continue;
827             JSCell* cell = static_cast<JSCell*>(heapCell);
828             if (cell->isObject() && asObject(cell)->isGlobalObject())
829                 result++;
830             return IterationStatus::Continue;
831         });
832     return result;
833 }
834
835 size_t Heap::protectedObjectCount()
836 {
837     size_t result = 0;
838     forEachProtectedCell(
839         [&] (JSCell*) {
840             result++;
841         });
842     return result;
843 }
844
845 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
846 {
847     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
848     forEachProtectedCell(
849         [&] (JSCell* cell) {
850             recordType(*result, cell);
851         });
852     return result;
853 }
854
855 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
856 {
857     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
858     HeapIterationScope iterationScope(*this);
859     m_objectSpace.forEachLiveCell(
860         iterationScope,
861         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
862             if (kind == HeapCell::JSCell)
863                 recordType(*result, static_cast<JSCell*>(cell));
864             return IterationStatus::Continue;
865         });
866     return result;
867 }
868
869 void Heap::deleteAllCodeBlocks()
870 {
871     // If JavaScript is running, it's not safe to delete all JavaScript code, since
872     // we'll end up returning to deleted code.
873     RELEASE_ASSERT(!m_vm->entryScope);
874     ASSERT(m_operationInProgress == NoOperation);
875
876     completeAllJITPlans();
877
878     for (ExecutableBase* executable : m_executables)
879         executable->clearCode();
880 }
881
882 void Heap::deleteAllUnlinkedCodeBlocks()
883 {
884     for (ExecutableBase* current : m_executables) {
885         if (!current->isFunctionExecutable())
886             continue;
887         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
888     }
889 }
890
891 void Heap::clearUnmarkedExecutables()
892 {
893     for (unsigned i = m_executables.size(); i--;) {
894         ExecutableBase* current = m_executables[i];
895         if (isMarked(current))
896             continue;
897
898         // Eagerly dereference the Executable's JITCode in order to run watchpoint
899         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
900         current->clearCode();
901         std::swap(m_executables[i], m_executables.last());
902         m_executables.removeLast();
903     }
904
905     m_executables.shrinkToFit();
906 }
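
// A minimal sketch of the reverse-iteration swap-remove idiom used above, which erases
// elements without shifting the tail of the vector. The helper below is hypothetical
// and uses std::vector rather than WTF::Vector.
#if 0 // Illustrative sketch only; not compiled.
#include <utility>
#include <vector>

template<typename T, typename Predicate>
void unorderedRemoveIf(std::vector<T>& vector, const Predicate& shouldRemove)
{
    for (size_t i = vector.size(); i--;) {
        if (!shouldRemove(vector[i]))
            continue;
        std::swap(vector[i], vector.back()); // O(1) removal; element order is not preserved.
        vector.pop_back();
    }
    vector.shrink_to_fit();
}
#endif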
907
908 void Heap::deleteUnmarkedCompiledCode()
909 {
910     clearUnmarkedExecutables();
911     m_codeBlocks->deleteUnmarkedAndUnreferenced(m_operationInProgress);
912     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
913 }
914
915 void Heap::addToRememberedSet(const JSCell* cell)
916 {
917     ASSERT(cell);
918     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
919     ASSERT(isBlack(cell->cellState()));
920     // Indicate that this object is grey and that it's one of the following:
921     // - A re-greyed object during a concurrent collection.
922     // - An old remembered object.
923     // "OldGrey" doesn't tell us which of these things is true, but we usually treat the two cases the
924     // same.
925     cell->setCellState(CellState::OldGrey);
926     m_slotVisitor.appendToMarkStack(const_cast<JSCell*>(cell));
927 }
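
// A minimal generic sketch of the remembered-set idea used above: when an already-black
// old object is mutated, it is re-greyed and queued so that a later (eden) mark pass
// revisits its fields. The types below are hypothetical and not part of JSC.
#if 0 // Illustrative sketch only; not compiled.
#include <vector>

enum class SketchCellState { NewWhite, NewGrey, OldGrey, OldBlack };

struct SketchCell {
    SketchCellState state { SketchCellState::NewWhite };
};

struct SketchRememberedSet {
    std::vector<SketchCell*> cells;

    void remember(SketchCell* cell)
    {
        // Only old, already-visited (black) objects need remembering; new and grey
        // objects will be visited by the collector anyway.
        if (cell->state != SketchCellState::OldBlack)
            return;
        cell->state = SketchCellState::OldGrey;
        cells.push_back(cell); // Revisited at the start of the next eden collection.
    }
};
#endif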
928
929 void Heap::collectAllGarbage()
930 {
931     SuperSamplerScope superSamplerScope(false);
932     if (!m_isSafeToCollect)
933         return;
934
935     collectWithoutAnySweep(FullCollection);
936
937     DeferGCForAWhile deferGC(*this);
938     if (UNLIKELY(Options::useImmortalObjects()))
939         sweeper()->willFinishSweeping();
940     else {
941         double before = 0;
942         if (Options::logGC()) {
943             dataLog("[Full sweep: ", capacity() / 1024, " kb ");
944             before = currentTimeMS();
945         }
946         m_objectSpace.sweep();
947         m_objectSpace.shrink();
948         if (Options::logGC()) {
949             double after = currentTimeMS();
950             dataLog("=> ", capacity() / 1024, " kb, ", after - before, " ms]\n");
951         }
952     }
953     m_objectSpace.assertNoUnswept();
954
955     sweepAllLogicallyEmptyWeakBlocks();
956 }
957
958 void Heap::collect(HeapOperation collectionType)
959 {
960     SuperSamplerScope superSamplerScope(false);
961     if (!m_isSafeToCollect)
962         return;
963
964     collectWithoutAnySweep(collectionType);
965 }
966
967 NEVER_INLINE void Heap::collectWithoutAnySweep(HeapOperation collectionType)
968 {
969     void* stackTop;
970     ALLOCATE_AND_GET_REGISTER_STATE(registers);
971
972     collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
973
974     sanitizeStackForVM(m_vm);
975 }
976
977 NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
978 {
979     SuperSamplerScope superSamplerScope(false);
980     TimingScope collectImplTimingScope(collectionType, "Heap::collectImpl");
981     
982 #if ENABLE(ALLOCATION_LOGGING)
983     dataLogF("JSC GC starting collection.\n");
984 #endif
985     
986     double before = 0;
987     if (Options::logGC()) {
988         dataLog("[GC: ", capacity() / 1024, " kb ");
989         before = currentTimeMS();
990     }
991     
992     double gcStartTime;
993     {
994         TimingScope earlyTimingScope(collectionType, "Heap::collectImpl before markRoots");
995
996         if (vm()->typeProfiler()) {
997             DeferGCForAWhile awhile(*this);
998             vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
999         }
1000
1001 #if ENABLE(JIT)
1002         {
1003             DeferGCForAWhile awhile(*this);
1004             JITWorklist::instance()->completeAllForVM(*m_vm);
1005         }
1006 #endif // ENABLE(JIT)
1007
1008         vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1009
1010         RELEASE_ASSERT(!m_deferralDepth);
1011         ASSERT(vm()->currentThreadIsHoldingAPILock());
1012         RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1013         ASSERT(m_isSafeToCollect);
1014         RELEASE_ASSERT(m_operationInProgress == NoOperation);
1015
1016         suspendCompilerThreads();
1017         willStartCollection(collectionType);
1018         
1019         collectImplTimingScope.setOperation(*this);
1020         earlyTimingScope.setOperation(*this);
1021
1022         gcStartTime = WTF::monotonicallyIncreasingTime();
1023         if (m_verifier) {
1024             // Verify that live objects from the last GC cycle haven't been corrupted by
1025             // mutators before we begin this new GC cycle.
1026             m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1027
1028             m_verifier->initializeGCCycle();
1029             m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1030         }
1031
1032         flushOldStructureIDTables();
1033         stopAllocation();
1034         prepareForMarking();
1035         flushWriteBarrierBuffer();
1036
1037         if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1038             cache->clear();
1039     }
1040
1041     markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
1042     
1043     TimingScope lateTimingScope(*this, "Heap::collectImpl after markRoots");
1044
1045     if (m_verifier) {
1046         m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1047         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1048     }
1049
1050     if (vm()->typeProfiler())
1051         vm()->typeProfiler()->invalidateTypeSetCache();
1052
1053     reapWeakHandles();
1054     pruneStaleEntriesFromWeakGCMaps();
1055     sweepArrayBuffers();
1056     snapshotUnswept();
1057     finalizeUnconditionalFinalizers();
1058     removeDeadCompilerWorklistEntries();
1059     deleteUnmarkedCompiledCode();
1060     deleteSourceProviderCaches();
1061
1062     notifyIncrementalSweeper();
1063     m_codeBlocks->writeBarrierCurrentlyExecuting(this);
1064     m_codeBlocks->clearCurrentlyExecuting();
1065
1066     prepareForAllocation();
1067     updateAllocationLimits();
1068     didFinishCollection(gcStartTime);
1069     resumeCompilerThreads();
1070     sweepLargeAllocations();
1071     
1072     if (m_verifier) {
1073         m_verifier->trimDeadObjects();
1074         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1075     }
1076
1077     if (Options::logGC()) {
1078         double after = currentTimeMS();
1079         dataLog(after - before, " ms]\n");
1080     }
1081     
1082     if (false) {
1083         dataLog("Heap state after GC:\n");
1084         m_objectSpace.dumpBits();
1085     }
1086 }
1087
1088 void Heap::sweepLargeAllocations()
1089 {
1090     m_objectSpace.sweepLargeAllocations();
1091 }
1092
1093 void Heap::suspendCompilerThreads()
1094 {
1095 #if ENABLE(DFG_JIT)
1096     ASSERT(m_suspendedCompilerWorklists.isEmpty());
1097     for (unsigned i = DFG::numberOfWorklists(); i--;) {
1098         if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
1099             m_suspendedCompilerWorklists.append(worklist);
1100             worklist->suspendAllThreads();
1101         }
1102     }
1103 #endif
1104 }
1105
1106 void Heap::willStartCollection(HeapOperation collectionType)
1107 {
1108     if (Options::logGC())
1109         dataLog("=> ");
1110     
1111     if (shouldDoFullCollection(collectionType)) {
1112         m_operationInProgress = FullCollection;
1113         m_shouldDoFullCollection = false;
1114         if (Options::logGC())
1115             dataLog("FullCollection, ");
1116     } else {
1117         m_operationInProgress = EdenCollection;
1118         if (Options::logGC())
1119             dataLog("EdenCollection, ");
1120     }
1121     if (m_operationInProgress == FullCollection) {
1122         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1123         m_extraMemorySize = 0;
1124         m_deprecatedExtraMemorySize = 0;
1125 #if ENABLE(RESOURCE_USAGE)
1126         m_externalMemorySize = 0;
1127 #endif
1128
1129         if (m_fullActivityCallback)
1130             m_fullActivityCallback->willCollect();
1131     } else {
1132         ASSERT(m_operationInProgress == EdenCollection);
1133         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1134     }
1135
1136     if (m_edenActivityCallback)
1137         m_edenActivityCallback->willCollect();
1138
1139     for (auto* observer : m_observers)
1140         observer->willGarbageCollect();
1141 }
1142
1143 void Heap::flushOldStructureIDTables()
1144 {
1145     m_structureIDTable.flushOldTables();
1146 }
1147
1148 void Heap::flushWriteBarrierBuffer()
1149 {
1150     if (m_operationInProgress == EdenCollection) {
1151         m_writeBarrierBuffer.flush(*this);
1152         return;
1153     }
1154     m_writeBarrierBuffer.reset();
1155 }
1156
1157 void Heap::stopAllocation()
1158 {
1159     m_objectSpace.stopAllocating();
1160 }
1161
1162 void Heap::prepareForMarking()
1163 {
1164     m_objectSpace.prepareForMarking();
1165 }
1166
1167 void Heap::reapWeakHandles()
1168 {
1169     m_objectSpace.reapWeakSets();
1170 }
1171
1172 void Heap::pruneStaleEntriesFromWeakGCMaps()
1173 {
1174     if (m_operationInProgress != FullCollection)
1175         return;
1176     for (auto& pruneCallback : m_weakGCMaps.values())
1177         pruneCallback();
1178 }
1179
1180 void Heap::sweepArrayBuffers()
1181 {
1182     m_arrayBuffers.sweep();
1183 }
1184
1185 void Heap::snapshotUnswept()
1186 {
1187     TimingScope timingScope(*this, "Heap::snapshotUnswept");
1188     m_objectSpace.snapshotUnswept();
1189 }
1190
1191 void Heap::deleteSourceProviderCaches()
1192 {
1193     m_vm->clearSourceProviderCaches();
1194 }
1195
1196 void Heap::notifyIncrementalSweeper()
1197 {
1198     if (m_operationInProgress == FullCollection) {
1199         if (!m_logicallyEmptyWeakBlocks.isEmpty())
1200             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1201     }
1202
1203     m_sweeper->startSweeping();
1204 }
1205
1206 void Heap::prepareForAllocation()
1207 {
1208     m_objectSpace.prepareForAllocation();
1209 }
1210
1211 void Heap::updateAllocationLimits()
1212 {
1213     static const bool verbose = false;
1214     
1215     if (verbose) {
1216         dataLog("\n");
1217         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
1218     }
1219     
1220     // Calculate our current heap size threshold for the purpose of figuring out when we should
1221     // run another collection. This isn't the same as either size() or capacity(), though it should
1222     // be somewhere between the two. The key is to match the size calculations involved in calls to
1223     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
1224     // fragmentation, we may have size() much smaller than capacity().
1225     size_t currentHeapSize = 0;
1226
1227     // For marked space, we use the total number of bytes visited. This matches the logic for
1228     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
1229     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
1230     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
1231     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
1232     currentHeapSize += m_totalBytesVisited;
1233     if (verbose)
1234         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
1235
1236     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
1237     // extra memory reporting.
1238     currentHeapSize += extraMemorySize();
1239
1240     if (verbose)
1241         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
1242     
1243     if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1244         HeapStatistics::exitWithFailure();
1245
1246     if (m_operationInProgress == FullCollection) {
1247         // To avoid pathological GC churn in very small and very large heaps, we set
1248         // the new allocation limit based on the current size of the heap, with a
1249         // fixed minimum.
1250         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1251         if (verbose)
1252             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
1253         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1254         if (verbose)
1255             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
1256         m_sizeAfterLastFullCollect = currentHeapSize;
1257         if (verbose)
1258             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
1259         m_bytesAbandonedSinceLastFullCollect = 0;
1260         if (verbose)
1261             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
1262     } else {
1263         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1264         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
1265         // But we are sloppy, so we have to defend against the overflow.
1266         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
1267         if (verbose)
1268             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1269         m_sizeAfterLastEdenCollect = currentHeapSize;
1270         if (verbose)
1271             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
1272         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1273         double minEdenToOldGenerationRatio = 1.0 / 3.0;
1274         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1275             m_shouldDoFullCollection = true;
1276         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
1277         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1278         if (verbose)
1279             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
1280         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1281         if (verbose)
1282             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1283         if (m_fullActivityCallback) {
1284             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1285             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1286         }
1287     }
1288
1289     m_sizeAfterLastCollect = currentHeapSize;
1290     if (verbose)
1291         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
1292     m_bytesAllocatedThisCycle = 0;
1293
1294     if (Options::logGC())
1295         dataLog(currentHeapSize / 1024, " kb, ");
1296 }
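
// Worked example (hypothetical numbers, LargeHeap, 8 GB of RAM): a full collection
// that leaves currentHeapSize = 40 MB gives proportionalHeapSize = 2 * 40 = 80 MB,
// so m_maxHeapSize = 80 MB and m_maxEdenSize = 40 MB. If the next collection is an
// eden collection and currentHeapSize is now 60 MB, the provisional m_maxEdenSize is
// 80 - 60 = 20 MB; since 20 / 80 = 0.25 < 1/3, a full collection is requested next.
// m_maxHeapSize then grows by currentHeapSize - m_sizeAfterLastCollect = 60 - 40 = 20 MB
// to 100 MB, which restores m_maxEdenSize to 100 - 60 = 40 MB, so the nursery size
// stays fixed across eden collections.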
1297
1298 void Heap::didFinishCollection(double gcStartTime)
1299 {
1300     double gcEndTime = WTF::monotonicallyIncreasingTime();
1301     HeapOperation operation = m_operationInProgress;
1302     if (m_operationInProgress == FullCollection)
1303         m_lastFullGCLength = gcEndTime - gcStartTime;
1304     else
1305         m_lastEdenGCLength = gcEndTime - gcStartTime;
1306
1307 #if ENABLE(RESOURCE_USAGE)
1308     ASSERT(externalMemorySize() <= extraMemorySize());
1309 #endif
1310
1311     if (Options::recordGCPauseTimes())
1312         HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1313
1314     if (Options::useZombieMode())
1315         zombifyDeadObjects();
1316
1317     if (Options::dumpObjectStatistics())
1318         HeapStatistics::dumpObjectStatistics(this);
1319
1320     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
1321         gatherExtraHeapSnapshotData(*heapProfiler);
1322         removeDeadHeapSnapshotNodes(*heapProfiler);
1323     }
1324
1325     RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
1326     m_operationInProgress = NoOperation;
1327
1328     for (auto* observer : m_observers)
1329         observer->didGarbageCollect(operation);
1330 }
1331
1332 void Heap::resumeCompilerThreads()
1333 {
1334 #if ENABLE(DFG_JIT)
1335     for (auto worklist : m_suspendedCompilerWorklists)
1336         worklist->resumeAllThreads();
1337     m_suspendedCompilerWorklists.clear();
1338 #endif
1339 }
1340
1341 void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
1342 {
1343     m_fullActivityCallback = activityCallback;
1344 }
1345
1346 void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
1347 {
1348     m_edenActivityCallback = activityCallback;
1349 }
1350
1351 GCActivityCallback* Heap::fullActivityCallback()
1352 {
1353     return m_fullActivityCallback.get();
1354 }
1355
1356 GCActivityCallback* Heap::edenActivityCallback()
1357 {
1358     return m_edenActivityCallback.get();
1359 }
1360
1361 void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
1362 {
1363     m_sweeper = WTFMove(sweeper);
1364 }
1365
1366 IncrementalSweeper* Heap::sweeper()
1367 {
1368     return m_sweeper.get();
1369 }
1370
1371 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1372 {
1373     if (m_fullActivityCallback)
1374         m_fullActivityCallback->setEnabled(enable);
1375     if (m_edenActivityCallback)
1376         m_edenActivityCallback->setEnabled(enable);
1377 }
1378
1379 void Heap::didAllocate(size_t bytes)
1380 {
1381     if (m_edenActivityCallback)
1382         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1383     m_bytesAllocatedThisCycle += bytes;
1384 }
1385
1386 bool Heap::isValidAllocation(size_t)
1387 {
1388     if (!isValidThreadState(m_vm))
1389         return false;
1390
1391     if (m_operationInProgress != NoOperation)
1392         return false;
1393     
1394     return true;
1395 }
1396
1397 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1398 {
1399     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1400 }
1401
1402 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1403 {
1404     HandleSlot slot = handle.slot();
1405     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1406     finalizer(slot->asCell());
1407     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1408 }
1409
1410 void Heap::addExecutable(ExecutableBase* executable)
1411 {
1412     m_executables.append(executable);
1413 }
1414
1415 void Heap::collectAllGarbageIfNotDoneRecently()
1416 {
1417     if (!m_fullActivityCallback) {
1418         collectAllGarbage();
1419         return;
1420     }
1421
1422     if (m_fullActivityCallback->didSyncGCRecently()) {
1423         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1424         reportAbandonedObjectGraph();
1425         return;
1426     }
1427
1428     m_fullActivityCallback->setDidSyncGCRecently();
1429     collectAllGarbage();
1430 }
1431
1432 class Zombify : public MarkedBlock::VoidFunctor {
1433 public:
1434     inline void visit(HeapCell* cell) const
1435     {
1436         void** current = reinterpret_cast<void**>(cell);
1437
1438         // We want to maintain zapped-ness because that's how we know if we've called 
1439         // the destructor.
1440         if (cell->isZapped())
1441             current++;
1442
1443         void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + cell->cellSize());
1444         for (; current < limit; current++)
1445             *current = zombifiedBits;
1446     }
1447     IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
1448     {
1449         visit(cell);
1450         return IterationStatus::Continue;
1451     }
1452 };
1453
1454 void Heap::zombifyDeadObjects()
1455 {
1456     // Sweep now because destructors will crash once we're zombified.
1457     m_objectSpace.sweep();
1458     HeapIterationScope iterationScope(*this);
1459     m_objectSpace.forEachDeadCell(iterationScope, Zombify());
1460 }
1461
1462 void Heap::flushWriteBarrierBuffer(JSCell* cell)
1463 {
1464     m_writeBarrierBuffer.flush(*this);
1465     m_writeBarrierBuffer.add(cell);
1466 }
1467
1468 bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
1469 {
1470     if (!Options::useGenerationalGC())
1471         return true;
1472
1473     switch (requestedCollectionType) {
1474     case EdenCollection:
1475         return false;
1476     case FullCollection:
1477         return true;
1478     case AnyCollection:
1479         return m_shouldDoFullCollection;
1480     default:
1481         RELEASE_ASSERT_NOT_REACHED();
1482         return false;
1483     }
1484     RELEASE_ASSERT_NOT_REACHED();
1485     return false;
1486 }
1487
1488 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1489 {
1490     m_logicallyEmptyWeakBlocks.append(block);
1491 }
1492
1493 void Heap::sweepAllLogicallyEmptyWeakBlocks()
1494 {
1495     if (m_logicallyEmptyWeakBlocks.isEmpty())
1496         return;
1497
1498     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1499     while (sweepNextLogicallyEmptyWeakBlock()) { }
1500 }
1501
1502 bool Heap::sweepNextLogicallyEmptyWeakBlock()
1503 {
1504     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1505         return false;
1506
1507     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1508
1509     block->sweep();
1510     if (block->isEmpty()) {
1511         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1512         m_logicallyEmptyWeakBlocks.removeLast();
1513         WeakBlock::destroy(*this, block);
1514     } else
1515         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1516
1517     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1518         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1519         return false;
1520     }
1521
1522     return true;
1523 }
1524
1525 size_t Heap::threadVisitCount()
1526 {       
1527     size_t result = 0;
1528     for (auto& parallelVisitor : m_parallelSlotVisitors)
1529         result += parallelVisitor->visitCount();
1530     return result;
1531 }
1532
1533 size_t Heap::threadBytesVisited()
1534 {       
1535     size_t result = 0;
1536     for (auto& parallelVisitor : m_parallelSlotVisitors)
1537         result += parallelVisitor->bytesVisited();
1538     return result;
1539 }
1540
1541 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
1542 {
1543     // We don't know the full set of CodeBlocks until compilation has terminated.
1544     completeAllJITPlans();
1545
1546     return m_codeBlocks->iterate(func);
1547 }
1548
1549 void Heap::writeBarrierSlowPath(const JSCell* from)
1550 {
1551     if (UNLIKELY(barrierShouldBeFenced())) {
1552         // In this case, the barrierThreshold is the tautological threshold, so from might still
1553         // not be black. But we can't know for sure until we fire off a fence.
1554         WTF::storeLoadFence();
1555         if (!isBlack(from->cellState()))
1556             return;
1557     }
1558     
1559     addToRememberedSet(from);
1560 }
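
// A minimal generic sketch of the fenced-barrier shape above: while a concurrent
// collector is active the fast path's threshold check is made tautological, so the
// slow path issues a fence and re-checks the object's color before remembering it.
// The names below are hypothetical and not part of JSC.
#if 0 // Illustrative sketch only; not compiled.
#include <atomic>
#include <vector>

enum class SketchColor { White, Grey, Black };

struct SketchObject {
    std::atomic<SketchColor> color { SketchColor::White };
};

inline void sketchWriteBarrierSlowPath(SketchObject& object, bool fenceRequired, std::vector<SketchObject*>& rememberedSet)
{
    if (fenceRequired) {
        // Order the mutator's preceding field store before re-reading the color (a store-load fence).
        std::atomic_thread_fence(std::memory_order_seq_cst);
        if (object.color.load(std::memory_order_relaxed) != SketchColor::Black)
            return; // Not actually black; the collector has not finished visiting it, so no barrier is needed.
    }
    rememberedSet.push_back(&object); // Re-grey: make the collector revisit this object.
}
#endif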
1561
1562 } // namespace JSC