1 /*
2  *  Copyright (C) 2003-2009, 2011, 2013-2016 Apple Inc. All rights reserved.
3  *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4  *
5  *  This library is free software; you can redistribute it and/or
6  *  modify it under the terms of the GNU Lesser General Public
7  *  License as published by the Free Software Foundation; either
8  *  version 2 of the License, or (at your option) any later version.
9  *
10  *  This library is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *  Lesser General Public License for more details.
14  *
15  *  You should have received a copy of the GNU Lesser General Public
16  *  License along with this library; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
18  *
19  */
20
21 #include "config.h"
22 #include "Heap.h"
23
24 #include "CodeBlock.h"
25 #include "ConservativeRoots.h"
26 #include "DFGWorklist.h"
27 #include "EdenGCActivityCallback.h"
28 #include "FullGCActivityCallback.h"
29 #include "GCActivityCallback.h"
30 #include "GCIncomingRefCountedSetInlines.h"
31 #include "GCSegmentedArrayInlines.h"
32 #include "GCTypeMap.h"
33 #include "HasOwnPropertyCache.h"
34 #include "HeapHelperPool.h"
35 #include "HeapIterationScope.h"
36 #include "HeapProfiler.h"
37 #include "HeapRootVisitor.h"
38 #include "HeapSnapshot.h"
39 #include "HeapStatistics.h"
40 #include "HeapVerifier.h"
41 #include "IncrementalSweeper.h"
42 #include "Interpreter.h"
43 #include "JITStubRoutineSet.h"
44 #include "JITWorklist.h"
45 #include "JSCInlines.h"
46 #include "JSGlobalObject.h"
47 #include "JSLock.h"
48 #include "JSVirtualMachineInternal.h"
49 #include "MarkedSpaceInlines.h"
50 #include "SamplingProfiler.h"
51 #include "ShadowChicken.h"
52 #include "SuperSampler.h"
53 #include "TypeProfilerLog.h"
54 #include "UnlinkedCodeBlock.h"
55 #include "VM.h"
56 #include "WeakSetInlines.h"
57 #include <algorithm>
58 #include <wtf/CurrentTime.h>
59 #include <wtf/MainThread.h>
60 #include <wtf/ParallelVectorIterator.h>
61 #include <wtf/ProcessID.h>
62 #include <wtf/RAMSize.h>
63 #include <wtf/SimpleStats.h>
64
65 #if USE(FOUNDATION)
66 #if __has_include(<objc/objc-internal.h>)
67 #include <objc/objc-internal.h>
68 #else
69 extern "C" void* objc_autoreleasePoolPush(void);
70 extern "C" void objc_autoreleasePoolPop(void *context);
71 #endif
72 #endif // USE(FOUNDATION)
73
74 using namespace std;
75
76 namespace JSC {
77
78 namespace {
79
80 static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
81 const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
82
83 size_t minHeapSize(HeapType heapType, size_t ramSize)
84 {
85     if (heapType == LargeHeap)
86         return min(largeHeapSize, ramSize / 4);
87     return smallHeapSize;
88 }
89
90 size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
91 {
92     // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
93     if (heapSize < ramSize / 4)
94         return 2 * heapSize;
95     if (heapSize < ramSize / 2)
96         return 1.5 * heapSize;
97     return 1.25 * heapSize;
98 }
99
100 bool isValidSharedInstanceThreadState(VM* vm)
101 {
102     return vm->currentThreadIsHoldingAPILock();
103 }
104
105 bool isValidThreadState(VM* vm)
106 {
107     if (vm->atomicStringTable() != wtfThreadData().atomicStringTable())
108         return false;
109
110     if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
111         return false;
112
113     return true;
114 }
115
116 void recordType(TypeCountSet& set, JSCell* cell)
117 {
118     const char* typeName = "[unknown]";
119     const ClassInfo* info = cell->classInfo();
120     if (info && info->className)
121         typeName = info->className;
122     set.add(typeName);
123 }
124
125 bool measurePhaseTiming()
126 {
127     return false;
128 }
129
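// Returns the lazily-created global table of per-phase timing statistics, keyed first by phase
// name and then by collection type. The table is allocated once and never destroyed.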
130 HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
131 {
132     static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
133     static std::once_flag once;
134     std::call_once(
135         once,
136         [] {
137             result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
138         });
139     return *result;
140 }
141
142 SimpleStats& timingStats(const char* name, HeapOperation operation)
143 {
144     return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[operation];
145 }
146
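// RAII helper that, when measurePhaseTiming() is enabled, measures how long a GC phase took,
// folds it into the per-phase statistics, and logs the running average.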
147 class TimingScope {
148 public:
149     TimingScope(HeapOperation operation, const char* name)
150         : m_operation(operation)
151         , m_name(name)
152     {
153         if (measurePhaseTiming())
154             m_before = monotonicallyIncreasingTimeMS();
155     }
156     
157     TimingScope(Heap& heap, const char* name)
158         : TimingScope(heap.operationInProgress(), name)
159     {
160     }
161     
162     void setOperation(HeapOperation operation)
163     {
164         m_operation = operation;
165     }
166     
167     void setOperation(Heap& heap)
168     {
169         setOperation(heap.operationInProgress());
170     }
171     
172     ~TimingScope()
173     {
174         if (measurePhaseTiming()) {
175             double after = monotonicallyIncreasingTimeMS();
176             double timing = after - m_before;
177             SimpleStats& stats = timingStats(m_name, m_operation);
178             stats.add(timing);
179             dataLog("[GC:", m_operation, "] ", m_name, " took: ", timing, " ms (average ", stats.mean(), " ms).\n");
180         }
181     }
182 private:
183     HeapOperation m_operation;
184     double m_before;
185     const char* m_name;
186 };
187
188 } // anonymous namespace
189
190 Heap::Heap(VM* vm, HeapType heapType)
191     : m_heapType(heapType)
192     , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
193     , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
194     , m_sizeAfterLastCollect(0)
195     , m_sizeAfterLastFullCollect(0)
196     , m_sizeBeforeLastFullCollect(0)
197     , m_sizeAfterLastEdenCollect(0)
198     , m_sizeBeforeLastEdenCollect(0)
199     , m_bytesAllocatedThisCycle(0)
200     , m_bytesAbandonedSinceLastFullCollect(0)
201     , m_maxEdenSize(m_minBytesPerCycle)
202     , m_maxHeapSize(m_minBytesPerCycle)
203     , m_shouldDoFullCollection(false)
204     , m_totalBytesVisited(0)
205     , m_operationInProgress(NoOperation)
206     , m_objectSpace(this)
207     , m_extraMemorySize(0)
208     , m_deprecatedExtraMemorySize(0)
209     , m_machineThreads(this)
210     , m_slotVisitor(*this)
211     , m_handleSet(vm)
212     , m_codeBlocks(std::make_unique<CodeBlockSet>())
213     , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
214     , m_isSafeToCollect(false)
215     , m_writeBarrierBuffer(256)
216     , m_vm(vm)
217     // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously 
218     // schedule the timer if we've never done a collection.
219     , m_lastFullGCLength(0.01)
220     , m_lastEdenGCLength(0.01)
221     , m_fullActivityCallback(GCActivityCallback::createFullTimer(this))
222     , m_edenActivityCallback(GCActivityCallback::createEdenTimer(this))
223 #if USE(CF)
224     , m_sweeper(std::make_unique<IncrementalSweeper>(this, CFRunLoopGetCurrent()))
225 #else
226     , m_sweeper(std::make_unique<IncrementalSweeper>(this))
227 #endif
228     , m_deferralDepth(0)
229 #if USE(FOUNDATION)
230     , m_delayedReleaseRecursionCount(0)
231 #endif
232     , m_helperClient(&heapHelperPool())
233 {
234     if (Options::verifyHeap())
235         m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
236 }
237
238 Heap::~Heap()
239 {
240     for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
241         WeakBlock::destroy(*this, block);
242 }
243
244 bool Heap::isPagedOut(double deadline)
245 {
246     return m_objectSpace.isPagedOut(deadline);
247 }
248
249 // The VM is being destroyed and the collector will never run again.
250 // Run all pending finalizers now because we won't get another chance.
251 void Heap::lastChanceToFinalize()
252 {
253     RELEASE_ASSERT(!m_vm->entryScope);
254     RELEASE_ASSERT(m_operationInProgress == NoOperation);
255
256     m_arrayBuffers.lastChanceToFinalize();
257     m_codeBlocks->lastChanceToFinalize();
258     m_objectSpace.lastChanceToFinalize();
259     releaseDelayedReleasedObjects();
260
261     sweepAllLogicallyEmptyWeakBlocks();
262 }
263
264 void Heap::releaseDelayedReleasedObjects()
265 {
266 #if USE(FOUNDATION)
267     // We need to guard against the case where releasing an object creates more objects, because the
268     // release can call back into JS. When those JS calls exit and all locks are dropped, we end up
269     // back here and could try to recursively release objects. We guard against that with a recursive
270     // entry count: only the initial call releases objects; recursive calls simply return and let the
271     // initial call take care of any objects created during release.
272     // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
273     // and use a temp Vector for the actual releasing.
274     if (!m_delayedReleaseRecursionCount++) {
275         while (!m_delayedReleaseObjects.isEmpty()) {
276             ASSERT(m_vm->currentThreadIsHoldingAPILock());
277
278             Vector<RetainPtr<CFTypeRef>> objectsToRelease = WTFMove(m_delayedReleaseObjects);
279
280             {
281                 // We need to drop locks before calling out to arbitrary code.
282                 JSLock::DropAllLocks dropAllLocks(m_vm);
283
284                 void* context = objc_autoreleasePoolPush();
285                 objectsToRelease.clear();
286                 objc_autoreleasePoolPop(context);
287             }
288         }
289     }
290     m_delayedReleaseRecursionCount--;
291 #endif
292 }
293
294 void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
295 {
296     didAllocate(size);
297     collectIfNecessaryOrDefer();
298 }
299
300 void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
301 {
302     m_deprecatedExtraMemorySize += size;
303     reportExtraMemoryAllocatedSlowCase(size);
304 }
305
306 void Heap::reportAbandonedObjectGraph()
307 {
308     // Our clients don't know exactly how much memory they
309     // are abandoning so we just guess for them.
310     size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
311
312     // We want to accelerate the next collection. Because memory has just 
313     // been abandoned, the next collection has the potential to 
314     // be more profitable. Since allocation is the trigger for collection, 
315     // we hasten the next collection by pretending that we've allocated more memory. 
316     if (m_fullActivityCallback) {
317         m_fullActivityCallback->didAllocate(
318             m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
319     }
320     m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
321 }
322
323 void Heap::protect(JSValue k)
324 {
325     ASSERT(k);
326     ASSERT(m_vm->currentThreadIsHoldingAPILock());
327
328     if (!k.isCell())
329         return;
330
331     m_protectedValues.add(k.asCell());
332 }
333
334 bool Heap::unprotect(JSValue k)
335 {
336     ASSERT(k);
337     ASSERT(m_vm->currentThreadIsHoldingAPILock());
338
339     if (!k.isCell())
340         return false;
341
342     return m_protectedValues.remove(k.asCell());
343 }
344
345 void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
346 {
347     if (m_arrayBuffers.addReference(cell, buffer)) {
348         collectIfNecessaryOrDefer();
349         didAllocate(buffer->gcSizeEstimateInBytes());
350     }
351 }
352
353 void Heap::harvestWeakReferences()
354 {
355     m_slotVisitor.harvestWeakReferences();
356 }
357
358 void Heap::finalizeUnconditionalFinalizers()
359 {
360     m_slotVisitor.finalizeUnconditionalFinalizers();
361 }
362
363 void Heap::willStartIterating()
364 {
365     m_objectSpace.willStartIterating();
366 }
367
368 void Heap::didFinishIterating()
369 {
370     m_objectSpace.didFinishIterating();
371 }
372
373 void Heap::completeAllJITPlans()
374 {
375 #if ENABLE(JIT)
376     JITWorklist::instance()->completeAllForVM(*m_vm);
377 #endif // ENABLE(JIT)
378 #if ENABLE(DFG_JIT)
379     DFG::completeAllPlansForVM(*m_vm);
380 #endif
381 }
382
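// Marks every object reachable from the roots: conservative roots are gathered first, then the
// main SlotVisitor and the parallel helper threads drain the mark stacks until no work remains,
// and weak handles are visited last.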
383 void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
384 {
385     TimingScope markRootsTimingScope(*this, "Heap::markRoots");
386     
387     ASSERT(isValidThreadState(m_vm));
388
389     HeapRootVisitor heapRootVisitor(m_slotVisitor);
390     
391     ConservativeRoots conservativeRoots(*this);
392     {
393         TimingScope preConvergenceTimingScope(*this, "Heap::markRoots before convergence");
394         // We gather conservative roots before clearing mark bits because conservative
395         // gathering uses the mark bits to determine whether a reference is valid.
396         {
397             TimingScope conservativeScanTimingScope(*this, "Heap::markRoots conservative scan");
398             SuperSamplerScope superSamplerScope(false);
399             gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
400             gatherJSStackRoots(conservativeRoots);
401             gatherScratchBufferRoots(conservativeRoots);
402         }
403
404 #if ENABLE(DFG_JIT)
405         DFG::rememberCodeBlocks(*m_vm);
406 #endif
407
408 #if ENABLE(SAMPLING_PROFILER)
409         if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
410             // Note that we need to own the lock from now until we're done
411             // marking the SamplingProfiler's data because once we verify the
412             // SamplingProfiler's stack traces, we don't want it to accumulate
413             // more stack traces before we get the chance to mark it.
414             // This lock is released inside visitSamplingProfiler().
415             samplingProfiler->getLock().lock();
416             samplingProfiler->processUnverifiedStackTraces();
417         }
418 #endif // ENABLE(SAMPLING_PROFILER)
419
420         if (m_operationInProgress == FullCollection) {
421             m_opaqueRoots.clear();
422             m_slotVisitor.clearMarkStack();
423         }
424
425         beginMarking();
426
427         m_parallelMarkersShouldExit = false;
428
429         m_helperClient.setFunction(
430             [this] () {
431                 SlotVisitor* slotVisitor;
432                 {
433                     LockHolder locker(m_parallelSlotVisitorLock);
434                     if (m_availableParallelSlotVisitors.isEmpty()) {
435                         std::unique_ptr<SlotVisitor> newVisitor =
436                             std::make_unique<SlotVisitor>(*this);
437                         slotVisitor = newVisitor.get();
438                         m_parallelSlotVisitors.append(WTFMove(newVisitor));
439                     } else
440                         slotVisitor = m_availableParallelSlotVisitors.takeLast();
441                 }
442
443                 WTF::registerGCThread();
444
445                 {
446                     ParallelModeEnabler parallelModeEnabler(*slotVisitor);
447                     slotVisitor->didStartMarking();
448                     slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
449                 }
450
451                 {
452                     LockHolder locker(m_parallelSlotVisitorLock);
453                     m_availableParallelSlotVisitors.append(slotVisitor);
454                 }
455             });
456
457         m_slotVisitor.didStartMarking();
458     }
459     
460     {
461         SuperSamplerScope superSamplerScope(false);
462         TimingScope convergenceTimingScope(*this, "Heap::markRoots convergence");
463         ParallelModeEnabler enabler(m_slotVisitor);
464         
465         m_slotVisitor.donateAndDrain();
466         visitExternalRememberedSet();
467         visitSmallStrings();
468         visitConservativeRoots(conservativeRoots);
469         visitProtectedObjects(heapRootVisitor);
470         visitArgumentBuffers(heapRootVisitor);
471         visitException(heapRootVisitor);
472         visitStrongHandles(heapRootVisitor);
473         visitHandleStack(heapRootVisitor);
474         visitSamplingProfiler();
475         visitShadowChicken();
476         traceCodeBlocksAndJITStubRoutines();
477         m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
478     }
479     
480     TimingScope postConvergenceTimingScope(*this, "Heap::markRoots after convergence");
481
482     // Weak references must be marked last because their liveness depends on
483     // the liveness of the rest of the object graph.
484     visitWeakHandles(heapRootVisitor);
485
486     {
487         std::lock_guard<Lock> lock(m_markingMutex);
488         m_parallelMarkersShouldExit = true;
489         m_markingConditionVariable.notifyAll();
490     }
491     m_helperClient.finish();
492     updateObjectCounts(gcStartTime);
493     endMarking();
494 }
495
496 void Heap::gatherStackRoots(ConservativeRoots& roots, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
497 {
498     m_jitStubRoutines->clearMarks();
499     m_machineThreads.gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, stackOrigin, stackTop, calleeSavedRegisters);
500 }
501
502 void Heap::gatherJSStackRoots(ConservativeRoots& roots)
503 {
504 #if !ENABLE(JIT)
505     m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
506 #else
507     UNUSED_PARAM(roots);
508 #endif
509 }
510
511 void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
512 {
513 #if ENABLE(DFG_JIT)
514     m_vm->gatherConservativeRoots(roots);
515 #else
516     UNUSED_PARAM(roots);
517 #endif
518 }
519
520 void Heap::beginMarking()
521 {
522     TimingScope timingScope(*this, "Heap::beginMarking");
523     if (m_operationInProgress == FullCollection)
524         m_codeBlocks->clearMarksForFullCollection();
525     
526     {
527         TimingScope clearNewlyAllocatedTimingScope(*this, "m_objectSpace.clearNewlyAllocated");
528         m_objectSpace.clearNewlyAllocated();
529     }
530     
531     {
532         TimingScope clearMarksTimingScope(*this, "m_objectSpace.beginMarking");
533         m_objectSpace.beginMarking();
534     }
535 }
536
537 void Heap::visitExternalRememberedSet()
538 {
539 #if JSC_OBJC_API_ENABLED
540     scanExternalRememberedSet(*m_vm, m_slotVisitor);
541 #endif
542 }
543
544 void Heap::visitSmallStrings()
545 {
546     if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
547         return;
548
549     m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
550     if (Options::logGC() == GCLogging::Verbose)
551         dataLog("Small strings:\n", m_slotVisitor);
552     m_slotVisitor.donateAndDrain();
553 }
554
555 void Heap::visitConservativeRoots(ConservativeRoots& roots)
556 {
557     m_slotVisitor.append(roots);
558
559     if (Options::logGC() == GCLogging::Verbose)
560         dataLog("Conservative Roots:\n", m_slotVisitor);
561
562     m_slotVisitor.donateAndDrain();
563 }
564
565 void Heap::visitCompilerWorklistWeakReferences()
566 {
567 #if ENABLE(DFG_JIT)
568     for (auto worklist : m_suspendedCompilerWorklists)
569         worklist->visitWeakReferences(m_slotVisitor);
570
571     if (Options::logGC() == GCLogging::Verbose)
572         dataLog("DFG Worklists:\n", m_slotVisitor);
573 #endif
574 }
575
576 void Heap::removeDeadCompilerWorklistEntries()
577 {
578 #if ENABLE(DFG_JIT)
579     for (auto worklist : m_suspendedCompilerWorklists)
580         worklist->removeDeadPlans(*m_vm);
581 #endif
582 }
583
584 bool Heap::isHeapSnapshotting() const
585 {
586     HeapProfiler* heapProfiler = m_vm->heapProfiler();
587     if (UNLIKELY(heapProfiler))
588         return heapProfiler->activeSnapshotBuilder();
589     return false;
590 }
591
592 struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
593     GatherHeapSnapshotData(HeapSnapshotBuilder& builder)
594         : m_builder(builder)
595     {
596     }
597
598     IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
599     {
600         if (kind == HeapCell::JSCell) {
601             JSCell* cell = static_cast<JSCell*>(heapCell);
602             cell->methodTable()->heapSnapshot(cell, m_builder);
603         }
604         return IterationStatus::Continue;
605     }
606
607     HeapSnapshotBuilder& m_builder;
608 };
609
610 void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
611 {
612     if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
613         HeapIterationScope heapIterationScope(*this);
614         GatherHeapSnapshotData functor(*builder);
615         m_objectSpace.forEachLiveCell(heapIterationScope, functor);
616     }
617 }
618
619 struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
620     RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
621         : m_snapshot(snapshot)
622     {
623     }
624
625     IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
626     {
627         if (kind == HeapCell::JSCell)
628             m_snapshot.sweepCell(static_cast<JSCell*>(cell));
629         return IterationStatus::Continue;
630     }
631
632     HeapSnapshot& m_snapshot;
633 };
634
635 void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
636 {
637     if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
638         HeapIterationScope heapIterationScope(*this);
639         RemoveDeadHeapSnapshotNodes functor(*snapshot);
640         m_objectSpace.forEachDeadCell(heapIterationScope, functor);
641         snapshot->shrinkToFit();
642     }
643 }
644
645 void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
646 {
647     for (auto& pair : m_protectedValues)
648         heapRootVisitor.visit(&pair.key);
649
650     if (Options::logGC() == GCLogging::Verbose)
651         dataLog("Protected Objects:\n", m_slotVisitor);
652
653     m_slotVisitor.donateAndDrain();
654 }
655
656 void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
657 {
658     if (!m_markListSet || !m_markListSet->size())
659         return;
660
661     MarkedArgumentBuffer::markLists(visitor, *m_markListSet);
662
663     if (Options::logGC() == GCLogging::Verbose)
664         dataLog("Argument Buffers:\n", m_slotVisitor);
665
666     m_slotVisitor.donateAndDrain();
667 }
668
669 void Heap::visitException(HeapRootVisitor& visitor)
670 {
671     if (!m_vm->exception() && !m_vm->lastException())
672         return;
673
674     visitor.visit(m_vm->addressOfException());
675     visitor.visit(m_vm->addressOfLastException());
676
677     if (Options::logGC() == GCLogging::Verbose)
678         dataLog("Exceptions:\n", m_slotVisitor);
679
680     m_slotVisitor.donateAndDrain();
681 }
682
683 void Heap::visitStrongHandles(HeapRootVisitor& visitor)
684 {
685     m_handleSet.visitStrongHandles(visitor);
686
687     if (Options::logGC() == GCLogging::Verbose)
688         dataLog("Strong Handles:\n", m_slotVisitor);
689
690     m_slotVisitor.donateAndDrain();
691 }
692
693 void Heap::visitHandleStack(HeapRootVisitor& visitor)
694 {
695     m_handleStack.visit(visitor);
696
697     if (Options::logGC() == GCLogging::Verbose)
698         dataLog("Handle Stack:\n", m_slotVisitor);
699
700     m_slotVisitor.donateAndDrain();
701 }
702
703 void Heap::visitSamplingProfiler()
704 {
705 #if ENABLE(SAMPLING_PROFILER)
706     if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
707         ASSERT(samplingProfiler->getLock().isLocked());
708         samplingProfiler->visit(m_slotVisitor);
709         if (Options::logGC() == GCLogging::Verbose)
710             dataLog("Sampling Profiler data:\n", m_slotVisitor);
711
712         m_slotVisitor.donateAndDrain();
713         samplingProfiler->getLock().unlock();
714     }
715 #endif // ENABLE(SAMPLING_PROFILER)
716 }
717
718 void Heap::visitShadowChicken()
719 {
720     m_vm->shadowChicken().visitChildren(m_slotVisitor);
721 }
722
723 void Heap::traceCodeBlocksAndJITStubRoutines()
724 {
725     m_jitStubRoutines->traceMarkedStubRoutines(m_slotVisitor);
726
727     if (Options::logGC() == GCLogging::Verbose)
728         dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);
729
730     m_slotVisitor.donateAndDrain();
731 }
732
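// Visiting weak sets can mark new objects, which can in turn keep more weak references alive, so
// we iterate to a fixed point: visit, harvest, and drain until the mark stacks stay empty.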
733 void Heap::visitWeakHandles(HeapRootVisitor& visitor)
734 {
735     TimingScope timingScope(*this, "Heap::visitWeakHandles");
736     while (true) {
737         {
738             TimingScope timingScope(*this, "m_objectSpace.visitWeakSets");
739             m_objectSpace.visitWeakSets(visitor);
740         }
741         harvestWeakReferences();
742         visitCompilerWorklistWeakReferences();
743         if (m_slotVisitor.isEmpty())
744             break;
745
746         if (Options::logGC() == GCLogging::Verbose)
747             dataLog("Live Weak Handles:\n", m_slotVisitor);
748
749         {
750             ParallelModeEnabler enabler(m_slotVisitor);
751             m_slotVisitor.donateAndDrain();
752             m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
753         }
754     }
755 }
756
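// Accumulates the bytes visited by the main and parallel visitors; a full collection resets the
// running total first. updateAllocationLimits() uses this total as the marked-space contribution
// to the current heap size.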
757 void Heap::updateObjectCounts(double gcStartTime)
758 {
759     if (Options::logGC() == GCLogging::Verbose) {
760         size_t visitCount = m_slotVisitor.visitCount();
761         visitCount += threadVisitCount();
762         dataLogF("\nNumber of live Objects after GC %lu, took %.6f secs\n", static_cast<unsigned long>(visitCount), WTF::monotonicallyIncreasingTime() - gcStartTime);
763     }
764     
765     if (m_operationInProgress == FullCollection)
766         m_totalBytesVisited = 0;
767
768     m_totalBytesVisitedThisCycle = m_slotVisitor.bytesVisited() + threadBytesVisited();
769     
770     m_totalBytesVisited += m_totalBytesVisitedThisCycle;
771 }
772
773 void Heap::endMarking()
774 {
775     m_slotVisitor.reset();
776
777     for (auto& parallelVisitor : m_parallelSlotVisitors)
778         parallelVisitor->reset();
779
780     ASSERT(m_sharedMarkStack.isEmpty());
781     m_weakReferenceHarvesters.removeAll();
782     
783     m_objectSpace.endMarking();
784 }
785
786 size_t Heap::objectCount()
787 {
788     return m_objectSpace.objectCount();
789 }
790
791 size_t Heap::extraMemorySize()
792 {
793     return m_extraMemorySize + m_deprecatedExtraMemorySize + m_arrayBuffers.size();
794 }
795
796 size_t Heap::size()
797 {
798     return m_objectSpace.size() + extraMemorySize();
799 }
800
801 size_t Heap::capacity()
802 {
803     return m_objectSpace.capacity() + extraMemorySize();
804 }
805
806 size_t Heap::protectedGlobalObjectCount()
807 {
808     size_t result = 0;
809     forEachProtectedCell(
810         [&] (JSCell* cell) {
811             if (cell->isObject() && asObject(cell)->isGlobalObject())
812                 result++;
813         });
814     return result;
815 }
816
817 size_t Heap::globalObjectCount()
818 {
819     HeapIterationScope iterationScope(*this);
820     size_t result = 0;
821     m_objectSpace.forEachLiveCell(
822         iterationScope,
823         [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
824             if (kind != HeapCell::JSCell)
825                 return IterationStatus::Continue;
826             JSCell* cell = static_cast<JSCell*>(heapCell);
827             if (cell->isObject() && asObject(cell)->isGlobalObject())
828                 result++;
829             return IterationStatus::Continue;
830         });
831     return result;
832 }
833
834 size_t Heap::protectedObjectCount()
835 {
836     size_t result = 0;
837     forEachProtectedCell(
838         [&] (JSCell*) {
839             result++;
840         });
841     return result;
842 }
843
844 std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
845 {
846     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
847     forEachProtectedCell(
848         [&] (JSCell* cell) {
849             recordType(*result, cell);
850         });
851     return result;
852 }
853
854 std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
855 {
856     std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
857     HeapIterationScope iterationScope(*this);
858     m_objectSpace.forEachLiveCell(
859         iterationScope,
860         [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
861             if (kind == HeapCell::JSCell)
862                 recordType(*result, static_cast<JSCell*>(cell));
863             return IterationStatus::Continue;
864         });
865     return result;
866 }
867
868 void Heap::deleteAllCodeBlocks()
869 {
870     // If JavaScript is running, it's not safe to delete all JavaScript code, since
871     // we'll end up returning to deleted code.
872     RELEASE_ASSERT(!m_vm->entryScope);
873     ASSERT(m_operationInProgress == NoOperation);
874
875     completeAllJITPlans();
876
877     for (ExecutableBase* executable : m_executables)
878         executable->clearCode();
879 }
880
881 void Heap::deleteAllUnlinkedCodeBlocks()
882 {
883     for (ExecutableBase* current : m_executables) {
884         if (!current->isFunctionExecutable())
885             continue;
886         static_cast<FunctionExecutable*>(current)->unlinkedExecutable()->clearCode();
887     }
888 }
889
890 void Heap::clearUnmarkedExecutables()
891 {
892     for (unsigned i = m_executables.size(); i--;) {
893         ExecutableBase* current = m_executables[i];
894         if (isMarked(current))
895             continue;
896
897         // Eagerly dereference the Executable's JITCode in order to run watchpoint
898         // destructors. Otherwise, watchpoints might fire for deleted CodeBlocks.
899         current->clearCode();
900         std::swap(m_executables[i], m_executables.last());
901         m_executables.removeLast();
902     }
903
904     m_executables.shrinkToFit();
905 }
906
907 void Heap::deleteUnmarkedCompiledCode()
908 {
909     clearUnmarkedExecutables();
910     m_codeBlocks->deleteUnmarkedAndUnreferenced(m_operationInProgress);
911     m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
912 }
913
914 void Heap::addToRememberedSet(const JSCell* cell)
915 {
916     ASSERT(cell);
917     ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
918     ASSERT(isBlack(cell->cellState()));
919     // Indicate that this object is grey and that it's one of the following:
920     // - A re-greyed object during a concurrent collection.
921     // - An old remembered object.
922     // "OldGrey" doesn't tell us which of these things is true, but we usually treat the two cases the
923     // same.
924     cell->setCellState(CellState::OldGrey);
925     m_slotVisitor.appendToMarkStack(const_cast<JSCell*>(cell));
926 }
927
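// Performs a synchronous full collection and then, unless immortal objects are in use, eagerly
// sweeps and shrinks the whole heap instead of leaving the sweep to the incremental sweeper.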
928 void Heap::collectAllGarbage()
929 {
930     SuperSamplerScope superSamplerScope(false);
931     if (!m_isSafeToCollect)
932         return;
933
934     collectWithoutAnySweep(FullCollection);
935
936     DeferGCForAWhile deferGC(*this);
937     if (UNLIKELY(Options::useImmortalObjects()))
938         sweeper()->willFinishSweeping();
939     else {
940         double before = 0;
941         if (Options::logGC()) {
942             dataLog("[Full sweep: ", capacity() / 1024, " kb ");
943             before = currentTimeMS();
944         }
945         m_objectSpace.sweep();
946         m_objectSpace.shrink();
947         if (Options::logGC()) {
948             double after = currentTimeMS();
949             dataLog("=> ", capacity() / 1024, " kb, ", after - before, " ms]\n");
950         }
951     }
952     m_objectSpace.assertNoUnswept();
953
954     sweepAllLogicallyEmptyWeakBlocks();
955 }
956
957 void Heap::collect(HeapOperation collectionType)
958 {
959     SuperSamplerScope superSamplerScope(false);
960     if (!m_isSafeToCollect)
961         return;
962
963     collectWithoutAnySweep(collectionType);
964 }
965
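// Captures the caller's register state and stack bounds so that they can be scanned
// conservatively, runs the collection, and then sanitizes the stack.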
966 NEVER_INLINE void Heap::collectWithoutAnySweep(HeapOperation collectionType)
967 {
968     void* stackTop;
969     ALLOCATE_AND_GET_REGISTER_STATE(registers);
970
971     collectImpl(collectionType, wtfThreadData().stack().origin(), &stackTop, registers);
972
973     sanitizeStackForVM(m_vm);
974 }
975
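// The main collection routine: flushes caches and the write barrier buffer, marks the heap, reaps
// weak references, deletes dead code, kicks off the incremental sweeper, and recomputes the
// allocation limits that decide when the next collection runs.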
976 NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
977 {
978     SuperSamplerScope superSamplerScope(false);
979     TimingScope collectImplTimingScope(collectionType, "Heap::collectImpl");
980     
981 #if ENABLE(ALLOCATION_LOGGING)
982     dataLogF("JSC GC starting collection.\n");
983 #endif
984     
985     double before = 0;
986     if (Options::logGC()) {
987         dataLog("[GC: ", capacity() / 1024, " kb ");
988         before = currentTimeMS();
989     }
990     
991     double gcStartTime;
992     {
993         TimingScope earlyTimingScope(collectionType, "Heap::collectImpl before markRoots");
994
995         if (vm()->typeProfiler()) {
996             DeferGCForAWhile awhile(*this);
997             vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
998         }
999
1000 #if ENABLE(JIT)
1001         {
1002             DeferGCForAWhile awhile(*this);
1003             JITWorklist::instance()->completeAllForVM(*m_vm);
1004         }
1005 #endif // ENABLE(JIT)
1006
1007         vm()->shadowChicken().update(*vm(), vm()->topCallFrame);
1008
1009         RELEASE_ASSERT(!m_deferralDepth);
1010         ASSERT(vm()->currentThreadIsHoldingAPILock());
1011         RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
1012         ASSERT(m_isSafeToCollect);
1013         RELEASE_ASSERT(m_operationInProgress == NoOperation);
1014
1015         suspendCompilerThreads();
1016         willStartCollection(collectionType);
1017         
1018         collectImplTimingScope.setOperation(*this);
1019         earlyTimingScope.setOperation(*this);
1020
1021         gcStartTime = WTF::monotonicallyIncreasingTime();
1022         if (m_verifier) {
1023             // Verify that live objects from the last GC cycle haven't been corrupted by
1024             // mutators before we begin this new GC cycle.
1025             m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1026
1027             m_verifier->initializeGCCycle();
1028             m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
1029         }
1030
1031         flushOldStructureIDTables();
1032         stopAllocation();
1033         prepareForMarking();
1034         flushWriteBarrierBuffer();
1035
1036         if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
1037             cache->clear();
1038     }
1039
1040     markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
1041     
1042     TimingScope lateTimingScope(*this, "Heap::collectImpl after markRoots");
1043
1044     if (m_verifier) {
1045         m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
1046         m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1047     }
1048
1049     if (vm()->typeProfiler())
1050         vm()->typeProfiler()->invalidateTypeSetCache();
1051
1052     reapWeakHandles();
1053     pruneStaleEntriesFromWeakGCMaps();
1054     sweepArrayBuffers();
1055     snapshotUnswept();
1056     finalizeUnconditionalFinalizers();
1057     removeDeadCompilerWorklistEntries();
1058     deleteUnmarkedCompiledCode();
1059     deleteSourceProviderCaches();
1060
1061     notifyIncrementalSweeper();
1062     writeBarrierCurrentlyExecutingCodeBlocks();
1063
1064     prepareForAllocation();
1065     updateAllocationLimits();
1066     didFinishCollection(gcStartTime);
1067     resumeCompilerThreads();
1068     sweepLargeAllocations();
1069     
1070     if (m_verifier) {
1071         m_verifier->trimDeadObjects();
1072         m_verifier->verify(HeapVerifier::Phase::AfterGC);
1073     }
1074
1075     if (Options::logGC()) {
1076         double after = currentTimeMS();
1077         dataLog(after - before, " ms]\n");
1078     }
1079     
1080     if (false) {
1081         dataLog("Heap state after GC:\n");
1082         m_objectSpace.dumpBits();
1083     }
1084 }
1085
1086 void Heap::sweepLargeAllocations()
1087 {
1088     m_objectSpace.sweepLargeAllocations();
1089 }
1090
1091 void Heap::suspendCompilerThreads()
1092 {
1093 #if ENABLE(DFG_JIT)
1094     ASSERT(m_suspendedCompilerWorklists.isEmpty());
1095     for (unsigned i = DFG::numberOfWorklists(); i--;) {
1096         if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
1097             m_suspendedCompilerWorklists.append(worklist);
1098             worklist->suspendAllThreads();
1099         }
1100     }
1101 #endif
1102 }
1103
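// Decides whether this collection is an eden or a full collection, records the pre-collection
// heap size, and notifies the activity callbacks and observers.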
1104 void Heap::willStartCollection(HeapOperation collectionType)
1105 {
1106     if (Options::logGC())
1107         dataLog("=> ");
1108     
1109     if (shouldDoFullCollection(collectionType)) {
1110         m_operationInProgress = FullCollection;
1111         m_shouldDoFullCollection = false;
1112         if (Options::logGC())
1113             dataLog("FullCollection, ");
1114     } else {
1115         m_operationInProgress = EdenCollection;
1116         if (Options::logGC())
1117             dataLog("EdenCollection, ");
1118     }
1119     if (m_operationInProgress == FullCollection) {
1120         m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1121         m_extraMemorySize = 0;
1122         m_deprecatedExtraMemorySize = 0;
1123 #if ENABLE(RESOURCE_USAGE)
1124         m_externalMemorySize = 0;
1125 #endif
1126
1127         if (m_fullActivityCallback)
1128             m_fullActivityCallback->willCollect();
1129     } else {
1130         ASSERT(m_operationInProgress == EdenCollection);
1131         m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
1132     }
1133
1134     if (m_edenActivityCallback)
1135         m_edenActivityCallback->willCollect();
1136
1137     for (auto* observer : m_observers)
1138         observer->willGarbageCollect();
1139 }
1140
1141 void Heap::flushOldStructureIDTables()
1142 {
1143     m_structureIDTable.flushOldTables();
1144 }
1145
1146 void Heap::flushWriteBarrierBuffer()
1147 {
1148     if (m_operationInProgress == EdenCollection) {
1149         m_writeBarrierBuffer.flush(*this);
1150         return;
1151     }
1152     m_writeBarrierBuffer.reset();
1153 }
1154
1155 void Heap::stopAllocation()
1156 {
1157     m_objectSpace.stopAllocating();
1158 }
1159
1160 void Heap::prepareForMarking()
1161 {
1162     m_objectSpace.prepareForMarking();
1163 }
1164
1165 void Heap::reapWeakHandles()
1166 {
1167     m_objectSpace.reapWeakSets();
1168 }
1169
1170 void Heap::pruneStaleEntriesFromWeakGCMaps()
1171 {
1172     if (m_operationInProgress != FullCollection)
1173         return;
1174     for (auto& pruneCallback : m_weakGCMaps.values())
1175         pruneCallback();
1176 }
1177
1178 void Heap::sweepArrayBuffers()
1179 {
1180     m_arrayBuffers.sweep();
1181 }
1182
1183 void Heap::snapshotUnswept()
1184 {
1185     TimingScope timingScope(*this, "Heap::snapshotUnswept");
1186     m_objectSpace.snapshotUnswept();
1187 }
1188
1189 void Heap::deleteSourceProviderCaches()
1190 {
1191     m_vm->clearSourceProviderCaches();
1192 }
1193
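// Starts the incremental sweeper; a full collection also restarts the sweep of logically empty
// weak blocks from the beginning.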
1194 void Heap::notifyIncrementalSweeper()
1195 {
1196     if (m_operationInProgress == FullCollection) {
1197         if (!m_logicallyEmptyWeakBlocks.isEmpty())
1198             m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1199     }
1200
1201     m_sweeper->startSweeping();
1202 }
1203
1204 void Heap::writeBarrierCurrentlyExecutingCodeBlocks()
1205 {
1206     m_codeBlocks->writeBarrierCurrentlyExecutingCodeBlocks(this);
1207 }
1208
1209 void Heap::prepareForAllocation()
1210 {
1211     m_objectSpace.prepareForAllocation();
1212 }
1213
1214 void Heap::updateAllocationLimits()
1215 {
1216     static const bool verbose = false;
1217     
1218     if (verbose) {
1219         dataLog("\n");
1220         dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
1221     }
1222     
1223     // Calculate our current heap size threshold for the purpose of figuring out when we should
1224     // run another collection. This isn't the same as either size() or capacity(), though it should
1225     // be somewhere between the two. The key is to match the size calculations involved in calls to
1226     // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
1227     // fragmentation, we may have size() much smaller than capacity().
1228     size_t currentHeapSize = 0;
1229
1230     // For marked space, we use the total number of bytes visited. This matches the logic for
1231     // MarkedAllocator's calls to didAllocate(), which effectively accounts for the total size of
1232     // objects allocated rather than blocks used. This will underestimate capacity(), and in case
1233     // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
1234     // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
1235     currentHeapSize += m_totalBytesVisited;
1236     if (verbose)
1237         dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
1238
1239     // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
1240     // extra memory reporting.
1241     currentHeapSize += extraMemorySize();
1242
1243     if (verbose)
1244         dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
1245     
1246     if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
1247         HeapStatistics::exitWithFailure();
1248
1249     if (m_operationInProgress == FullCollection) {
1250         // To avoid pathological GC churn in very small and very large heaps, we set
1251         // the new allocation limit based on the current size of the heap, with a
1252         // fixed minimum.
1253         m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
1254         if (verbose)
1255             dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
1256         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1257         if (verbose)
1258             dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
1259         m_sizeAfterLastFullCollect = currentHeapSize;
1260         if (verbose)
1261             dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
1262         m_bytesAbandonedSinceLastFullCollect = 0;
1263         if (verbose)
1264             dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
1265     } else {
1266         ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
1267         // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
1268         // But we are sloppy, so we have to defend against the overflow.
1269         m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
1270         if (verbose)
1271             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1272         m_sizeAfterLastEdenCollect = currentHeapSize;
1273         if (verbose)
1274             dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
1275         double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
1276         double minEdenToOldGenerationRatio = 1.0 / 3.0;
1277         if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
1278             m_shouldDoFullCollection = true;
1279         // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
1280         m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
1281         if (verbose)
1282             dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
1283         m_maxEdenSize = m_maxHeapSize - currentHeapSize;
1284         if (verbose)
1285             dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
1286         if (m_fullActivityCallback) {
1287             ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
1288             m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
1289         }
1290     }
1291
1292     m_sizeAfterLastCollect = currentHeapSize;
1293     if (verbose)
1294         dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
1295     m_bytesAllocatedThisCycle = 0;
1296
1297     if (Options::logGC())
1298         dataLog(currentHeapSize / 1024, " kb, ");
1299 }
1300
1301 void Heap::didFinishCollection(double gcStartTime)
1302 {
1303     double gcEndTime = WTF::monotonicallyIncreasingTime();
1304     HeapOperation operation = m_operationInProgress;
1305     if (m_operationInProgress == FullCollection)
1306         m_lastFullGCLength = gcEndTime - gcStartTime;
1307     else
1308         m_lastEdenGCLength = gcEndTime - gcStartTime;
1309
1310 #if ENABLE(RESOURCE_USAGE)
1311     ASSERT(externalMemorySize() <= extraMemorySize());
1312 #endif
1313
1314     if (Options::recordGCPauseTimes())
1315         HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);
1316
1317     if (Options::useZombieMode())
1318         zombifyDeadObjects();
1319
1320     if (Options::dumpObjectStatistics())
1321         HeapStatistics::dumpObjectStatistics(this);
1322
1323     if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
1324         gatherExtraHeapSnapshotData(*heapProfiler);
1325         removeDeadHeapSnapshotNodes(*heapProfiler);
1326     }
1327
1328     RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
1329     m_operationInProgress = NoOperation;
1330
1331     for (auto* observer : m_observers)
1332         observer->didGarbageCollect(operation);
1333 }
1334
1335 void Heap::resumeCompilerThreads()
1336 {
1337 #if ENABLE(DFG_JIT)
1338     for (auto worklist : m_suspendedCompilerWorklists)
1339         worklist->resumeAllThreads();
1340     m_suspendedCompilerWorklists.clear();
1341 #endif
1342 }
1343
1344 void Heap::setFullActivityCallback(PassRefPtr<FullGCActivityCallback> activityCallback)
1345 {
1346     m_fullActivityCallback = activityCallback;
1347 }
1348
1349 void Heap::setEdenActivityCallback(PassRefPtr<EdenGCActivityCallback> activityCallback)
1350 {
1351     m_edenActivityCallback = activityCallback;
1352 }
1353
1354 GCActivityCallback* Heap::fullActivityCallback()
1355 {
1356     return m_fullActivityCallback.get();
1357 }
1358
1359 GCActivityCallback* Heap::edenActivityCallback()
1360 {
1361     return m_edenActivityCallback.get();
1362 }
1363
1364 void Heap::setIncrementalSweeper(std::unique_ptr<IncrementalSweeper> sweeper)
1365 {
1366     m_sweeper = WTFMove(sweeper);
1367 }
1368
1369 IncrementalSweeper* Heap::sweeper()
1370 {
1371     return m_sweeper.get();
1372 }
1373
1374 void Heap::setGarbageCollectionTimerEnabled(bool enable)
1375 {
1376     if (m_fullActivityCallback)
1377         m_fullActivityCallback->setEnabled(enable);
1378     if (m_edenActivityCallback)
1379         m_edenActivityCallback->setEnabled(enable);
1380 }
1381
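// Notifies the eden activity callback, which may schedule a collection, and adds the newly
// allocated bytes to the current cycle's total.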
1382 void Heap::didAllocate(size_t bytes)
1383 {
1384     if (m_edenActivityCallback)
1385         m_edenActivityCallback->didAllocate(m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
1386     m_bytesAllocatedThisCycle += bytes;
1387 }
1388
1389 bool Heap::isValidAllocation(size_t)
1390 {
1391     if (!isValidThreadState(m_vm))
1392         return false;
1393
1394     if (m_operationInProgress != NoOperation)
1395         return false;
1396     
1397     return true;
1398 }
1399
1400 void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
1401 {
1402     WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
1403 }
1404
1405 void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
1406 {
1407     HandleSlot slot = handle.slot();
1408     Finalizer finalizer = reinterpret_cast<Finalizer>(context);
1409     finalizer(slot->asCell());
1410     WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
1411 }
1412
1413 void Heap::addExecutable(ExecutableBase* executable)
1414 {
1415     m_executables.append(executable);
1416 }
1417
1418 void Heap::collectAllGarbageIfNotDoneRecently()
1419 {
1420     if (!m_fullActivityCallback) {
1421         collectAllGarbage();
1422         return;
1423     }
1424
1425     if (m_fullActivityCallback->didSyncGCRecently()) {
1426         // A synchronous GC was already requested recently, so we merely accelerate the next collection.
1427         reportAbandonedObjectGraph();
1428         return;
1429     }
1430
1431     m_fullActivityCallback->setDidSyncGCRecently();
1432     collectAllGarbage();
1433 }
1434
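// Fills each dead cell with zombifiedBits, preserving the first word of zapped cells. Used when
// Options::useZombieMode() is enabled, so that stale references to dead objects are easy to recognize.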
1435 class Zombify : public MarkedBlock::VoidFunctor {
1436 public:
1437     inline void visit(HeapCell* cell) const
1438     {
1439         void** current = reinterpret_cast<void**>(cell);
1440
1441         // We want to maintain zapped-ness because that's how we know if we've called 
1442         // the destructor.
1443         if (cell->isZapped())
1444             current++;
1445
1446         void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + cell->cellSize());
1447         for (; current < limit; current++)
1448             *current = zombifiedBits;
1449     }
1450     IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
1451     {
1452         visit(cell);
1453         return IterationStatus::Continue;
1454     }
1455 };
1456
1457 void Heap::zombifyDeadObjects()
1458 {
1459     // Sweep now because destructors will crash once we're zombified.
1460     m_objectSpace.sweep();
1461     HeapIterationScope iterationScope(*this);
1462     m_objectSpace.forEachDeadCell(iterationScope, Zombify());
1463 }
1464
1465 void Heap::flushWriteBarrierBuffer(JSCell* cell)
1466 {
1467     m_writeBarrierBuffer.flush(*this);
1468     m_writeBarrierBuffer.add(cell);
1469 }
1470
1471 bool Heap::shouldDoFullCollection(HeapOperation requestedCollectionType) const
1472 {
1473     if (!Options::useGenerationalGC())
1474         return true;
1475
1476     switch (requestedCollectionType) {
1477     case EdenCollection:
1478         return false;
1479     case FullCollection:
1480         return true;
1481     case AnyCollection:
1482         return m_shouldDoFullCollection;
1483     default:
1484         RELEASE_ASSERT_NOT_REACHED();
1485         return false;
1486     }
1487     RELEASE_ASSERT_NOT_REACHED();
1488     return false;
1489 }
1490
1491 void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
1492 {
1493     m_logicallyEmptyWeakBlocks.append(block);
1494 }
1495
1496 void Heap::sweepAllLogicallyEmptyWeakBlocks()
1497 {
1498     if (m_logicallyEmptyWeakBlocks.isEmpty())
1499         return;
1500
1501     m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
1502     while (sweepNextLogicallyEmptyWeakBlock()) { }
1503 }
1504
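// Sweeps one logically empty weak block per call, destroying the block if the sweep leaves it
// completely empty. Returns false once there are no more blocks to sweep.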
1505 bool Heap::sweepNextLogicallyEmptyWeakBlock()
1506 {
1507     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
1508         return false;
1509
1510     WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
1511
1512     block->sweep();
1513     if (block->isEmpty()) {
1514         std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
1515         m_logicallyEmptyWeakBlocks.removeLast();
1516         WeakBlock::destroy(*this, block);
1517     } else
1518         m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
1519
1520     if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
1521         m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
1522         return false;
1523     }
1524
1525     return true;
1526 }
1527
1528 size_t Heap::threadVisitCount()
1529 {
1530     size_t result = 0;
1531     for (auto& parallelVisitor : m_parallelSlotVisitors)
1532         result += parallelVisitor->visitCount();
1533     return result;
1534 }
1535
1536 size_t Heap::threadBytesVisited()
1537 {
1538     size_t result = 0;
1539     for (auto& parallelVisitor : m_parallelSlotVisitors)
1540         result += parallelVisitor->bytesVisited();
1541     return result;
1542 }
1543
1544 void Heap::forEachCodeBlockImpl(const ScopedLambda<bool(CodeBlock*)>& func)
1545 {
1546     // We don't know the full set of CodeBlocks until compilation has terminated.
1547     completeAllJITPlans();
1548
1549     return m_codeBlocks->iterate(func);
1550 }
1551
1552 void Heap::writeBarrierSlowPath(const JSCell* from)
1553 {
1554     if (UNLIKELY(barrierShouldBeFenced())) {
1555         // In this case, the barrierThreshold is the tautological threshold, so from could still be
1556         // not black. But we can't know for sure until we fire off a fence.
1557         WTF::storeLoadFence();
1558         if (!isBlack(from->cellState()))
1559             return;
1560     }
1561     
1562     addToRememberedSet(from);
1563 }
1564
1565 } // namespace JSC