Implement try/catch in the DFG.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGPlan.cpp
1 /*
2  * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGPlan.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGArgumentsEliminationPhase.h"
32 #include "DFGBackwardsPropagationPhase.h"
33 #include "DFGByteCodeParser.h"
34 #include "DFGCFAPhase.h"
35 #include "DFGCFGSimplificationPhase.h"
36 #include "DFGCPSRethreadingPhase.h"
37 #include "DFGCSEPhase.h"
38 #include "DFGCleanUpPhase.h"
39 #include "DFGConstantFoldingPhase.h"
40 #include "DFGConstantHoistingPhase.h"
41 #include "DFGCriticalEdgeBreakingPhase.h"
42 #include "DFGDCEPhase.h"
43 #include "DFGFailedFinalizer.h"
44 #include "DFGFixupPhase.h"
45 #include "DFGGraphSafepoint.h"
46 #include "DFGIntegerCheckCombiningPhase.h"
47 #include "DFGIntegerRangeOptimizationPhase.h"
48 #include "DFGInvalidationPointInjectionPhase.h"
49 #include "DFGJITCompiler.h"
50 #include "DFGLICMPhase.h"
51 #include "DFGLiveCatchVariablePreservationPhase.h"
52 #include "DFGLivenessAnalysisPhase.h"
53 #include "DFGLoopPreHeaderCreationPhase.h"
54 #include "DFGMaximalFlushInsertionPhase.h"
55 #include "DFGMovHintRemovalPhase.h"
56 #include "DFGOSRAvailabilityAnalysisPhase.h"
57 #include "DFGOSREntrypointCreationPhase.h"
58 #include "DFGObjectAllocationSinkingPhase.h"
59 #include "DFGPhantomInsertionPhase.h"
60 #include "DFGPredictionInjectionPhase.h"
61 #include "DFGPredictionPropagationPhase.h"
62 #include "DFGPutStackSinkingPhase.h"
63 #include "DFGSSAConversionPhase.h"
64 #include "DFGSSALoweringPhase.h"
65 #include "DFGStackLayoutPhase.h"
66 #include "DFGStaticExecutionCountEstimationPhase.h"
67 #include "DFGStoreBarrierInsertionPhase.h"
68 #include "DFGStrengthReductionPhase.h"
69 #include "DFGStructureRegistrationPhase.h"
70 #include "DFGTierUpCheckInjectionPhase.h"
71 #include "DFGTypeCheckHoistingPhase.h"
72 #include "DFGUnificationPhase.h"
73 #include "DFGValidate.h"
74 #include "DFGVarargsForwardingPhase.h"
75 #include "DFGVirtualRegisterAllocationPhase.h"
76 #include "DFGWatchpointCollectionPhase.h"
77 #include "Debugger.h"
78 #include "JSCInlines.h"
79 #include "OperandsInlines.h"
80 #include "ProfilerDatabase.h"
81 #include "TrackedReferences.h"
82 #include <wtf/CurrentTime.h>
83
84 #if ENABLE(FTL_JIT)
85 #include "FTLCapabilities.h"
86 #include "FTLCompile.h"
87 #include "FTLFail.h"
88 #include "FTLLink.h"
89 #include "FTLLowerDFGToLLVM.h"
90 #include "FTLState.h"
91 #include "InitializeLLVM.h"
92 #endif
93
94 namespace JSC { namespace DFG {
95
96 namespace {
97
// Aggregate compile times in milliseconds, accumulated by
// Plan::compileInThread() when Options::reportTotalCompileTimes() is set and
// reported via Plan::compileTimeStats(). The FTL totals are split into the
// DFG-side portion (up to m_timeBeforeFTL) and the LLVM-side portion.
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLLLVMCompileTime;
102
103 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
104 {
105     GraphDumpMode modeForFinalValidate = DumpGraph;
106     if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
107         dataLog(text, "\n");
108         graph.dump();
109         modeForFinalValidate = DontDumpGraph;
110     }
111     if (validationEnabled())
112         validate(graph, modeForFinalValidate);
113 }
114
115 Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
116 {
117     switch (mode) {
118     case InvalidCompilationMode:
119         RELEASE_ASSERT_NOT_REACHED();
120         return Profiler::DFG;
121     case DFGMode:
122         return Profiler::DFG;
123     case FTLMode:
124         return Profiler::FTL;
125     case FTLForOSREntryMode:
126         return Profiler::FTLForOSREntry;
127     }
128     RELEASE_ASSERT_NOT_REACHED();
129     return Profiler::DFG;
130 }
131
132 } // anonymous namespace
133
// Captures everything the compiler thread will need: the code block to
// compile, the optional DFG code block whose profiling we consult, the
// compilation mode, the OSR entry point (UINT_MAX when there is none), and
// the values the compiled code must handle at that entry. A profiler
// Compilation record is created only when the per-bytecode profiler is
// active. The plan starts in the Preparing stage.
Plan::Plan(PassRefPtr<CodeBlock> passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<JSValue>& mustHandleValues)
    : vm(*passedCodeBlock->vm())
    , codeBlock(passedCodeBlock)
    , profiledDFGCodeBlock(profiledDFGCodeBlock)
    , mode(mode)
    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , mustHandleValues(mustHandleValues)
    , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), profilerCompilationKindForMode(mode))) : 0)
    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , identifiers(codeBlock.get())
    , weakReferences(codeBlock.get())
    , willTryToTierUp(false)
    , stage(Preparing)
{
}
151
// Nothing to release manually; the members' own destructors do the cleanup.
Plan::~Plan()
{
}
155
156 bool Plan::computeCompileTimes() const
157 {
158     return reportCompileTimes()
159         || Options::reportTotalCompileTimes();
160 }
161
162 bool Plan::reportCompileTimes() const
163 {
164     return Options::reportCompileTimes()
165         || (Options::reportFTLCompileTimes() && isFTL(mode));
166 }
167
// Entry point for the compiler thread: times and logs one compilation
// attempt, delegating the real work to compileInThreadImpl(), then checks
// the invariants that relate the returned path to the plan's state and
// accumulates/reports timing statistics.
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;
    
    // Sample the clock before doing anything else so the measurement covers
    // the whole pipeline.
    double before = 0;
    CString codeBlockName;
    if (computeCompileTimes())
        before = monotonicallyIncreasingTimeMS();
    if (reportCompileTimes())
        codeBlockName = toCString(*codeBlock);
    
    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    // Every path except cancellation must have installed a finalizer, and
    // cancellation must agree with the Cancelled stage.
    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
    
    double after = 0;
    if (computeCompileTimes())
        after = monotonicallyIncreasingTimeMS();
    
    if (Options::reportTotalCompileTimes()) {
        if (isFTL(mode)) {
            totalFTLCompileTime += after - before;
            // m_timeBeforeFTL was sampled in compileInThreadImpl() right
            // after FTL lowering, so it splits the total into the DFG-side
            // and LLVM-side portions.
            totalFTLDFGCompileTime += m_timeBeforeFTL - before;
            totalFTLLLVMCompileTime += after - m_timeBeforeFTL;
        } else
            totalDFGCompileTime += after - before;
    }
    
    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            // Placate compilers that don't know the assert above is noreturn.
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", LLVM: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
231
// The heart of the compiler. Parses the bytecode into a DFG graph, runs the
// optimization phase pipeline, and then hands the graph to the DFG backend
// (DFGMode) or the FTL/LLVM backend (FTLMode, FTLForOSREntryMode). The order
// of phases below is load-bearing: many phases state their prerequisites in
// the comments next to them. Any failure installs a FailedFinalizer before
// returning FailPath; CancelPath means we were cancelled at a safepoint and
// deliberately installs no finalizer.
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    // Bytecode parsing can refuse to build a graph; record the failure so
    // finalization still has something to run.
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::enableMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    // Early CPS-form clean-up and profiling injection, common to all modes.
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    // For OSR-entry FTL compiles, rewrite the graph so it can be entered at
    // osrEntryBytecodeIndex; this can also fail.
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    // Re-run the flow analyses if any of the structural phases did anything.
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
        dfg.m_prePostNumbering.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        // DFG backend: finish up in CPS form and emit code via JITCompiler.
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        // Bail early if the FTL can't lower this graph at all.
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        // Convert to SSA and run the heavier FTL-only optimization pipeline.
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::enableObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::enableMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        // The phases above may have changed the graph in ways the FTL can't
        // handle, so re-check before lowering.
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));
        
        // LLVM initialization may block, so do it under a safepoint that lets
        // the GC scan (and possibly cancel) this plan in the meantime.
        bool haveLLVM;
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
            haveLLVM = initializeLLVM();
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (!haveLLVM) {
            if (Options::ftlCrashesIfCantInitializeLLVM()) {
                dataLog("LLVM can't be initialized.\n");
                CRASH();
            }
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);
        
        // This timestamp separates DFG-side time from LLVM-side time in the
        // totals reported by compileInThread().
        if (computeCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        // Testing hook: pretend LLVM compilation failed.
        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        // Testing hook: pretend linking failed.
        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        // Reject code whose frame would exceed the configured limit.
        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        // Note that FTL::fail() above also returns FTLPath: "FTL path taken",
        // not "FTL succeeded". The finalizer distinguishes the outcomes.
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
519
520 bool Plan::isStillValid()
521 {
522     CodeBlock* replacement = codeBlock->replacement();
523     if (!replacement)
524         return false;
525     // FIXME: This is almost certainly not necessary. There's no way for the baseline
526     // code to be replaced during a compilation, except if we delete the plan, in which
527     // case we wouldn't be here.
528     // https://bugs.webkit.org/show_bug.cgi?id=132707
529     if (codeBlock->alternative() != replacement->baselineVersion())
530         return false;
531     if (!watchpoints.areStillValid())
532         return false;
533     return true;
534 }
535
// Commits the watchpoints, identifiers, weak references, and structure
// transitions that were merely "desired" during compilation into the
// installed code's CommonData. Called from finalizeWithoutNotifyingCallback()
// once the code is known to be going in.
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock.get(), *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
}
543
// Stage transition: a compiler thread has picked up this plan.
void Plan::notifyCompiling()
{
    stage = Compiling;
}
548
// Stage transition: compilation finished; the plan awaits finalization.
void Plan::notifyCompiled()
{
    stage = Compiled;
}
553
// Stage transition: tells the callback the result is ready to be finalized,
// then marks the plan Ready.
void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock.get());
    stage = Ready;
}
559
// Installs the compiled code into the code block. Returns
// CompilationInvalidated if the world changed while we were compiling,
// CompilationFailed if the finalizer could not install the code, and
// CompilationSuccessful otherwise. The write barrier must happen before
// anything else, since finalization creates new references from the code
// block.
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm.heap.writeBarrier(codeBlock->ownerExecutable());
    
    if (!isStillValid())
        return CompilationInvalidated;

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();
    
    if (!result)
        return CompilationFailed;
    
    // Only commit the desired watchpoints/identifiers/references once we know
    // the code actually went in.
    reallyAdd(codeBlock->jitCode()->dfgCommon());
    
    if (validationEnabled()) {
        // Gather everything we deliberately reference, strongly or weakly...
        TrackedReferences trackedReferences;
        
        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());
        
        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }
    
    return CompilationSuccessful;
}
596
// Finalizes the compilation and reports the outcome to the callback in one
// step. The callback is invoked regardless of whether finalization succeeded;
// it receives the failure result if not.
void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock.get(), finalizeWithoutNotifyingCallback());
}
601
602 CompilationKey Plan::key()
603 {
604     return CompilationKey(codeBlock->alternative(), mode);
605 }
606
// GC hook: forces every CodeBlock this plan touches to be revisited by the
// collector.
void Plan::clearCodeBlockMarks()
{
    // Compilation writes lots of values to a CodeBlock without performing
    // an explicit barrier. So, we need to be pessimistic and assume that
    // all our CodeBlocks must be visited during GC.

    codeBlock->clearMarks();
    codeBlock->alternative()->clearMarks();
    if (profiledDFGCodeBlock)
        profiledDFGCodeBlock->clearMarks();
}
618
// GC hook: if the plan is still live, visit everything the compilation
// references so none of it gets collected while we compile.
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;
    
    // mustHandleValues holds raw JSValues rather than barriered slots, hence
    // the unbarriered append.
    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    codeBlock->visitStrongly(visitor);
    codeBlock->alternative()->visitStrongly(visitor);
    if (profiledDFGCodeBlock)
        profiledDFGCodeBlock->visitStrongly(visitor);

    // Also keep the baseline code blocks of everything we inlined alive.
    if (inlineCallFrames) {
        for (auto* inlineCallFrame : *inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock());
            inlineCallFrame->baselineCodeBlock()->visitStrongly(visitor);
        }
    }

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}
642
643 bool Plan::isKnownToBeLiveDuringGC()
644 {
645     if (stage == Cancelled)
646         return false;
647     if (!Heap::isMarked(codeBlock->ownerExecutable()))
648         return false;
649     if (!codeBlock->alternative()->isKnownToBeLiveDuringGC())
650         return false;
651     if (!!profiledDFGCodeBlock && !profiledDFGCodeBlock->isKnownToBeLiveDuringGC())
652         return false;
653     return true;
654 }
655
// Abandons the compilation: drops every reference the plan holds (so the GC
// no longer needs to keep the inputs alive; isKnownToBeLiveDuringGC() will
// return false from here on) and moves the plan to the Cancelled stage.
void Plan::cancel()
{
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}
671
672 HashMap<CString, double> Plan::compileTimeStats()
673 {
674     HashMap<CString, double> result;
675     if (Options::reportTotalCompileTimes()) {
676         result.add("Compile Time", totalDFGCompileTime + totalFTLCompileTime);
677         result.add("DFG Compile Time", totalDFGCompileTime);
678         result.add("FTL Compile Time", totalFTLCompileTime);
679         result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
680         result.add("FTL (LLVM) Compile Time", totalFTLLLVMCompileTime);
681     }
682     return result;
683 }
684
685 } } // namespace JSC::DFG
686
687 #endif // ENABLE(DFG_JIT)
688