Remove remaining references to LLVM, and make sure comments refer to the backend...
Source/JavaScriptCore/dfg/DFGPlan.cpp
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPlan.h"

#if ENABLE(DFG_JIT)

#include "DFGArgumentsEliminationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
#include "DFGConstantHoistingPhase.h"
#include "DFGCopyBarrierOptimizationPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
#include "DFGFixupPhase.h"
#include "DFGGraphSafepoint.h"
#include "DFGIntegerCheckCombiningPhase.h"
#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLiveCatchVariablePreservationPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGMaximalFlushInsertionPhase.h"
#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
#include "DFGObjectAllocationSinkingPhase.h"
#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGPutStackSinkingPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
#include "DFGStaticExecutionCountEstimationPhase.h"
#include "DFGStoreBarrierInsertionPhase.h"
#include "DFGStrengthReductionPhase.h"
#include "DFGStructureRegistrationPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"
#include "ProfilerDatabase.h"
#include "TrackedReferences.h"
#include <wtf/CurrentTime.h>

#if ENABLE(FTL_JIT)
#include "FTLCapabilities.h"
#include "FTLCompile.h"
#include "FTLFail.h"
#include "FTLLink.h"
#include "FTLLowerDFGToB3.h"
#include "FTLState.h"
#endif

namespace JSC { namespace DFG {

namespace {

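// Accumulated compile times (in milliseconds) across all plans in this process. They
// are only updated, and only reported by Plan::compileTimeStats(), when
// Options::reportTotalCompileTimes() is enabled.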
double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLB3CompileTime;

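// Dumps the graph when verbose compilation is enabled for this plan's mode (or when
// forceDump is set), and runs graph validation when validation is enabled. If the
// graph was already dumped here, validation is told not to dump it again on failure.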
void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}

Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return Profiler::DFG;
    case DFGMode:
        return Profiler::DFG;
    case FTLMode:
        return Profiler::FTL;
    case FTLForOSREntryMode:
        return Profiler::FTLForOSREntry;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return Profiler::DFG;
}

} // anonymous namespace

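// A Plan describes one DFG or FTL compilation of a CodeBlock: the mode to compile in,
// the bytecode index to target for OSR entry (if any), the values that must be handled
// at that entry point, and the desired watchpoints, identifiers, weak references, and
// transitions that will be installed into the CodeBlock if the compile is finalized.
// The stage field tracks the plan's progress from Preparing through Ready (see the
// notify* methods below).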
Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<JSValue>& mustHandleValues)
    : vm(*passedCodeBlock->vm())
    , codeBlock(passedCodeBlock)
    , profiledDFGCodeBlock(profiledDFGCodeBlock)
    , mode(mode)
    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , mustHandleValues(mustHandleValues)
    , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : 0)
    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , identifiers(codeBlock)
    , weakReferences(codeBlock)
    , willTryToTierUp(false)
    , stage(Preparing)
{
}

Plan::~Plan()
{
}

bool Plan::computeCompileTimes() const
{
    return reportCompileTimes()
        || Options::reportTotalCompileTimes();
}

bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL(mode));
}

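// Top-level driver for a single compilation. The real work happens in
// compileInThreadImpl(); this wrapper records the thread doing the work, measures and
// reports compile times when the relevant options are enabled, and sanity-checks the
// resulting compilation path against the finalizer and stage.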
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (computeCompileTimes())
        before = monotonicallyIncreasingTimeMS();
    if (reportCompileTimes())
        codeBlockName = toCString(*codeBlock);

    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    double after = 0;
    if (computeCompileTimes())
        after = monotonicallyIncreasingTimeMS();

    if (Options::reportTotalCompileTimes()) {
        if (isFTL(mode)) {
            totalFTLCompileTime += after - before;
            totalFTLDFGCompileTime += m_timeBeforeFTL - before;
            totalFTLB3CompileTime += after - m_timeBeforeFTL;
        } else
            totalDFGCompileTime += after - before;
    }

    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}

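// The phase pipeline. We parse bytecode into a DFG graph, run the CPS-based
// optimization phases, and then either finish the compile with the DFG backend
// (DFGMode) or convert the graph to SSA and hand it to the FTL, which lowers it to B3
// (FTLMode and FTLForOSREntryMode). Returns the path taken, or FailPath / CancelPath
// if the compile could not be completed.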
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        if (Options::useCopyBarrierOptimization())
            performCopyBarrierOptimization(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        if (Options::useCopyBarrierOptimization())
            performCopyBarrierOptimization(dfg);
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (computeCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}

bool Plan::isStillValid()
{
    CodeBlock* replacement = codeBlock->replacement();
    if (!replacement)
        return false;
    // FIXME: This is almost certainly not necessary. There's no way for the baseline
    // code to be replaced during a compilation, except if we delete the plan, in which
    // case we wouldn't be here.
    // https://bugs.webkit.org/show_bug.cgi?id=132707
    if (codeBlock->alternative() != replacement->baselineVersion())
        return false;
    if (!watchpoints.areStillValid())
        return false;
    return true;
}

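// Installs the desired watchpoints, identifiers, weak references, and transitions that
// were collected during compilation into the CodeBlock's common data. Called from
// finalizeWithoutNotifyingCallback() once we know the compile is still valid.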
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock, *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
}

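// Stage transitions. A plan starts out Preparing (set in the constructor), is marked
// Compiling while the work is being done and Compiled when it finishes, and becomes
// Ready once the callback has been told that the compilation is available. cancel()
// below can move a plan to Cancelled at any point.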
void Plan::notifyCompiling()
{
    stage = Compiling;
}

void Plan::notifyCompiled()
{
    stage = Compiled;
}

void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
    stage = Ready;
}

CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm.heap.writeBarrier(codeBlock);

    if (!isStillValid())
        return CompilationInvalidated;

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();

    if (!result)
        return CompilationFailed;

    reallyAdd(codeBlock->jitCode()->dfgCommon());

    if (validationEnabled()) {
        TrackedReferences trackedReferences;

        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());

        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }

    return CompilationSuccessful;
}

void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}

CompilationKey Plan::key()
{
    return CompilationKey(codeBlock->alternative(), mode);
}

void Plan::rememberCodeBlocks()
{
    // Compilation writes lots of values to a CodeBlock without performing
    // an explicit barrier. So, we need to be pessimistic and assume that
    // all our CodeBlocks must be visited during GC.

    Heap::heap(codeBlock)->writeBarrier(codeBlock);
    Heap::heap(codeBlock)->writeBarrier(codeBlock->alternative());
    if (profiledDFGCodeBlock)
        Heap::heap(profiledDFGCodeBlock)->writeBarrier(profiledDFGCodeBlock);
}

void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    visitor.appendUnbarrieredReadOnlyPointer(codeBlock);
    visitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative());
    visitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock);

    if (inlineCallFrames) {
        for (auto* inlineCallFrame : *inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock.get());
            visitor.appendUnbarrieredReadOnlyPointer(inlineCallFrame->baselineCodeBlock.get());
        }
    }

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}

bool Plan::isKnownToBeLiveDuringGC()
{
    if (stage == Cancelled)
        return false;
    if (!Heap::isMarked(codeBlock->ownerExecutable()))
        return false;
    if (!Heap::isMarked(codeBlock->alternative()))
        return false;
    if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
        return false;
    return true;
}

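// Drops every reference the plan holds so that a cancelled plan keeps nothing alive.
// Once the stage is Cancelled, isKnownToBeLiveDuringGC() reports the plan as dead and
// checkLivenessAndVisitChildren() does no visiting.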
void Plan::cancel()
{
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}

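// Returns the accumulated totals declared at the top of this file, keyed by a
// human-readable name. The map is empty unless Options::reportTotalCompileTimes() is
// enabled. A hypothetical caller could dump the stats like this (illustrative sketch
// only, not code from this file):
//
//     for (auto& entry : Plan::compileTimeStats())
//         dataLog(entry.key, ": ", entry.value, " ms\n");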
HashMap<CString, double> Plan::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Compile Time", totalDFGCompileTime + totalFTLCompileTime);
        result.add("DFG Compile Time", totalDFGCompileTime);
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
    }
    return result;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)