Source/JavaScriptCore/dfg/DFGPlan.cpp
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPlan.h"

#if ENABLE(DFG_JIT)

#include "DFGArgumentsEliminationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
#include "DFGConstantHoistingPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
#include "DFGFixupPhase.h"
#include "DFGGraphSafepoint.h"
#include "DFGIntegerCheckCombiningPhase.h"
#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGMaximalFlushInsertionPhase.h"
#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
#include "DFGObjectAllocationSinkingPhase.h"
#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGPutStackSinkingPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
#include "DFGStaticExecutionCountEstimationPhase.h"
#include "DFGStoreBarrierInsertionPhase.h"
#include "DFGStrengthReductionPhase.h"
#include "DFGStructureRegistrationPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"
#include "ProfilerDatabase.h"
#include "TrackedReferences.h"
#include <wtf/CurrentTime.h>

#if ENABLE(FTL_JIT)
#include "FTLCapabilities.h"
#include "FTLCompile.h"
#include "FTLFail.h"
#include "FTLLink.h"
#include "FTLLowerDFGToLLVM.h"
#include "FTLState.h"
#include "InitializeLLVM.h"
#endif

namespace JSC { namespace DFG {

namespace {

double totalDFGCompileTime;
double totalFTLCompileTime;
double totalFTLDFGCompileTime;
double totalFTLLLVMCompileTime;

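// Dumps the graph when verbose compilation is enabled for this plan's mode (or when forced),
// and runs graph validation when validation is enabled; the validation call is told whether
// the graph has already been dumped above.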
void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}

Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return Profiler::DFG;
    case DFGMode:
        return Profiler::DFG;
    case FTLMode:
        return Profiler::FTL;
    case FTLForOSREntryMode:
        return Profiler::FTLForOSREntry;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return Profiler::DFG;
}

} // anonymous namespace

Plan::Plan(PassRefPtr<CodeBlock> passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<JSValue>& mustHandleValues)
    : vm(*passedCodeBlock->vm())
    , codeBlock(passedCodeBlock)
    , profiledDFGCodeBlock(profiledDFGCodeBlock)
    , mode(mode)
    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , mustHandleValues(mustHandleValues)
    , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), profilerCompilationKindForMode(mode))) : 0)
    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , identifiers(codeBlock.get())
    , weakReferences(codeBlock.get())
    , willTryToTierUp(false)
    , stage(Preparing)
{
}

Plan::~Plan()
{
}

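// Compile times are measured whenever they will be reported, either per compilation
// (reportCompileTimes / reportFTLCompileTimes) or in aggregate (reportTotalCompileTimes).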
bool Plan::computeCompileTimes() const
{
    return reportCompileTimes()
        || Options::reportTotalCompileTimes();
}

bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL(mode));
}

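// Drives one compilation on the current thread: optionally records timing, runs the phase
// pipeline via compileInThreadImpl(), and then reports per-plan and aggregate compile times
// as requested by the relevant Options.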
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (computeCompileTimes())
        before = monotonicallyIncreasingTimeMS();
    if (reportCompileTimes())
        codeBlockName = toCString(*codeBlock);

    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    double after = 0;
    if (computeCompileTimes())
        after = monotonicallyIncreasingTimeMS();

    if (Options::reportTotalCompileTimes()) {
        if (isFTL(mode)) {
            totalFTLCompileTime += after - before;
            totalFTLDFGCompileTime += m_timeBeforeFTL - before;
            totalFTLLLVMCompileTime += after - m_timeBeforeFTL;
        } else
            totalDFGCompileTime += after - before;
    }

    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", LLVM: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}

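// The phase pipeline. Parses bytecode into a DFG graph, runs the CPS-level optimization
// phases shared by all tiers, and then either finishes code generation with the DFG backend
// (DFGMode) or converts to SSA, runs the SSA-level phases, and lowers through LLVM for the
// FTL modes.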
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    if (Options::enableMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
        dfg.m_prePostNumbering.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::enableObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::enableMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));

        bool haveLLVM;
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
            haveLLVM = initializeLLVM();
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (!haveLLVM) {
            if (Options::ftlCrashesIfCantInitializeLLVM()) {
                dataLog("LLVM can't be initialized.\n");
                CRASH();
            }
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (computeCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}

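// A plan is invalidated if the code block no longer has a replacement, if the baseline code
// block changed out from under us (see the FIXME below), or if any watchpoint set we are
// relying on is no longer valid.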
bool Plan::isStillValid()
{
    CodeBlock* replacement = codeBlock->replacement();
    if (!replacement)
        return false;
    // FIXME: This is almost certainly not necessary. There's no way for the baseline
    // code to be replaced during a compilation, except if we delete the plan, in which
    // case we wouldn't be here.
    // https://bugs.webkit.org/show_bug.cgi?id=132707
    if (codeBlock->alternative() != replacement->baselineVersion())
        return false;
    if (!watchpoints.areStillValid())
        return false;
    return true;
}

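// Transfers the watchpoints, identifiers, weak references, and transitions that were desired
// during compilation into the CommonData of the generated code.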
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock.get(), *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
}

void Plan::notifyCompiling()
{
    stage = Compiling;
}

void Plan::notifyCompiled()
{
    stage = Compiled;
}

void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock.get());
    stage = Ready;
}

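// Attempts to install the compiled code: re-checks that the plan is still valid, runs the
// finalizer, transfers the desired additions via reallyAdd(), and, when validation is enabled,
// checks that every reference held by the new JITCode is tracked either strongly or weakly.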
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm.heap.writeBarrier(codeBlock->ownerExecutable());

    if (!isStillValid())
        return CompilationInvalidated;

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();

    if (!result)
        return CompilationFailed;

    reallyAdd(codeBlock->jitCode()->dfgCommon());

    if (validationEnabled()) {
        TrackedReferences trackedReferences;

        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());

        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }

    return CompilationSuccessful;
}

void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock.get(), finalizeWithoutNotifyingCallback());
}

CompilationKey Plan::key()
{
    return CompilationKey(codeBlock->alternative(), mode);
}

void Plan::clearCodeBlockMarks()
{
    // Compilation writes lots of values to a CodeBlock without performing
    // an explicit barrier. So, we need to be pessimistic and assume that
    // all our CodeBlocks must be visited during GC.

    codeBlock->clearMarks();
    codeBlock->alternative()->clearMarks();
    if (profiledDFGCodeBlock)
        profiledDFGCodeBlock->clearMarks();
}

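// GC support: if the plan is still live, visits the must-handle values, visits the involved
// code blocks strongly, and visits the desired weak references and transitions.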
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    codeBlock->visitStrongly(visitor);
    codeBlock->alternative()->visitStrongly(visitor);
    if (profiledDFGCodeBlock)
        profiledDFGCodeBlock->visitStrongly(visitor);

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}

bool Plan::isKnownToBeLiveDuringGC()
{
    if (stage == Cancelled)
        return false;
    if (!Heap::isMarked(codeBlock->ownerExecutable()))
        return false;
    if (!codeBlock->alternative()->isKnownToBeLiveDuringGC())
        return false;
    if (!!profiledDFGCodeBlock && !profiledDFGCodeBlock->isKnownToBeLiveDuringGC())
        return false;
    return true;
}

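// Releases everything the plan holds and moves it to the Cancelled stage, so a cancelled plan
// no longer keeps the code block, its profiling data, or the desired additions alive.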
void Plan::cancel()
{
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}

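// Exposes the aggregate compile-time totals, keyed by human-readable names; populated only
// when Options::reportTotalCompileTimes() is enabled.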
HashMap<CString, double> Plan::compileTimeStats()
{
    HashMap<CString, double> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Compile Time", totalDFGCompileTime + totalFTLCompileTime);
        result.add("DFG Compile Time", totalDFGCompileTime);
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (LLVM) Compile Time", totalFTLLLVMCompileTime);
    }
    return result;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)