Source/JavaScriptCore/dfg/DFGPlan.cpp
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPlan.h"

#if ENABLE(DFG_JIT)

#include "DFGArgumentsEliminationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
#include "DFGConstantHoistingPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
#include "DFGFixupPhase.h"
#include "DFGGraphSafepoint.h"
#include "DFGIntegerCheckCombiningPhase.h"
#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLiveCatchVariablePreservationPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGMaximalFlushInsertionPhase.h"
#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
#include "DFGObjectAllocationSinkingPhase.h"
#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGPutStackSinkingPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
#include "DFGStaticExecutionCountEstimationPhase.h"
#include "DFGStoreBarrierInsertionPhase.h"
#include "DFGStrengthReductionPhase.h"
#include "DFGStructureRegistrationPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"
#include "ProfilerDatabase.h"
#include "TrackedReferences.h"
#include <wtf/CurrentTime.h>

#if ENABLE(FTL_JIT)
#include "FTLCapabilities.h"
#include "FTLCompile.h"
#include "FTLFail.h"
#include "FTLLink.h"
#include "FTLLowerDFGToB3.h"
#endif

namespace JSC {

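// Running totals accumulated when Options::reportTotalCompileTimes() is enabled; they are only
// declared here (see compileInThread() below) and are defined elsewhere in JavaScriptCore.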
extern double totalDFGCompileTime;
extern double totalFTLCompileTime;
extern double totalFTLDFGCompileTime;
extern double totalFTLB3CompileTime;

}

namespace JSC { namespace DFG {

namespace {

void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}

Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return Profiler::DFG;
    case DFGMode:
        return Profiler::DFG;
    case FTLMode:
        return Profiler::FTL;
    case FTLForOSREntryMode:
        return Profiler::FTLForOSREntry;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return Profiler::DFG;
}

} // anonymous namespace

Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<JSValue>& mustHandleValues)
    : vm(*passedCodeBlock->vm())
    , codeBlock(passedCodeBlock)
    , profiledDFGCodeBlock(profiledDFGCodeBlock)
    , mode(mode)
    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , mustHandleValues(mustHandleValues)
    , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : 0)
    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , identifiers(codeBlock)
    , weakReferences(codeBlock)
    , stage(Preparing)
{
}

Plan::~Plan()
{
}

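// We need to take timestamps either because this compilation's times will be reported
// individually (reportCompileTimes()) or because they feed the process-wide totals declared above.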
bool Plan::computeCompileTimes() const
{
    return reportCompileTimes()
        || Options::reportTotalCompileTimes();
}

bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || Options::reportDFGCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL(mode));
}

void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*codeBlock);

    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes()) {
            if (isFTL(mode)) {
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    if (UNLIKELY(reportCompileTimes())) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}

Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
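    // Track whether the passes below actually change the graph; if any do, we re-run CFA and
    // constant folding afterwards so their results are up to date.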
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);

        // FIXME: Currently, IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes to nodes based on the preceding blocks
        // and nodes. LICM moves nodes, which can invalidate the assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
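            // Entering and immediately leaving this scope gives a concurrent GC a chance to act on
            // (and possibly cancel) the plan; the outcome is reported through safepointResult below.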
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}

bool Plan::isStillValid()
{
    CodeBlock* replacement = codeBlock->replacement();
    if (!replacement)
        return false;
    // FIXME: This is almost certainly not necessary. There's no way for the baseline
    // code to be replaced during a compilation, except if we delete the plan, in which
    // case we wouldn't be here.
    // https://bugs.webkit.org/show_bug.cgi?id=132707
    if (codeBlock->alternative() != replacement->baselineVersion())
        return false;
    if (!watchpoints.areStillValid())
        return false;
    return true;
}

void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock, *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
}

void Plan::notifyCompiling()
{
    stage = Compiling;
}

void Plan::notifyCompiled()
{
    stage = Compiled;
}

void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
    stage = Ready;
}

CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm.heap.writeBarrier(codeBlock);

    if (!isStillValid())
        return CompilationInvalidated;

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();

    if (!result)
        return CompilationFailed;

    reallyAdd(codeBlock->jitCode()->dfgCommon());

    if (validationEnabled()) {
        TrackedReferences trackedReferences;

        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());

        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }

    return CompilationSuccessful;
}

void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}

CompilationKey Plan::key()
{
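    // A plan is keyed by the code block it would replace (its baseline alternative) together with
    // the compilation mode.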
    return CompilationKey(codeBlock->alternative(), mode);
}

void Plan::rememberCodeBlocks()
{
    // Compilation writes lots of values to a CodeBlock without performing
    // an explicit barrier. So, we need to be pessimistic and assume that
    // all our CodeBlocks must be visited during GC.

    Heap::heap(codeBlock)->writeBarrier(codeBlock);
    Heap::heap(codeBlock)->writeBarrier(codeBlock->alternative());
    if (profiledDFGCodeBlock)
        Heap::heap(profiledDFGCodeBlock)->writeBarrier(profiledDFGCodeBlock);
}

void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    visitor.appendUnbarrieredReadOnlyPointer(codeBlock);
    visitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative());
    visitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock);

    if (inlineCallFrames) {
        for (auto* inlineCallFrame : *inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock.get());
            visitor.appendUnbarrieredReadOnlyPointer(inlineCallFrame->baselineCodeBlock.get());
        }
    }

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}

bool Plan::isKnownToBeLiveDuringGC()
{
    if (stage == Cancelled)
        return false;
    if (!Heap::isMarked(codeBlock->ownerExecutable()))
        return false;
    if (!Heap::isMarked(codeBlock->alternative()))
        return false;
    if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
        return false;
    return true;
}

void Plan::cancel()
{
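    // Drop every reference the plan holds so that a cancelled plan keeps nothing alive for the GC;
    // once stage is Cancelled, isKnownToBeLiveDuringGC() returns false.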
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)