Beef up JSC profiler event log
[WebKit.git] / Source / JavaScriptCore / dfg / DFGPlan.cpp
1 /*
2  * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGPlan.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGArgumentsEliminationPhase.h"
32 #include "DFGBackwardsPropagationPhase.h"
33 #include "DFGByteCodeParser.h"
34 #include "DFGCFAPhase.h"
35 #include "DFGCFGSimplificationPhase.h"
36 #include "DFGCPSRethreadingPhase.h"
37 #include "DFGCSEPhase.h"
38 #include "DFGCleanUpPhase.h"
39 #include "DFGConstantFoldingPhase.h"
40 #include "DFGConstantHoistingPhase.h"
41 #include "DFGCriticalEdgeBreakingPhase.h"
42 #include "DFGDCEPhase.h"
43 #include "DFGFailedFinalizer.h"
44 #include "DFGFixupPhase.h"
45 #include "DFGGraphSafepoint.h"
46 #include "DFGIntegerCheckCombiningPhase.h"
47 #include "DFGIntegerRangeOptimizationPhase.h"
48 #include "DFGInvalidationPointInjectionPhase.h"
49 #include "DFGJITCompiler.h"
50 #include "DFGLICMPhase.h"
51 #include "DFGLiveCatchVariablePreservationPhase.h"
52 #include "DFGLivenessAnalysisPhase.h"
53 #include "DFGLoopPreHeaderCreationPhase.h"
54 #include "DFGMaximalFlushInsertionPhase.h"
55 #include "DFGMovHintRemovalPhase.h"
56 #include "DFGOSRAvailabilityAnalysisPhase.h"
57 #include "DFGOSREntrypointCreationPhase.h"
58 #include "DFGObjectAllocationSinkingPhase.h"
59 #include "DFGPhantomInsertionPhase.h"
60 #include "DFGPredictionInjectionPhase.h"
61 #include "DFGPredictionPropagationPhase.h"
62 #include "DFGPutStackSinkingPhase.h"
63 #include "DFGSSAConversionPhase.h"
64 #include "DFGSSALoweringPhase.h"
65 #include "DFGStackLayoutPhase.h"
66 #include "DFGStaticExecutionCountEstimationPhase.h"
67 #include "DFGStoreBarrierInsertionPhase.h"
68 #include "DFGStrengthReductionPhase.h"
69 #include "DFGStructureRegistrationPhase.h"
70 #include "DFGTierUpCheckInjectionPhase.h"
71 #include "DFGTypeCheckHoistingPhase.h"
72 #include "DFGUnificationPhase.h"
73 #include "DFGValidate.h"
74 #include "DFGVarargsForwardingPhase.h"
75 #include "DFGVirtualRegisterAllocationPhase.h"
76 #include "DFGWatchpointCollectionPhase.h"
77 #include "Debugger.h"
78 #include "JSCInlines.h"
79 #include "OperandsInlines.h"
80 #include "ProfilerDatabase.h"
81 #include "TrackedReferences.h"
82 #include "VMInlines.h"
83 #include <wtf/CurrentTime.h>
84
85 #if ENABLE(FTL_JIT)
86 #include "FTLCapabilities.h"
87 #include "FTLCompile.h"
88 #include "FTLFail.h"
89 #include "FTLLink.h"
90 #include "FTLLowerDFGToB3.h"
91 #include "FTLState.h"
92 #endif
93
94 namespace JSC {
95
96 extern double totalDFGCompileTime;
97 extern double totalFTLCompileTime;
98 extern double totalFTLDFGCompileTime;
99 extern double totalFTLB3CompileTime;
100
101 }
102
103 namespace JSC { namespace DFG {
104
105 namespace {
106
107 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
108 {
109     GraphDumpMode modeForFinalValidate = DumpGraph;
110     if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
111         dataLog(text, "\n");
112         graph.dump();
113         modeForFinalValidate = DontDumpGraph;
114     }
115     if (validationEnabled())
116         validate(graph, modeForFinalValidate);
117 }
118
119 Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
120 {
121     switch (mode) {
122     case InvalidCompilationMode:
123         RELEASE_ASSERT_NOT_REACHED();
124         return Profiler::DFG;
125     case DFGMode:
126         return Profiler::DFG;
127     case FTLMode:
128         return Profiler::FTL;
129     case FTLForOSREntryMode:
130         return Profiler::FTLForOSREntry;
131     }
132     RELEASE_ASSERT_NOT_REACHED();
133     return Profiler::DFG;
134 }
135
136 } // anonymous namespace
137
138 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
139     CompilationMode mode, unsigned osrEntryBytecodeIndex,
140     const Operands<JSValue>& mustHandleValues)
141     : vm(*passedCodeBlock->vm())
142     , codeBlock(passedCodeBlock)
143     , profiledDFGCodeBlock(profiledDFGCodeBlock)
144     , mode(mode)
145     , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
146     , mustHandleValues(mustHandleValues)
147     , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : 0)
148     , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
149     , identifiers(codeBlock)
150     , weakReferences(codeBlock)
151     , stage(Preparing)
152 {
153 }
154
155 Plan::~Plan()
156 {
157 }
158
159 bool Plan::computeCompileTimes() const
160 {
161     return reportCompileTimes()
162         || Options::reportTotalCompileTimes()
163         || vm.m_perBytecodeProfiler;
164 }
165
166 bool Plan::reportCompileTimes() const
167 {
168     return Options::reportCompileTimes()
169         || Options::reportDFGCompileTimes()
170         || (Options::reportFTLCompileTimes() && isFTL(mode));
171 }
172
// Runs the whole compilation for this plan on the given compiler thread:
// optionally samples timing, invokes compileInThreadImpl, accumulates total
// compile-time counters, logs a per-code-block profiler event describing the
// outcome, and optionally prints a compile-time report.
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;
    
    double before = 0;
    CString codeBlockName;
    // Only sample the clock if somebody will consume the numbers.
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();
    // Capture the name up front: codeBlock is nulled out if the plan gets cancelled.
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*codeBlock);
    
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    // Either the compile produced a finalizer, or it was cancelled - never neither.
    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
    
    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();
    
        if (Options::reportTotalCompileTimes()) {
            if (isFTL(mode)) {
                // For FTL compiles, split the total between the DFG front end and
                // B3 back end using m_timeBeforeFTL, recorded in compileInThreadImpl.
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    // Human-readable name for the path taken, used in the event log and report.
    const char* pathName = nullptr;
    switch (path) {
    case FailPath:
        pathName = "N/A (fail)";
        break;
    case DFGPath:
        pathName = "DFG";
        break;
    case FTLPath:
        pathName = "FTL";
        break;
    case CancelPath:
        pathName = "Cancelled";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
        if (path == FTLPath)
            CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", after - before, " ms (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ") with ", pathName));
        else
            CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", after - before, " ms with ", pathName));
    }
    if (UNLIKELY(reportCompileTimes())) {
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
238
// The compilation pipeline proper: parse bytecode into a DFG graph, run the
// CPS-form phases shared by both tiers, then either finish in the DFG backend
// (DFGMode) or convert to SSA and lower through FTL/B3 (FTLMode /
// FTLForOSREntryMode). Returns which path was taken, or FailPath/CancelPath on
// failure or cancellation. NOTE(review): phase ordering here is load-bearing -
// several phases document explicit ordering dependencies below.
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    // Bytecode parsing can refuse the code block; record the failure via a
    // FailedFinalizer so the caller still has a finalizer to report through.
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    // OSR-entry compiles first restructure the graph around the entry point;
    // if that's not possible, the compile fails.
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        // The phases above invalidated the abstract state; recompute it.
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        // DFG backend: emit machine code directly from the CPS-form graph.
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);

        // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes on nodes based on preceding blocks
        // and nodes. LICM moves nodes which can invalidates assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        // Re-check compilability: the phases above may have introduced nodes the
        // FTL cannot lower.
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);
        
        // Record when DFG-level work ended so compileInThread can split the
        // total time into DFG and B3 portions.
        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        // Testing hook: force a failure before the B3 compile.
        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        // Testing hook: force a failure before linking.
        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
520
521 bool Plan::isStillValid()
522 {
523     CodeBlock* replacement = codeBlock->replacement();
524     if (!replacement)
525         return false;
526     // FIXME: This is almost certainly not necessary. There's no way for the baseline
527     // code to be replaced during a compilation, except if we delete the plan, in which
528     // case we wouldn't be here.
529     // https://bugs.webkit.org/show_bug.cgi?id=132707
530     if (codeBlock->alternative() != replacement->baselineVersion())
531         return false;
532     if (!watchpoints.areStillValid())
533         return false;
534     return true;
535 }
536
// Commits all of the "desired" side tables accumulated during compilation
// (watchpoints, identifiers, weak references, structure transitions) into the
// installed code's CommonData.
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock, *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
}
544
// Marks the plan as actively compiling (stage transition Preparing -> Compiling).
void Plan::notifyCompiling()
{
    stage = Compiling;
}
549
// Marks the plan as having finished compiling (stage transition Compiling -> Compiled).
void Plan::notifyCompiled()
{
    stage = Compiled;
}
554
// Tells the callback that compiled code is ready to be installed, then marks
// the plan as Ready.
void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
    stage = Ready;
}
560
// Installs the compiled code into the code block. Returns CompilationInvalidated
// when the world changed during compilation (watchpoint fired, replacement
// gone), CompilationFailed when the finalizer could not link, and
// CompilationSuccessful otherwise. Logs a "dfgFinalize" event in every case.
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm.heap.writeBarrier(codeBlock);
    
    if (!isStillValid()) {
        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("invalidated"));
        return CompilationInvalidated;
    }

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();
    
    if (!result) {
        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("failed"));
        return CompilationFailed;
    }
    
    // Commit the desired watchpoints/identifiers/weak references/transitions
    // into the newly installed code's common data.
    reallyAdd(codeBlock->jitCode()->dfgCommon());
    
    if (validationEnabled()) {
        TrackedReferences trackedReferences;
        
        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());
        
        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }
    
    CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("succeeded"));
    return CompilationSuccessful;
}
602
// Finalizes the compilation and reports the result to the callback in one step.
void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}
607
608 CompilationKey Plan::key()
609 {
610     return CompilationKey(codeBlock->alternative(), mode);
611 }
612
613 void Plan::rememberCodeBlocks()
614 {
615     // Compilation writes lots of values to a CodeBlock without performing
616     // an explicit barrier. So, we need to be pessimistic and assume that
617     // all our CodeBlocks must be visited during GC.
618
619     Heap::heap(codeBlock)->writeBarrier(codeBlock);
620     Heap::heap(codeBlock)->writeBarrier(codeBlock->alternative());
621     if (profiledDFGCodeBlock)
622         Heap::heap(profiledDFGCodeBlock)->writeBarrier(profiledDFGCodeBlock);
623 }
624
// GC hook: if the plan is still live, marks everything it references - the
// must-handle OSR values, the code blocks involved, every inlined baseline
// code block, plus the desired weak references and transitions.
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    // A dead plan (cancelled, or whose owner is unmarked) keeps nothing alive.
    if (!isKnownToBeLiveDuringGC())
        return;
    
    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    visitor.appendUnbarrieredReadOnlyPointer(codeBlock);
    visitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative());
    visitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock);

    // Keep the baseline code block of every inlined call frame alive too.
    if (inlineCallFrames) {
        for (auto* inlineCallFrame : *inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock.get());
            visitor.appendUnbarrieredReadOnlyPointer(inlineCallFrame->baselineCodeBlock.get());
        }
    }

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}
647
648 bool Plan::isKnownToBeLiveDuringGC()
649 {
650     if (stage == Cancelled)
651         return false;
652     if (!Heap::isMarked(codeBlock->ownerExecutable()))
653         return false;
654     if (!Heap::isMarked(codeBlock->alternative()))
655         return false;
656     if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
657         return false;
658     return true;
659 }
660
// Abandons the compilation: drops every reference the plan holds (so the GC can
// reclaim them and nothing is installed) and moves the plan to the Cancelled
// stage. After this, codeBlock is null and compileInThread's assertions expect
// stage == Cancelled.
void Plan::cancel()
{
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    // Reset the desired-* sets to fresh, empty states.
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}
676
677 } } // namespace JSC::DFG
678
679 #endif // ENABLE(DFG_JIT)
680