c0c9479ca8a4c3467088f9d7bfd298b3c2ea6118
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGPlan.cpp
1 /*
2  * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGPlan.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGArgumentsEliminationPhase.h"
32 #include "DFGBackwardsPropagationPhase.h"
33 #include "DFGByteCodeParser.h"
34 #include "DFGCFAPhase.h"
35 #include "DFGCFGSimplificationPhase.h"
36 #include "DFGCPSRethreadingPhase.h"
37 #include "DFGCSEPhase.h"
38 #include "DFGCleanUpPhase.h"
39 #include "DFGConstantFoldingPhase.h"
40 #include "DFGConstantHoistingPhase.h"
41 #include "DFGCriticalEdgeBreakingPhase.h"
42 #include "DFGDCEPhase.h"
43 #include "DFGFailedFinalizer.h"
44 #include "DFGFixupPhase.h"
45 #include "DFGGraphSafepoint.h"
46 #include "DFGIntegerCheckCombiningPhase.h"
47 #include "DFGIntegerRangeOptimizationPhase.h"
48 #include "DFGInvalidationPointInjectionPhase.h"
49 #include "DFGJITCompiler.h"
50 #include "DFGLICMPhase.h"
51 #include "DFGLiveCatchVariablePreservationPhase.h"
52 #include "DFGLivenessAnalysisPhase.h"
53 #include "DFGLoopPreHeaderCreationPhase.h"
54 #include "DFGMaximalFlushInsertionPhase.h"
55 #include "DFGMovHintRemovalPhase.h"
56 #include "DFGOSRAvailabilityAnalysisPhase.h"
57 #include "DFGOSREntrypointCreationPhase.h"
58 #include "DFGObjectAllocationSinkingPhase.h"
59 #include "DFGPhantomInsertionPhase.h"
60 #include "DFGPredictionInjectionPhase.h"
61 #include "DFGPredictionPropagationPhase.h"
62 #include "DFGPutStackSinkingPhase.h"
63 #include "DFGSSAConversionPhase.h"
64 #include "DFGSSALoweringPhase.h"
65 #include "DFGStackLayoutPhase.h"
66 #include "DFGStaticExecutionCountEstimationPhase.h"
67 #include "DFGStoreBarrierClusteringPhase.h"
68 #include "DFGStoreBarrierInsertionPhase.h"
69 #include "DFGStrengthReductionPhase.h"
70 #include "DFGStructureRegistrationPhase.h"
71 #include "DFGTierUpCheckInjectionPhase.h"
72 #include "DFGTypeCheckHoistingPhase.h"
73 #include "DFGUnificationPhase.h"
74 #include "DFGValidate.h"
75 #include "DFGVarargsForwardingPhase.h"
76 #include "DFGVirtualRegisterAllocationPhase.h"
77 #include "DFGWatchpointCollectionPhase.h"
78 #include "JSCInlines.h"
79 #include "OperandsInlines.h"
80 #include "ProfilerDatabase.h"
81 #include "TrackedReferences.h"
82 #include "VMInlines.h"
83 #include <wtf/CurrentTime.h>
84
85 #if ENABLE(FTL_JIT)
86 #include "FTLCapabilities.h"
87 #include "FTLCompile.h"
88 #include "FTLFail.h"
89 #include "FTLLink.h"
90 #include "FTLLowerDFGToB3.h"
91 #include "FTLState.h"
92 #endif
93
94 namespace JSC {
95
96 extern double totalDFGCompileTime;
97 extern double totalFTLCompileTime;
98 extern double totalFTLDFGCompileTime;
99 extern double totalFTLB3CompileTime;
100
101 }
102
103 namespace JSC { namespace DFG {
104
105 namespace {
106
107 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
108 {
109     GraphDumpMode modeForFinalValidate = DumpGraph;
110     if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
111         dataLog(text, "\n");
112         graph.dump();
113         modeForFinalValidate = DontDumpGraph;
114     }
115     if (validationEnabled())
116         validate(graph, modeForFinalValidate);
117 }
118
119 Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
120 {
121     switch (mode) {
122     case InvalidCompilationMode:
123         RELEASE_ASSERT_NOT_REACHED();
124         return Profiler::DFG;
125     case DFGMode:
126         return Profiler::DFG;
127     case FTLMode:
128         return Profiler::FTL;
129     case FTLForOSREntryMode:
130         return Profiler::FTLForOSREntry;
131     }
132     RELEASE_ASSERT_NOT_REACHED();
133     return Profiler::DFG;
134 }
135
136 } // anonymous namespace
137
138 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
139     CompilationMode mode, unsigned osrEntryBytecodeIndex,
140     const Operands<JSValue>& mustHandleValues)
141     : vm(passedCodeBlock->vm())
142     , codeBlock(passedCodeBlock)
143     , profiledDFGCodeBlock(profiledDFGCodeBlock)
144     , mode(mode)
145     , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
146     , mustHandleValues(mustHandleValues)
147     , compilation(vm->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(vm->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : 0)
148     , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
149     , identifiers(codeBlock)
150     , weakReferences(codeBlock)
151     , stage(Preparing)
152 {
153 }
154
// Out-of-line destructor: members (RefPtrs, unique_ptrs, desired-* sets)
// clean themselves up; defined here so their full types are only needed
// in this translation unit.
Plan::~Plan()
{
}
158
159 bool Plan::computeCompileTimes() const
160 {
161     return reportCompileTimes()
162         || Options::reportTotalCompileTimes()
163         || (vm && vm->m_perBytecodeProfiler);
164 }
165
166 bool Plan::reportCompileTimes() const
167 {
168     return Options::reportCompileTimes()
169         || Options::reportDFGCompileTimes()
170         || (Options::reportFTLCompileTimes() && isFTL(mode));
171 }
172
// Entry point for running this compilation on a worker thread. Wraps
// compileInThreadImpl() with timing measurement, compile-time accounting,
// and logging. `threadData` identifies the worklist thread we run on.
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;
    
    double before = 0;
    CString codeBlockName;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*codeBlock);
    
    CompilationScope compilationScope;

    if (logCompilationChanges(mode) || Options::reportDFGPhaseTimes())
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    // Invariants: every non-cancelled path must have produced a finalizer,
    // and cancellation and the Cancelled stage must agree.
    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
    
    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();
    
        // For FTL compiles, m_timeBeforeFTL splits the total into the DFG
        // front-end portion and the B3 back-end portion.
        if (Options::reportTotalCompileTimes()) {
            if (isFTL(mode)) {
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    const char* pathName = nullptr;
    switch (path) {
    case FailPath:
        pathName = "N/A (fail)";
        break;
    case DFGPath:
        pathName = "DFG";
        break;
    case FTLPath:
        pathName = "FTL";
        break;
    case CancelPath:
        pathName = "Cancelled";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
        if (path == FTLPath)
            CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", after - before, " ms (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ") with ", pathName));
        else
            CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", after - before, " ms with ", pathName));
    }
    if (UNLIKELY(reportCompileTimes())) {
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
238
// Runs the full optimization pipeline for this plan: bytecode parsing, the
// CPS-phase pipeline shared by all tiers, then either DFG code generation or
// the SSA/FTL pipeline depending on mode. Returns which path was taken:
// FailPath (could not compile; a FailedFinalizer is installed), DFGPath or
// FTLPath (code was generated), or CancelPath (a GC safepoint cancelled us).
// NOTE(review): phase ordering here is deliberate and fragile; several
// comments below record inter-phase dependencies.
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    cleanMustHandleValuesIfNecessary();
    
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(*vm, *this, longLivedState);
    
    // Parse bytecode into the DFG graph; if the parser declines, fail the plan.
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    // For FTL OSR entry, rewrite the graph to have the OSR entrypoint; if that
    // is impossible we cannot compile at all.
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    // Re-derive abstract state only if the simplification passes changed the graph.
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        // Generate machine code directly from the CPS-form graph.
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        // First capability check: bail early before doing any FTL-only work.
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);

        // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes on nodes based on preceding blocks
        // and nodes. LICM moves nodes which can invalidates assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        // Second capability check: the phases above may have introduced nodes
        // the FTL cannot handle.
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);
        
        // Record the DFG/B3 split point for compile-time accounting.
        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        // Test-only option: force failure before B3 compilation.
        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        // Test-only option: force failure before linking.
        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
525
526 bool Plan::isStillValid()
527 {
528     CodeBlock* replacement = codeBlock->replacement();
529     if (!replacement)
530         return false;
531     // FIXME: This is almost certainly not necessary. There's no way for the baseline
532     // code to be replaced during a compilation, except if we delete the plan, in which
533     // case we wouldn't be here.
534     // https://bugs.webkit.org/show_bug.cgi?id=132707
535     if (codeBlock->alternative() != replacement->baselineVersion())
536         return false;
537     if (!watchpoints.areStillValid())
538         return false;
539     return true;
540 }
541
// Commits everything the compilation asked to register — watchpoints,
// identifiers, weak references, and structure transitions — onto the
// CodeBlock's common DFG data.
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock, *commonData);
    identifiers.reallyAdd(*vm, commonData);
    weakReferences.reallyAdd(*vm, commonData);
    transitions.reallyAdd(*vm, commonData);
}
549
// Marks the plan as actively compiling on a worklist thread.
void Plan::notifyCompiling()
{
    stage = Compiling;
}
554
// Tells the callback that compilation finished asynchronously, then marks the
// plan ready for finalization. (Callback runs before the stage flips.)
void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
    stage = Ready;
}
560
// Installs the compiled code into the CodeBlock and registers the plan's
// desired watchpoints/references, without invoking the completion callback.
// Returns CompilationInvalidated if the plan is stale, CompilationFailed if
// the finalizer could not link, CompilationSuccessful otherwise.
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We will establish new references from the code block to things. So, we need a barrier.
    vm->heap.writeBarrier(codeBlock);
    
    if (!isStillValid()) {
        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("invalidated"));
        return CompilationInvalidated;
    }

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();
    
    if (!result) {
        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("failed"));
        return CompilationFailed;
    }
    
    // Only register watchpoints/references after the code successfully linked.
    reallyAdd(codeBlock->jitCode()->dfgCommon());
    
    if (validationEnabled()) {
        TrackedReferences trackedReferences;
        
        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());
        
        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }
    
    CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("succeeded"));
    return CompilationSuccessful;
}
602
// Finalizes the compilation and reports the result to the plan's callback.
void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}
607
// A plan is keyed by the baseline code block it compiles from plus the
// compilation mode, so a DFG and an FTL compile of the same code coexist.
CompilationKey Plan::key()
{
    return CompilationKey(codeBlock->alternative(), mode);
}
612
613 void Plan::markCodeBlocks(SlotVisitor& slotVisitor)
614 {
615     if (!isKnownToBeLiveDuringGC())
616         return;
617     
618     // Compilation writes lots of values to a CodeBlock without performing
619     // an explicit barrier. So, we need to be pessimistic and assume that
620     // all our CodeBlocks must be visited during GC.
621
622     slotVisitor.appendUnbarrieredReadOnlyPointer(codeBlock);
623     slotVisitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative());
624     if (profiledDFGCodeBlock)
625         slotVisitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock);
626 }
627
628 void Plan::rememberCodeBlocks(VM& vm)
629 {
630     if (!isKnownToBeLiveDuringGC())
631         return;
632     
633     // Compilation writes lots of values to a CodeBlock without performing
634     // an explicit barrier. So, we need to be pessimistic and assume that
635     // all our CodeBlocks must be visited during GC.
636
637     vm.heap.writeBarrier(codeBlock);
638     vm.heap.writeBarrier(codeBlock->alternative());
639     if (profiledDFGCodeBlock)
640         vm.heap.writeBarrier(profiledDFGCodeBlock);
641 }
642
// GC support: if the plan is live, visits everything it keeps alive — the
// must-handle OSR values, the code blocks involved, the baseline code blocks
// of all inlined call frames, and the desired weak references/transitions.
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    // Scrub stale garbage out of mustHandleValues before handing them to the
    // visitor.
    cleanMustHandleValuesIfNecessary();
    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    visitor.appendUnbarrieredReadOnlyPointer(codeBlock);
    visitor.appendUnbarrieredReadOnlyPointer(codeBlock->alternative());
    visitor.appendUnbarrieredReadOnlyPointer(profiledDFGCodeBlock);

    if (inlineCallFrames) {
        for (auto* inlineCallFrame : *inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock.get());
            visitor.appendUnbarrieredReadOnlyPointer(inlineCallFrame->baselineCodeBlock.get());
        }
    }

    weakReferences.visitChildren(visitor);
    transitions.visitChildren(visitor);
}
666
667 bool Plan::isKnownToBeLiveDuringGC()
668 {
669     if (stage == Cancelled)
670         return false;
671     if (!Heap::isMarked(codeBlock->ownerExecutable()))
672         return false;
673     if (!Heap::isMarked(codeBlock->alternative()))
674         return false;
675     if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
676         return false;
677     return true;
678 }
679
// Abandons the compilation: drops every reference the plan holds (so the GC
// stops treating its code blocks as live) and moves the plan to the Cancelled
// stage. After this, codeBlock and vm are null.
void Plan::cancel()
{
    vm = nullptr;
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}
696
// Replaces any must-handle OSR entry value that is dead at the entry bytecode
// offset with jsUndefined(), so we don't keep (or visit) garbage. Guarded by
// a lock because the GC may trigger this concurrently with the compiler
// thread; the work runs at most once per dirtying.
void Plan::cleanMustHandleValuesIfNecessary()
{
    LockHolder locker(mustHandleValueCleaningLock);
    
    if (!mustHandleValuesMayIncludeGarbage)
        return;
    
    // Clear the flag before the early returns below so we don't redo this.
    mustHandleValuesMayIncludeGarbage = false;
    
    if (!codeBlock)
        return;
    
    if (!mustHandleValues.numberOfLocals())
        return;
    
    FastBitVector liveness = codeBlock->alternative()->livenessAnalysis().getLivenessInfoAtBytecodeOffset(osrEntryBytecodeIndex);
    
    for (unsigned local = mustHandleValues.numberOfLocals(); local--;) {
        if (!liveness[local])
            mustHandleValues.local(local) = jsUndefined();
    }
}
719
720 } } // namespace JSC::DFG
721
722 #endif // ENABLE(DFG_JIT)
723