Remove MaximalFlushInsertionPhase
Source/JavaScriptCore/dfg/DFGPlan.cpp
/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPlan.h"

#if ENABLE(DFG_JIT)

#include "DFGArgumentsEliminationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
#include "DFGConstantHoistingPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
#include "DFGFixupPhase.h"
#include "DFGGraphSafepoint.h"
#include "DFGIntegerCheckCombiningPhase.h"
#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLiveCatchVariablePreservationPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
#include "DFGObjectAllocationSinkingPhase.h"
#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGPutStackSinkingPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
#include "DFGStaticExecutionCountEstimationPhase.h"
#include "DFGStoreBarrierClusteringPhase.h"
#include "DFGStoreBarrierInsertionPhase.h"
#include "DFGStrengthReductionPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
#include "DFGValueRepReductionPhase.h"
#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"
#include "ProfilerDatabase.h"
#include "TrackedReferences.h"
#include "VMInlines.h"

#if ENABLE(FTL_JIT)
#include "FTLCapabilities.h"
#include "FTLCompile.h"
#include "FTLFail.h"
#include "FTLLink.h"
#include "FTLLowerDFGToB3.h"
#include "FTLState.h"
#endif

namespace JSC {

extern Seconds totalDFGCompileTime;
extern Seconds totalFTLCompileTime;
extern Seconds totalFTLDFGCompileTime;
extern Seconds totalFTLB3CompileTime;

}

namespace JSC { namespace DFG {

namespace {

void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled(graph.m_plan.mode()) || forceDump) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}

Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return Profiler::DFG;
    case DFGMode:
        return Profiler::DFG;
    case FTLMode:
        return Profiler::FTL;
    case FTLForOSREntryMode:
        return Profiler::FTLForOSREntry;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return Profiler::DFG;
}

} // anonymous namespace

Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<Optional<JSValue>>& mustHandleValues)
    : m_mode(mode)
    , m_vm(passedCodeBlock->vm())
    , m_codeBlock(passedCodeBlock)
    , m_profiledDFGCodeBlock(profiledDFGCodeBlock)
    , m_mustHandleValues(mustHandleValues)
    , m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
    , m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , m_identifiers(m_codeBlock)
    , m_weakReferences(m_codeBlock)
    , m_stage(Preparing)
{
    RELEASE_ASSERT(m_codeBlock->alternative()->jitCode());
    m_inlineCallFrames->disableThreadingChecks();
}

Plan::~Plan()
{
}

bool Plan::computeCompileTimes() const
{
    return reportCompileTimes()
        || Options::reportTotalCompileTimes()
        || (m_vm && m_vm->m_perBytecodeProfiler);
}

bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || Options::reportDFGCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL());
}

void Plan::compileInThread(ThreadData* threadData)
{
    m_threadData = threadData;

    MonotonicTime before { };
    CString codeBlockName;
    if (UNLIKELY(computeCompileTimes()))
        before = MonotonicTime::now();
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*m_codeBlock);

    CompilationScope compilationScope;

    if (logCompilationChanges(m_mode) || Options::logPhaseTimes())
        dataLog("DFG(Plan) compiling ", *m_codeBlock, " with ", m_mode, ", instructions size = ", m_codeBlock->instructionsSize(), "\n");

    CompilationPath path = compileInThreadImpl();

    RELEASE_ASSERT(path == CancelPath || m_finalizer);
    RELEASE_ASSERT((path == CancelPath) == (m_stage == Cancelled));

    MonotonicTime after { };
    if (UNLIKELY(computeCompileTimes())) {
        after = MonotonicTime::now();

        if (Options::reportTotalCompileTimes()) {
            if (isFTL()) {
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    const char* pathName = nullptr;
    switch (path) {
    case FailPath:
        pathName = "N/A (fail)";
        break;
    case DFGPath:
        pathName = "DFG";
        break;
    case FTLPath:
        pathName = "FTL";
        break;
    case CancelPath:
        pathName = "Cancelled";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    if (m_codeBlock) { // m_codeBlock will be null if the compilation was cancelled.
        if (path == FTLPath)
            CODEBLOCK_LOG_EVENT(m_codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
        else
            CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
    }
    if (UNLIKELY(reportCompileTimes())) {
        dataLog("Optimized ", codeBlockName, " using ", m_mode, " with ", pathName, " into ", m_finalizer ? m_finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ")");
        dataLog(".\n");
    }
}

Plan::CompilationPath Plan::compileInThreadImpl()
{
    cleanMustHandleValuesIfNecessary();

    if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(*m_vm, *this);
    parse(dfg);

    m_codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    bool changed = false;

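    // RUN_PHASE runs one phase of the pipeline. If Options::safepointBeforeEachPhase() is set, it
    // first parks the compiler thread at a GC safepoint and bails out with CancelPath if the plan
    // got cancelled while parked. It also accumulates into |changed| whether the phase reported
    // that it modified the graph.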
#define RUN_PHASE(phase)                                         \
    do {                                                         \
        if (Options::safepointBeforeEachPhase()) {               \
            Safepoint::Result safepointResult;                   \
            {                                                    \
                GraphSafepoint safepoint(dfg, safepointResult);  \
            }                                                    \
            if (safepointResult.didGetCancelled())               \
                return CancelPath;                               \
        }                                                        \
        dfg.nextPhase();                                         \
        changed |= phase(dfg);                                   \
    } while (false);                                             \


    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    RUN_PHASE(performLiveCatchVariablePreservationPhase);

    RUN_PHASE(performCPSRethreading);
    RUN_PHASE(performUnification);
    RUN_PHASE(performPredictionInjection);

    RUN_PHASE(performStaticExecutionCountEstimation);

    if (m_mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            m_finalizer = makeUnique<FailedFinalizer>(*this);
            return FailPath;
        }
        RUN_PHASE(performCPSRethreading);
    }

    if (validationEnabled())
        validate(dfg);

    RUN_PHASE(performBackwardsPropagation);
    RUN_PHASE(performPredictionPropagation);
    RUN_PHASE(performFixup);
    RUN_PHASE(performInvalidationPointInjection);
    RUN_PHASE(performTypeCheckHoisting);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    RUN_PHASE(performStrengthReduction);
    RUN_PHASE(performCPSRethreading);
    RUN_PHASE(performCFA);
    RUN_PHASE(performConstantFolding);
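    // Reset |changed| so that the CFA/constant folding re-run below only happens if one of the
    // phases from here on reports a change; the phases above were already followed by CFA and
    // constant folding.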
    changed = false;
    RUN_PHASE(performCFGSimplification);
    RUN_PHASE(performLocalCSE);

    if (validationEnabled())
        validate(dfg);

    RUN_PHASE(performCPSRethreading);
    if (!isFTL()) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do one of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        RUN_PHASE(performVarargsForwarding); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        RUN_PHASE(performCFA);
        RUN_PHASE(performConstantFolding);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureCPSDominators();
        dfg.ensureCPSNaturalLoops();
    }

    switch (m_mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        RUN_PHASE(performTierUpCheckInjection);

        RUN_PHASE(performFastStoreBarrierInsertion);
        RUN_PHASE(performStoreBarrierClustering);
        RUN_PHASE(performCleanUp);
        RUN_PHASE(performCPSRethreading);
        RUN_PHASE(performDCE);
        RUN_PHASE(performPhantomInsertion);
        RUN_PHASE(performStackLayout);
        RUN_PHASE(performVirtualRegisterAllocation);
        RUN_PHASE(performWatchpointCollection);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (m_codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            m_finalizer = makeUnique<FailedFinalizer>(*this);
            return FailPath;
        }

        RUN_PHASE(performCleanUp); // Reduce the graph size a bit.
        RUN_PHASE(performCriticalEdgeBreaking);
        if (Options::createPreHeaders())
            RUN_PHASE(performLoopPreHeaderCreation);
        RUN_PHASE(performCPSRethreading);
        RUN_PHASE(performSSAConversion);
        RUN_PHASE(performSSALowering);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        RUN_PHASE(performArgumentsElimination);
        if (Options::usePutStackSinking())
            RUN_PHASE(performPutStackSinking);

        RUN_PHASE(performConstantHoisting);
        RUN_PHASE(performGlobalCSE);
        RUN_PHASE(performLivenessAnalysis);
        RUN_PHASE(performCFA);
        RUN_PHASE(performConstantFolding);
        RUN_PHASE(performCleanUp); // Reduce the graph size a lot.
        changed = false;
        RUN_PHASE(performStrengthReduction);
        if (Options::useObjectAllocationSinking()) {
            RUN_PHASE(performCriticalEdgeBreaking);
            RUN_PHASE(performObjectAllocationSinking);
        }
        if (Options::useValueRepElimination())
            RUN_PHASE(performValueRepReduction);
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            RUN_PHASE(performLivenessAnalysis);
            RUN_PHASE(performCFA);
            RUN_PHASE(performConstantFolding);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        RUN_PHASE(performLivenessAnalysis);
        RUN_PHASE(performCFA);
        RUN_PHASE(performLICM);

        // FIXME: Currently, IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes to nodes based on preceding blocks
        // and nodes. LICM moves nodes, which can invalidate assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        RUN_PHASE(performLivenessAnalysis);
        RUN_PHASE(performIntegerRangeOptimization);

        RUN_PHASE(performCleanUp);
        RUN_PHASE(performIntegerCheckCombining);
        RUN_PHASE(performGlobalCSE);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        RUN_PHASE(performLivenessAnalysis);
        RUN_PHASE(performCFA);
        RUN_PHASE(performGlobalStoreBarrierInsertion);
        RUN_PHASE(performStoreBarrierClustering);
        if (Options::useMovHintRemoval())
            RUN_PHASE(performMovHintRemoval);
        RUN_PHASE(performCleanUp);
        RUN_PHASE(performDCE); // We rely on this to kill dead code that won't be recognized as dead by B3.
        RUN_PHASE(performStackLayout);
        RUN_PHASE(performLivenessAnalysis);
        RUN_PHASE(performOSRAvailabilityAnalysis);
        RUN_PHASE(performWatchpointCollection);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            m_finalizer = makeUnique<FailedFinalizer>(*this);
            return FailPath;
        }

        dfg.nextPhase();
        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(m_mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        dfg.nextPhase();
        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = MonotonicTime::now();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }

#undef RUN_PHASE
}

bool Plan::isStillValid()
{
    CodeBlock* replacement = m_codeBlock->replacement();
    if (!replacement)
        return false;
    // FIXME: This is almost certainly not necessary. There's no way for the baseline
    // code to be replaced during a compilation, except if we delete the plan, in which
    // case we wouldn't be here.
    // https://bugs.webkit.org/show_bug.cgi?id=132707
    if (m_codeBlock->alternative() != replacement->baselineVersion())
        return false;
    if (!m_watchpoints.areStillValid())
        return false;
    return true;
}

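// Commits everything the compilation decided to depend on - watchpoints, identifiers, weak
// references, structure transitions, and global property dependencies - into the CodeBlock's
// CommonData, along with the statuses recorded during compilation.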
void Plan::reallyAdd(CommonData* commonData)
{
    m_watchpoints.reallyAdd(m_codeBlock, *commonData);
    m_identifiers.reallyAdd(*m_vm, commonData);
    m_weakReferences.reallyAdd(*m_vm, commonData);
    m_transitions.reallyAdd(*m_vm, commonData);
    m_globalProperties.reallyAdd(m_codeBlock, m_identifiers, *commonData);
    commonData->recordedStatuses = WTFMove(m_recordedStatuses);
}

void Plan::notifyCompiling()
{
    m_stage = Compiling;
}

void Plan::notifyReady()
{
    m_callback->compilationDidBecomeReadyAsynchronously(m_codeBlock, m_profiledDFGCodeBlock);
    m_stage = Ready;
}

bool Plan::isStillValidOnMainThread()
{
    return m_globalProperties.isStillValidOnMainThread(*m_vm, m_identifiers);
}

CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    // We perform multiple stores before emitting a write barrier. To ensure that no GC happens
    // between the stores and the write barrier, GC must be deferred when this function is called.
    ASSERT(m_vm->heap.isDeferred());

    CompilationResult result = [&] {
        if (!isStillValidOnMainThread() || !isStillValid()) {
            CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("invalidated"));
            return CompilationInvalidated;
        }

        bool result;
        if (m_codeBlock->codeType() == FunctionCode)
            result = m_finalizer->finalizeFunction();
        else
            result = m_finalizer->finalize();

        if (!result) {
            CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("failed"));
            return CompilationFailed;
        }

        reallyAdd(m_codeBlock->jitCode()->dfgCommon());

        if (validationEnabled()) {
            TrackedReferences trackedReferences;

            for (WriteBarrier<JSCell>& reference : m_codeBlock->jitCode()->dfgCommon()->weakReferences)
                trackedReferences.add(reference.get());
            for (WriteBarrier<Structure>& reference : m_codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
                trackedReferences.add(reference.get());
            for (WriteBarrier<Unknown>& constant : m_codeBlock->constants())
                trackedReferences.add(constant.get());

            for (auto* inlineCallFrame : *m_inlineCallFrames) {
                ASSERT(inlineCallFrame->baselineCodeBlock.get());
                trackedReferences.add(inlineCallFrame->baselineCodeBlock.get());
            }

            // Check that any other references that we have anywhere in the JITCode are also
            // tracked either strongly or weakly.
            m_codeBlock->jitCode()->validateReferences(trackedReferences);
        }

        CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("succeeded"));
        return CompilationSuccessful;
    }();

    // We will establish new references from the code block to things. So, we need a barrier.
    m_vm->heap.writeBarrier(m_codeBlock);
    return result;
}

void Plan::finalizeAndNotifyCallback()
{
    m_callback->compilationDidComplete(m_codeBlock, m_profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
}

CompilationKey Plan::key()
{
    return CompilationKey(m_codeBlock->alternative(), m_mode);
}

void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    cleanMustHandleValuesIfNecessary();
    for (unsigned i = m_mustHandleValues.size(); i--;) {
        Optional<JSValue> value = m_mustHandleValues[i];
        if (value)
            visitor.appendUnbarriered(value.value());
    }

    m_recordedStatuses.markIfCheap(visitor);

    visitor.appendUnbarriered(m_codeBlock);
    visitor.appendUnbarriered(m_codeBlock->alternative());
    visitor.appendUnbarriered(m_profiledDFGCodeBlock);

    if (m_inlineCallFrames) {
        for (auto* inlineCallFrame : *m_inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock.get());
            visitor.appendUnbarriered(inlineCallFrame->baselineCodeBlock.get());
        }
    }

    m_weakReferences.visitChildren(visitor);
    m_transitions.visitChildren(visitor);
}

void Plan::finalizeInGC()
{
    ASSERT(m_vm);
    m_recordedStatuses.finalizeWithoutDeleting(*m_vm);
}

bool Plan::isKnownToBeLiveDuringGC()
{
    if (m_stage == Cancelled)
        return false;
    if (!m_vm->heap.isMarked(m_codeBlock->ownerExecutable()))
        return false;
    if (!m_vm->heap.isMarked(m_codeBlock->alternative()))
        return false;
    if (!!m_profiledDFGCodeBlock && !m_vm->heap.isMarked(m_profiledDFGCodeBlock))
        return false;
    return true;
}

void Plan::cancel()
{
    m_vm = nullptr;
    m_codeBlock = nullptr;
    m_profiledDFGCodeBlock = nullptr;
    m_mustHandleValues.clear();
    m_compilation = nullptr;
    m_finalizer = nullptr;
    m_inlineCallFrames = nullptr;
    m_watchpoints = DesiredWatchpoints();
    m_identifiers = DesiredIdentifiers();
    m_globalProperties = DesiredGlobalProperties();
    m_weakReferences = DesiredWeakReferences();
    m_transitions = DesiredTransitions();
    m_callback = nullptr;
    m_stage = Cancelled;
}

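// Drops must-handle values for locals that the baseline CodeBlock's liveness analysis reports
// dead at the OSR entry bytecode index, so that stale JSValues are neither visited by
// checkLivenessAndVisitChildren nor kept alive through this plan.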
void Plan::cleanMustHandleValuesIfNecessary()
{
    LockHolder locker(m_mustHandleValueCleaningLock);

    if (!m_mustHandleValuesMayIncludeGarbage)
        return;

    m_mustHandleValuesMayIncludeGarbage = false;

    if (!m_codeBlock)
        return;

    if (!m_mustHandleValues.numberOfLocals())
        return;

    CodeBlock* alternative = m_codeBlock->alternative();
    FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);

    for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
        if (!liveness[local])
            m_mustHandleValues.local(local) = WTF::nullopt;
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)
