/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "DFGArgumentsEliminationPhase.h"
32 #include "DFGBackwardsPropagationPhase.h"
33 #include "DFGByteCodeParser.h"
34 #include "DFGCFAPhase.h"
35 #include "DFGCFGSimplificationPhase.h"
36 #include "DFGCPSRethreadingPhase.h"
37 #include "DFGCSEPhase.h"
38 #include "DFGCleanUpPhase.h"
39 #include "DFGConstantFoldingPhase.h"
40 #include "DFGConstantHoistingPhase.h"
41 #include "DFGCriticalEdgeBreakingPhase.h"
42 #include "DFGDCEPhase.h"
43 #include "DFGFailedFinalizer.h"
44 #include "DFGFixupPhase.h"
45 #include "DFGGraphSafepoint.h"
46 #include "DFGIntegerCheckCombiningPhase.h"
47 #include "DFGIntegerRangeOptimizationPhase.h"
48 #include "DFGInvalidationPointInjectionPhase.h"
49 #include "DFGJITCompiler.h"
50 #include "DFGLICMPhase.h"
51 #include "DFGLiveCatchVariablePreservationPhase.h"
52 #include "DFGLivenessAnalysisPhase.h"
53 #include "DFGLoopPreHeaderCreationPhase.h"
54 #include "DFGMaximalFlushInsertionPhase.h"
55 #include "DFGMovHintRemovalPhase.h"
56 #include "DFGOSRAvailabilityAnalysisPhase.h"
57 #include "DFGOSREntrypointCreationPhase.h"
58 #include "DFGObjectAllocationSinkingPhase.h"
59 #include "DFGPhantomInsertionPhase.h"
60 #include "DFGPredictionInjectionPhase.h"
61 #include "DFGPredictionPropagationPhase.h"
62 #include "DFGPutStackSinkingPhase.h"
63 #include "DFGSSAConversionPhase.h"
64 #include "DFGSSALoweringPhase.h"
65 #include "DFGStackLayoutPhase.h"
66 #include "DFGStaticExecutionCountEstimationPhase.h"
67 #include "DFGStoreBarrierClusteringPhase.h"
68 #include "DFGStoreBarrierInsertionPhase.h"
69 #include "DFGStrengthReductionPhase.h"
70 #include "DFGTierUpCheckInjectionPhase.h"
71 #include "DFGTypeCheckHoistingPhase.h"
72 #include "DFGUnificationPhase.h"
73 #include "DFGValidate.h"
74 #include "DFGValueRepReductionPhase.h"
75 #include "DFGVarargsForwardingPhase.h"
76 #include "DFGVirtualRegisterAllocationPhase.h"
77 #include "DFGWatchpointCollectionPhase.h"
78 #include "JSCInlines.h"
79 #include "OperandsInlines.h"
80 #include "ProfilerDatabase.h"
81 #include "TrackedReferences.h"
82 #include "VMInlines.h"
85 #include "FTLCapabilities.h"
86 #include "FTLCompile.h"
89 #include "FTLLowerDFGToB3.h"
95 extern Seconds totalDFGCompileTime;
96 extern Seconds totalFTLCompileTime;
97 extern Seconds totalFTLDFGCompileTime;
98 extern Seconds totalFTLB3CompileTime;
102 namespace JSC { namespace DFG {
106 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
108 GraphDumpMode modeForFinalValidate = DumpGraph;
109 if (verboseCompilationEnabled(graph.m_plan.mode()) || forceDump) {
112 modeForFinalValidate = DontDumpGraph;
114 if (validationEnabled())
115 validate(graph, modeForFinalValidate);
118 Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
121 case InvalidCompilationMode:
122 RELEASE_ASSERT_NOT_REACHED();
123 return Profiler::DFG;
125 return Profiler::DFG;
127 return Profiler::FTL;
128 case FTLForOSREntryMode:
129 return Profiler::FTLForOSREntry;
131 RELEASE_ASSERT_NOT_REACHED();
132 return Profiler::DFG;
135 } // anonymous namespace
137 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
138 CompilationMode mode, unsigned osrEntryBytecodeIndex,
139 const Operands<Optional<JSValue>>& mustHandleValues)
141 , m_vm(passedCodeBlock->vm())
142 , m_codeBlock(passedCodeBlock)
143 , m_profiledDFGCodeBlock(profiledDFGCodeBlock)
144 , m_mustHandleValues(mustHandleValues)
145 , m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
146 , m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
147 , m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
148 , m_identifiers(m_codeBlock)
149 , m_weakReferences(m_codeBlock)
152 RELEASE_ASSERT(m_codeBlock->alternative()->jitCode());
153 m_inlineCallFrames->disableThreadingChecks();
160 bool Plan::computeCompileTimes() const
162 return reportCompileTimes()
163 || Options::reportTotalCompileTimes()
164 || (m_vm && m_vm->m_perBytecodeProfiler);
167 bool Plan::reportCompileTimes() const
169 return Options::reportCompileTimes()
170 || Options::reportDFGCompileTimes()
171 || (Options::reportFTLCompileTimes() && isFTL());
174 void Plan::compileInThread(ThreadData* threadData)
176 m_threadData = threadData;
178 MonotonicTime before { };
179 CString codeBlockName;
180 if (UNLIKELY(computeCompileTimes()))
181 before = MonotonicTime::now();
182 if (UNLIKELY(reportCompileTimes()))
183 codeBlockName = toCString(*m_codeBlock);
185 CompilationScope compilationScope;
187 if (logCompilationChanges(m_mode) || Options::logPhaseTimes())
188 dataLog("DFG(Plan) compiling ", *m_codeBlock, " with ", m_mode, ", instructions size = ", m_codeBlock->instructionsSize(), "\n");
190 CompilationPath path = compileInThreadImpl();
192 RELEASE_ASSERT(path == CancelPath || m_finalizer);
193 RELEASE_ASSERT((path == CancelPath) == (m_stage == Cancelled));
195 MonotonicTime after { };
196 if (UNLIKELY(computeCompileTimes())) {
197 after = MonotonicTime::now();
199 if (Options::reportTotalCompileTimes()) {
201 totalFTLCompileTime += after - before;
202 totalFTLDFGCompileTime += m_timeBeforeFTL - before;
203 totalFTLB3CompileTime += after - m_timeBeforeFTL;
205 totalDFGCompileTime += after - before;
208 const char* pathName = nullptr;
211 pathName = "N/A (fail)";
220 pathName = "Cancelled";
223 RELEASE_ASSERT_NOT_REACHED();
226 if (m_codeBlock) { // m_codeBlock will be null if the compilation was cancelled.
228 CODEBLOCK_LOG_EVENT(m_codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
230 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
232 if (UNLIKELY(reportCompileTimes())) {
233 dataLog("Optimized ", codeBlockName, " using ", m_mode, " with ", pathName, " into ", m_finalizer ? m_finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
235 dataLog(" (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ")");
240 Plan::CompilationPath Plan::compileInThreadImpl()
242 cleanMustHandleValuesIfNecessary();
244 if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
246 dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
250 Graph dfg(*m_vm, *this);
253 m_codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
255 bool changed = false;
257 #define RUN_PHASE(phase) \
259 if (Options::safepointBeforeEachPhase()) { \
260 Safepoint::Result safepointResult; \
262 GraphSafepoint safepoint(dfg, safepointResult); \
264 if (safepointResult.didGetCancelled()) \
268 changed |= phase(dfg); \
272 // By this point the DFG bytecode parser will have potentially mutated various tables
273 // in the CodeBlock. This is a good time to perform an early shrink, which is more
274 // powerful than a late one. It's safe to do so because we haven't generated any code
275 // that references any of the tables directly, yet.
276 m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
278 if (validationEnabled())
281 if (Options::dumpGraphAfterParsing()) {
282 dataLog("Graph after parsing:\n");
286 RUN_PHASE(performLiveCatchVariablePreservationPhase);
288 if (Options::useMaximalFlushInsertionPhase())
289 RUN_PHASE(performMaximalFlushInsertion);
291 RUN_PHASE(performCPSRethreading);
292 RUN_PHASE(performUnification);
293 RUN_PHASE(performPredictionInjection);
295 RUN_PHASE(performStaticExecutionCountEstimation);
297 if (m_mode == FTLForOSREntryMode) {
298 bool result = performOSREntrypointCreation(dfg);
300 m_finalizer = makeUnique<FailedFinalizer>(*this);
303 RUN_PHASE(performCPSRethreading);
306 if (validationEnabled())
309 RUN_PHASE(performBackwardsPropagation);
310 RUN_PHASE(performPredictionPropagation);
311 RUN_PHASE(performFixup);
312 RUN_PHASE(performInvalidationPointInjection);
313 RUN_PHASE(performTypeCheckHoisting);
315 dfg.m_fixpointState = FixpointNotConverged;
317 // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
318 // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
319 // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
320 // that the compiler compiles more quickly. We want the third tier to compile quickly, which
321 // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
322 if (validationEnabled())
325 RUN_PHASE(performStrengthReduction);
326 RUN_PHASE(performCPSRethreading);
327 RUN_PHASE(performCFA);
328 RUN_PHASE(performConstantFolding);
330 RUN_PHASE(performCFGSimplification);
331 RUN_PHASE(performLocalCSE);
333 if (validationEnabled())
336 RUN_PHASE(performCPSRethreading);
338 // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
339 // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
340 // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
341 // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
342 // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
343 // escape for all of the arguments. This then disables object allocation sinking.
345 // So, for now, we just disable this phase for the FTL.
347 // If we wanted to enable it, we'd have to do any of the following:
348 // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
349 // PutStack sinking and object allocation sinking.
350 // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
351 // GetStack+PutStack.
353 // But, it's not super valuable to enable those optimizations, since the FTL
354 // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
357 RUN_PHASE(performVarargsForwarding); // Do this after CFG simplification and CPS rethreading.
360 RUN_PHASE(performCFA);
361 RUN_PHASE(performConstantFolding);
364 // If we're doing validation, then run some analyses, to give them an opportunity
365 // to self-validate. Now is as good a time as any to do this.
366 if (validationEnabled()) {
367 dfg.ensureCPSDominators();
368 dfg.ensureCPSNaturalLoops();
373 dfg.m_fixpointState = FixpointConverged;
375 RUN_PHASE(performTierUpCheckInjection);
377 RUN_PHASE(performFastStoreBarrierInsertion);
378 RUN_PHASE(performStoreBarrierClustering);
379 RUN_PHASE(performCleanUp);
380 RUN_PHASE(performCPSRethreading);
381 RUN_PHASE(performDCE);
382 RUN_PHASE(performPhantomInsertion);
383 RUN_PHASE(performStackLayout);
384 RUN_PHASE(performVirtualRegisterAllocation);
385 RUN_PHASE(performWatchpointCollection);
386 dumpAndVerifyGraph(dfg, "Graph after optimization:");
388 JITCompiler dataFlowJIT(dfg);
389 if (m_codeBlock->codeType() == FunctionCode)
390 dataFlowJIT.compileFunction();
392 dataFlowJIT.compile();
398 case FTLForOSREntryMode: {
400 if (FTL::canCompile(dfg) == FTL::CannotCompile) {
401 m_finalizer = makeUnique<FailedFinalizer>(*this);
405 RUN_PHASE(performCleanUp); // Reduce the graph size a bit.
406 RUN_PHASE(performCriticalEdgeBreaking);
407 if (Options::createPreHeaders())
408 RUN_PHASE(performLoopPreHeaderCreation);
409 RUN_PHASE(performCPSRethreading);
410 RUN_PHASE(performSSAConversion);
411 RUN_PHASE(performSSALowering);
413 // Ideally, these would be run to fixpoint with the object allocation sinking phase.
414 RUN_PHASE(performArgumentsElimination);
415 if (Options::usePutStackSinking())
416 RUN_PHASE(performPutStackSinking);
418 RUN_PHASE(performConstantHoisting);
419 RUN_PHASE(performGlobalCSE);
420 RUN_PHASE(performLivenessAnalysis);
421 RUN_PHASE(performCFA);
422 RUN_PHASE(performConstantFolding);
423 RUN_PHASE(performCleanUp); // Reduce the graph size a lot.
425 RUN_PHASE(performStrengthReduction);
426 if (Options::useObjectAllocationSinking()) {
427 RUN_PHASE(performCriticalEdgeBreaking);
428 RUN_PHASE(performObjectAllocationSinking);
430 if (Options::useValueRepElimination())
431 RUN_PHASE(performValueRepReduction);
433 // State-at-tail and state-at-head will be invalid if we did strength reduction since
434 // it might increase live ranges.
435 RUN_PHASE(performLivenessAnalysis);
436 RUN_PHASE(performCFA);
437 RUN_PHASE(performConstantFolding);
440 // Currently, this relies on pre-headers still being valid. That precludes running CFG
441 // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
442 // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
443 // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
444 // then we'd need to do some simple SSA fix-up.
445 RUN_PHASE(performLivenessAnalysis);
446 RUN_PHASE(performCFA);
447 RUN_PHASE(performLICM);
449 // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
451 // IntegerRangeOptimization makes changes on nodes based on preceding blocks
452 // and nodes. LICM moves nodes which can invalidates assumptions used
453 // by IntegerRangeOptimization.
455 // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
456 RUN_PHASE(performLivenessAnalysis);
457 RUN_PHASE(performIntegerRangeOptimization);
459 RUN_PHASE(performCleanUp);
460 RUN_PHASE(performIntegerCheckCombining);
461 RUN_PHASE(performGlobalCSE);
463 // At this point we're not allowed to do any further code motion because our reasoning
464 // about code motion assumes that it's OK to insert GC points in random places.
465 dfg.m_fixpointState = FixpointConverged;
467 RUN_PHASE(performLivenessAnalysis);
468 RUN_PHASE(performCFA);
469 RUN_PHASE(performGlobalStoreBarrierInsertion);
470 RUN_PHASE(performStoreBarrierClustering);
471 if (Options::useMovHintRemoval())
472 RUN_PHASE(performMovHintRemoval);
473 RUN_PHASE(performCleanUp);
474 RUN_PHASE(performDCE); // We rely on this to kill dead code that won't be recognized as dead by B3.
475 RUN_PHASE(performStackLayout);
476 RUN_PHASE(performLivenessAnalysis);
477 RUN_PHASE(performOSRAvailabilityAnalysis);
478 RUN_PHASE(performWatchpointCollection);
480 if (FTL::canCompile(dfg) == FTL::CannotCompile) {
481 m_finalizer = makeUnique<FailedFinalizer>(*this);
486 dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(m_mode));
488 // Flash a safepoint in case the GC wants some action.
489 Safepoint::Result safepointResult;
491 GraphSafepoint safepoint(dfg, safepointResult);
493 if (safepointResult.didGetCancelled())
497 FTL::State state(dfg);
498 FTL::lowerDFGToB3(state);
500 if (UNLIKELY(computeCompileTimes()))
501 m_timeBeforeFTL = MonotonicTime::now();
503 if (Options::b3AlwaysFailsBeforeCompile()) {
508 FTL::compile(state, safepointResult);
509 if (safepointResult.didGetCancelled())
512 if (Options::b3AlwaysFailsBeforeLink()) {
517 if (state.allocationFailed) {
524 if (state.allocationFailed) {
531 RELEASE_ASSERT_NOT_REACHED();
533 #endif // ENABLE(FTL_JIT)
537 RELEASE_ASSERT_NOT_REACHED();
544 bool Plan::isStillValid()
546 CodeBlock* replacement = m_codeBlock->replacement();
549 // FIXME: This is almost certainly not necessary. There's no way for the baseline
550 // code to be replaced during a compilation, except if we delete the plan, in which
551 // case we wouldn't be here.
552 // https://bugs.webkit.org/show_bug.cgi?id=132707
553 if (m_codeBlock->alternative() != replacement->baselineVersion())
555 if (!m_watchpoints.areStillValid())
560 void Plan::reallyAdd(CommonData* commonData)
562 m_watchpoints.reallyAdd(m_codeBlock, *commonData);
563 m_identifiers.reallyAdd(*m_vm, commonData);
564 m_weakReferences.reallyAdd(*m_vm, commonData);
565 m_transitions.reallyAdd(*m_vm, commonData);
566 m_globalProperties.reallyAdd(m_codeBlock, m_identifiers, *commonData);
567 commonData->recordedStatuses = WTFMove(m_recordedStatuses);
570 void Plan::notifyCompiling()
575 void Plan::notifyReady()
577 m_callback->compilationDidBecomeReadyAsynchronously(m_codeBlock, m_profiledDFGCodeBlock);
581 bool Plan::isStillValidOnMainThread()
583 return m_globalProperties.isStillValidOnMainThread(*m_vm, m_identifiers);
586 CompilationResult Plan::finalizeWithoutNotifyingCallback()
588 // We perform multiple stores before emitting a write-barrier. To ensure that no GC happens between store and write-barrier, we should ensure that
589 // GC is deferred when this function is called.
590 ASSERT(m_vm->heap.isDeferred());
592 CompilationResult result = [&] {
593 if (!isStillValidOnMainThread() || !isStillValid()) {
594 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("invalidated"));
595 return CompilationInvalidated;
599 if (m_codeBlock->codeType() == FunctionCode)
600 result = m_finalizer->finalizeFunction();
602 result = m_finalizer->finalize();
605 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("failed"));
606 return CompilationFailed;
609 reallyAdd(m_codeBlock->jitCode()->dfgCommon());
611 if (validationEnabled()) {
612 TrackedReferences trackedReferences;
614 for (WriteBarrier<JSCell>& reference : m_codeBlock->jitCode()->dfgCommon()->weakReferences)
615 trackedReferences.add(reference.get());
616 for (WriteBarrier<Structure>& reference : m_codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
617 trackedReferences.add(reference.get());
618 for (WriteBarrier<Unknown>& constant : m_codeBlock->constants())
619 trackedReferences.add(constant.get());
621 for (auto* inlineCallFrame : *m_inlineCallFrames) {
622 ASSERT(inlineCallFrame->baselineCodeBlock.get());
623 trackedReferences.add(inlineCallFrame->baselineCodeBlock.get());
626 // Check that any other references that we have anywhere in the JITCode are also
627 // tracked either strongly or weakly.
628 m_codeBlock->jitCode()->validateReferences(trackedReferences);
631 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("succeeded"));
632 return CompilationSuccessful;
635 // We will establish new references from the code block to things. So, we need a barrier.
636 m_vm->heap.writeBarrier(m_codeBlock);
640 void Plan::finalizeAndNotifyCallback()
642 m_callback->compilationDidComplete(m_codeBlock, m_profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
645 CompilationKey Plan::key()
647 return CompilationKey(m_codeBlock->alternative(), m_mode);
650 void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
652 if (!isKnownToBeLiveDuringGC())
655 cleanMustHandleValuesIfNecessary();
656 for (unsigned i = m_mustHandleValues.size(); i--;) {
657 Optional<JSValue> value = m_mustHandleValues[i];
659 visitor.appendUnbarriered(value.value());
662 m_recordedStatuses.markIfCheap(visitor);
664 visitor.appendUnbarriered(m_codeBlock);
665 visitor.appendUnbarriered(m_codeBlock->alternative());
666 visitor.appendUnbarriered(m_profiledDFGCodeBlock);
668 if (m_inlineCallFrames) {
669 for (auto* inlineCallFrame : *m_inlineCallFrames) {
670 ASSERT(inlineCallFrame->baselineCodeBlock.get());
671 visitor.appendUnbarriered(inlineCallFrame->baselineCodeBlock.get());
675 m_weakReferences.visitChildren(visitor);
676 m_transitions.visitChildren(visitor);
679 void Plan::finalizeInGC()
682 m_recordedStatuses.finalizeWithoutDeleting(*m_vm);
685 bool Plan::isKnownToBeLiveDuringGC()
687 if (m_stage == Cancelled)
689 if (!m_vm->heap.isMarked(m_codeBlock->ownerExecutable()))
691 if (!m_vm->heap.isMarked(m_codeBlock->alternative()))
693 if (!!m_profiledDFGCodeBlock && !m_vm->heap.isMarked(m_profiledDFGCodeBlock))
701 m_codeBlock = nullptr;
702 m_profiledDFGCodeBlock = nullptr;
703 m_mustHandleValues.clear();
704 m_compilation = nullptr;
705 m_finalizer = nullptr;
706 m_inlineCallFrames = nullptr;
707 m_watchpoints = DesiredWatchpoints();
708 m_identifiers = DesiredIdentifiers();
709 m_globalProperties = DesiredGlobalProperties();
710 m_weakReferences = DesiredWeakReferences();
711 m_transitions = DesiredTransitions();
712 m_callback = nullptr;
716 void Plan::cleanMustHandleValuesIfNecessary()
718 LockHolder locker(m_mustHandleValueCleaningLock);
720 if (!m_mustHandleValuesMayIncludeGarbage)
723 m_mustHandleValuesMayIncludeGarbage = false;
728 if (!m_mustHandleValues.numberOfLocals())
731 CodeBlock* alternative = m_codeBlock->alternative();
732 FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);
734 for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
735 if (!liveness[local])
736 m_mustHandleValues.local(local) = WTF::nullopt;
740 } } // namespace JSC::DFG
742 #endif // ENABLE(DFG_JIT)