2 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "DFGArgumentsEliminationPhase.h"
32 #include "DFGBackwardsPropagationPhase.h"
33 #include "DFGByteCodeParser.h"
34 #include "DFGCFAPhase.h"
35 #include "DFGCFGSimplificationPhase.h"
36 #include "DFGCPSRethreadingPhase.h"
37 #include "DFGCSEPhase.h"
38 #include "DFGCleanUpPhase.h"
39 #include "DFGConstantFoldingPhase.h"
40 #include "DFGConstantHoistingPhase.h"
41 #include "DFGCriticalEdgeBreakingPhase.h"
42 #include "DFGDCEPhase.h"
43 #include "DFGFailedFinalizer.h"
44 #include "DFGFixupPhase.h"
45 #include "DFGGraphSafepoint.h"
46 #include "DFGIntegerCheckCombiningPhase.h"
47 #include "DFGIntegerRangeOptimizationPhase.h"
48 #include "DFGInvalidationPointInjectionPhase.h"
49 #include "DFGJITCompiler.h"
50 #include "DFGLICMPhase.h"
51 #include "DFGLiveCatchVariablePreservationPhase.h"
52 #include "DFGLivenessAnalysisPhase.h"
53 #include "DFGLoopPreHeaderCreationPhase.h"
54 #include "DFGMovHintRemovalPhase.h"
55 #include "DFGOSRAvailabilityAnalysisPhase.h"
56 #include "DFGOSREntrypointCreationPhase.h"
57 #include "DFGObjectAllocationSinkingPhase.h"
58 #include "DFGPhantomInsertionPhase.h"
59 #include "DFGPredictionInjectionPhase.h"
60 #include "DFGPredictionPropagationPhase.h"
61 #include "DFGPutStackSinkingPhase.h"
62 #include "DFGSSAConversionPhase.h"
63 #include "DFGSSALoweringPhase.h"
64 #include "DFGStackLayoutPhase.h"
65 #include "DFGStaticExecutionCountEstimationPhase.h"
66 #include "DFGStoreBarrierClusteringPhase.h"
67 #include "DFGStoreBarrierInsertionPhase.h"
68 #include "DFGStrengthReductionPhase.h"
69 #include "DFGTierUpCheckInjectionPhase.h"
70 #include "DFGTypeCheckHoistingPhase.h"
71 #include "DFGUnificationPhase.h"
72 #include "DFGValidate.h"
73 #include "DFGValueRepReductionPhase.h"
74 #include "DFGVarargsForwardingPhase.h"
75 #include "DFGVirtualRegisterAllocationPhase.h"
76 #include "DFGWatchpointCollectionPhase.h"
77 #include "JSCInlines.h"
78 #include "OperandsInlines.h"
79 #include "ProfilerDatabase.h"
80 #include "TrackedReferences.h"
81 #include "VMInlines.h"
84 #include "FTLCapabilities.h"
85 #include "FTLCompile.h"
88 #include "FTLLowerDFGToB3.h"
// Running totals of compile time across all compilations, accumulated in
// compileInThread() when Options::reportTotalCompileTimes() is set.
// NOTE(review): defined elsewhere (extern); presumably dumped at VM shutdown — confirm.
94 extern Seconds totalDFGCompileTime;
95 extern Seconds totalFTLCompileTime;
96 extern Seconds totalFTLDFGCompileTime;
97 extern Seconds totalFTLB3CompileTime;
101 namespace JSC { namespace DFG {
// Dumps the DFG graph when verbose compilation is enabled (or forceDump is set),
// then validates it if validation is enabled. When the graph was already dumped,
// validation is told not to dump it again (DontDumpGraph).
// NOTE(review): this excerpt is missing lines (the dump call and brace structure
// between original lines 106-116 are not visible).
105 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
107 GraphDumpMode modeForFinalValidate = DumpGraph;
108 if (verboseCompilationEnabled(graph.m_plan.mode()) || forceDump) {
// Already dumped above, so don't dump again during validation.
111 modeForFinalValidate = DontDumpGraph;
113 if (validationEnabled())
114 validate(graph, modeForFinalValidate);
// Maps a DFG CompilationMode to the Profiler's CompilationKind for
// per-bytecode profiler records.
// NOTE(review): the switch framing and some case labels (e.g. DFGMode, FTLMode)
// are missing from this excerpt.
117 Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
120 case InvalidCompilationMode:
// An invalid mode should never reach here; the return only silences compilers.
121 RELEASE_ASSERT_NOT_REACHED();
122 return Profiler::DFG;
124 return Profiler::DFG;
126 return Profiler::FTL;
127 case FTLForOSREntryMode:
128 return Profiler::FTLForOSREntry;
// Unreachable fall-through guard after the switch.
130 RELEASE_ASSERT_NOT_REACHED();
131 return Profiler::DFG;
134 } // anonymous namespace
// Constructs a compilation plan for the given code block. Captures the values
// that must be handled at OSR entry, sets up profiling (only when the
// per-bytecode profiler is active), and seeds the desired-identifier /
// weak-reference collections from the code block.
// NOTE(review): some initializer-list lines and the constructor body braces are
// missing from this excerpt.
136 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
137 CompilationMode mode, unsigned osrEntryBytecodeIndex,
138 const Operands<Optional<JSValue>>& mustHandleValues)
140 , m_vm(passedCodeBlock->vm())
141 , m_codeBlock(passedCodeBlock)
142 , m_profiledDFGCodeBlock(profiledDFGCodeBlock)
143 , m_mustHandleValues(mustHandleValues)
144 , m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
// Only allocate a Profiler::Compilation when the per-bytecode profiler is on.
145 , m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
146 , m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
147 , m_identifiers(m_codeBlock)
148 , m_weakReferences(m_codeBlock)
// A DFG/FTL plan only makes sense if the baseline alternative already has JIT code.
151 RELEASE_ASSERT(m_codeBlock->alternative()->jitCode());
// The inline-call-frame set is shared across threads during compilation.
152 m_inlineCallFrames->disableThreadingChecks();
// Returns true if we need to record timestamps for this compilation: either
// because times will be reported, totals are being accumulated, or the
// per-bytecode profiler wants them.
159 bool Plan::computeCompileTimes() const
161 return reportCompileTimes()
162 || Options::reportTotalCompileTimes()
// The m_vm null-check guards use after cancellation.
163 || (m_vm && m_vm->m_perBytecodeProfiler);
// Returns true if this plan should print its compile time to the log.
// FTL-specific reporting only applies to FTL plans.
166 bool Plan::reportCompileTimes() const
168 return Options::reportCompileTimes()
169 || Options::reportDFGCompileTimes()
170 || (Options::reportFTLCompileTimes() && isFTL());
// Entry point run on a compiler thread: wraps compileInThreadImpl() with
// timing, logging, and post-compilation sanity asserts.
// NOTE(review): several lines (braces, the switch over `path` producing
// pathName, and some case labels) are missing from this excerpt.
173 void Plan::compileInThread(ThreadData* threadData)
175 m_threadData = threadData;
// Capture the start time / code block name only if they will actually be used.
177 MonotonicTime before { };
178 CString codeBlockName;
179 if (UNLIKELY(computeCompileTimes()))
180 before = MonotonicTime::now();
181 if (UNLIKELY(reportCompileTimes()))
182 codeBlockName = toCString(*m_codeBlock);
184 CompilationScope compilationScope;
186 if (logCompilationChanges(m_mode) || Options::logPhaseTimes())
187 dataLog("DFG(Plan) compiling ", *m_codeBlock, " with ", m_mode, ", instructions size = ", m_codeBlock->instructionsSize(), "\n");
189 CompilationPath path = compileInThreadImpl();
// Unless the compilation was cancelled, a finalizer must have been installed.
191 RELEASE_ASSERT(path == CancelPath || m_finalizer);
192 RELEASE_ASSERT((path == CancelPath) == (m_stage == Cancelled));
194 MonotonicTime after { };
195 if (UNLIKELY(computeCompileTimes())) {
196 after = MonotonicTime::now();
// Accumulate the global totals; the FTL split uses m_timeBeforeFTL as the
// boundary between DFG front-end time and B3 back-end time.
198 if (Options::reportTotalCompileTimes()) {
200 totalFTLCompileTime += after - before;
201 totalFTLDFGCompileTime += m_timeBeforeFTL - before;
202 totalFTLB3CompileTime += after - m_timeBeforeFTL;
204 totalDFGCompileTime += after - before;
// Human-readable label for the taken compilation path (switch not visible here).
207 const char* pathName = nullptr;
210 pathName = "N/A (fail)";
219 pathName = "Cancelled";
222 RELEASE_ASSERT_NOT_REACHED();
225 if (m_codeBlock) { // m_codeBlock will be null if the compilation was cancelled.
227 CODEBLOCK_LOG_EVENT(m_codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
229 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
231 if (UNLIKELY(reportCompileTimes())) {
232 dataLog("Optimized ", codeBlockName, " using ", m_mode, " with ", pathName, " into ", m_finalizer ? m_finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
234 dataLog(" (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ")");
// The compilation pipeline proper. Parses bytecode into a DFG graph, runs the
// shared CPS-form phases, then branches on compilation mode: the DFG tier
// finishes in CPS form and emits code via JITCompiler, while the FTL tiers
// convert to SSA, run the heavier optimization phases, and lower to B3.
// Returns which path was taken (DFG / FTL / fail / cancel).
// NOTE(review): many lines are missing from this excerpt — the switch framing
// over m_mode, several early returns, validate()/dump calls, and the
// #if ENABLE(FTL_JIT) guards — so control flow below is only partially visible.
240 Plan::CompilationPath Plan::compileInThreadImpl()
241 cleanMustHandleValuesIfNecessary();
243 if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
245 dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
249 Graph dfg(*m_vm, *this);
252 m_codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
254 bool changed = false;
// RUN_PHASE optionally flashes a GC safepoint before each phase (stress mode),
// bailing out with a cancel if the GC cancelled the plan during the safepoint.
256 #define RUN_PHASE(phase) \
258     if (Options::safepointBeforeEachPhase()) { \
259         Safepoint::Result safepointResult; \
261             GraphSafepoint safepoint(dfg, safepointResult); \
263         if (safepointResult.didGetCancelled()) \
267     changed |= phase(dfg); \
271 // By this point the DFG bytecode parser will have potentially mutated various tables
272 // in the CodeBlock. This is a good time to perform an early shrink, which is more
273 // powerful than a late one. It's safe to do so because we haven't generated any code
274 // that references any of the tables directly, yet.
275 m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
277 if (validationEnabled())
280 if (Options::dumpGraphAfterParsing()) {
281 dataLog("Graph after parsing:\n");
// --- Phases common to all modes, run on the CPS-form graph. ---
285 RUN_PHASE(performLiveCatchVariablePreservationPhase);
287 RUN_PHASE(performCPSRethreading);
288 RUN_PHASE(performUnification);
289 RUN_PHASE(performPredictionInjection);
291 RUN_PHASE(performStaticExecutionCountEstimation);
// FTL-for-OSR-entry needs a special entrypoint; failure aborts the compile.
293 if (m_mode == FTLForOSREntryMode) {
294 bool result = performOSREntrypointCreation(dfg);
296 m_finalizer = makeUnique<FailedFinalizer>(*this);
299 RUN_PHASE(performCPSRethreading);
302 if (validationEnabled())
305 RUN_PHASE(performBackwardsPropagation);
306 RUN_PHASE(performPredictionPropagation);
307 RUN_PHASE(performFixup);
308 RUN_PHASE(performInvalidationPointInjection);
309 RUN_PHASE(performTypeCheckHoisting);
311 dfg.m_fixpointState = FixpointNotConverged;
313 // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
314 // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
315 // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
316 // that the compiler compiles more quickly. We want the third tier to compile quickly, which
317 // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
318 if (validationEnabled())
321 RUN_PHASE(performStrengthReduction);
322 RUN_PHASE(performCPSRethreading);
323 RUN_PHASE(performCFA);
324 RUN_PHASE(performConstantFolding);
326 RUN_PHASE(performCFGSimplification);
327 RUN_PHASE(performLocalCSE);
329 if (validationEnabled())
332 RUN_PHASE(performCPSRethreading);
334 // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
335 // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
336 // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
337 // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
338 // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
339 // escape for all of the arguments. This then disables object allocation sinking.
341 // So, for now, we just disable this phase for the FTL.
343 // If we wanted to enable it, we'd have to do any of the following:
344 // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
345 //   PutStack sinking and object allocation sinking.
346 // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
347 //   GetStack+PutStack.
349 // But, it's not super valuable to enable those optimizations, since the FTL
350 // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
353 RUN_PHASE(performVarargsForwarding); // Do this after CFG simplification and CPS rethreading.
356 RUN_PHASE(performCFA);
357 RUN_PHASE(performConstantFolding);
360 // If we're doing validation, then run some analyses, to give them an opportunity
361 // to self-validate. Now is as good a time as any to do this.
362 if (validationEnabled()) {
363 dfg.ensureCPSDominators();
364 dfg.ensureCPSNaturalLoops();
// --- DFG (third-tier) back end: stay in CPS form and emit code directly. ---
369 dfg.m_fixpointState = FixpointConverged;
371 RUN_PHASE(performTierUpCheckInjection);
373 RUN_PHASE(performFastStoreBarrierInsertion);
374 RUN_PHASE(performStoreBarrierClustering);
375 RUN_PHASE(performCleanUp);
376 RUN_PHASE(performCPSRethreading);
377 RUN_PHASE(performDCE);
378 RUN_PHASE(performPhantomInsertion);
379 RUN_PHASE(performStackLayout);
380 RUN_PHASE(performVirtualRegisterAllocation);
381 RUN_PHASE(performWatchpointCollection);
382 dumpAndVerifyGraph(dfg, "Graph after optimization:");
384 JITCompiler dataFlowJIT(dfg);
385 if (m_codeBlock->codeType() == FunctionCode)
386 dataFlowJIT.compileFunction();
388 dataFlowJIT.compile();
// --- FTL (fourth-tier) back end: convert to SSA and lower to B3. ---
394 case FTLForOSREntryMode: {
// First capability check: bail to a failed finalizer if the FTL can't take this graph.
396 if (FTL::canCompile(dfg) == FTL::CannotCompile) {
397 m_finalizer = makeUnique<FailedFinalizer>(*this);
401 RUN_PHASE(performCleanUp); // Reduce the graph size a bit.
402 RUN_PHASE(performCriticalEdgeBreaking);
403 if (Options::createPreHeaders())
404 RUN_PHASE(performLoopPreHeaderCreation);
405 RUN_PHASE(performCPSRethreading);
406 RUN_PHASE(performSSAConversion);
407 RUN_PHASE(performSSALowering);
409 // Ideally, these would be run to fixpoint with the object allocation sinking phase.
410 RUN_PHASE(performArgumentsElimination);
411 if (Options::usePutStackSinking())
412 RUN_PHASE(performPutStackSinking);
414 RUN_PHASE(performConstantHoisting);
415 RUN_PHASE(performGlobalCSE);
416 RUN_PHASE(performLivenessAnalysis);
417 RUN_PHASE(performCFA);
418 RUN_PHASE(performConstantFolding);
419 RUN_PHASE(performCleanUp); // Reduce the graph size a lot.
421 RUN_PHASE(performStrengthReduction);
422 if (Options::useObjectAllocationSinking()) {
423 RUN_PHASE(performCriticalEdgeBreaking);
424 RUN_PHASE(performObjectAllocationSinking);
426 if (Options::useValueRepElimination())
427 RUN_PHASE(performValueRepReduction);
429 // State-at-tail and state-at-head will be invalid if we did strength reduction since
430 // it might increase live ranges.
431 RUN_PHASE(performLivenessAnalysis);
432 RUN_PHASE(performCFA);
433 RUN_PHASE(performConstantFolding);
436 // Currently, this relies on pre-headers still being valid. That precludes running CFG
437 // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
438 // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
439 // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
440 // then we'd need to do some simple SSA fix-up.
441 RUN_PHASE(performLivenessAnalysis);
442 RUN_PHASE(performCFA);
443 RUN_PHASE(performLICM);
445 // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
447 // IntegerRangeOptimization makes changes on nodes based on preceding blocks
448 // and nodes. LICM moves nodes which can invalidates assumptions used
449 // by IntegerRangeOptimization.
451 // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
452 RUN_PHASE(performLivenessAnalysis);
453 RUN_PHASE(performIntegerRangeOptimization);
455 RUN_PHASE(performCleanUp);
456 RUN_PHASE(performIntegerCheckCombining);
457 RUN_PHASE(performGlobalCSE);
459 // At this point we're not allowed to do any further code motion because our reasoning
460 // about code motion assumes that it's OK to insert GC points in random places.
461 dfg.m_fixpointState = FixpointConverged;
463 RUN_PHASE(performLivenessAnalysis);
464 RUN_PHASE(performCFA);
465 RUN_PHASE(performGlobalStoreBarrierInsertion);
466 RUN_PHASE(performStoreBarrierClustering);
467 if (Options::useMovHintRemoval())
468 RUN_PHASE(performMovHintRemoval);
469 RUN_PHASE(performCleanUp);
470 RUN_PHASE(performDCE); // We rely on this to kill dead code that won't be recognized as dead by B3.
471 RUN_PHASE(performStackLayout);
472 RUN_PHASE(performLivenessAnalysis);
473 RUN_PHASE(performOSRAvailabilityAnalysis);
474 RUN_PHASE(performWatchpointCollection);
// Second capability check, after all the SSA phases have reshaped the graph.
476 if (FTL::canCompile(dfg) == FTL::CannotCompile) {
477 m_finalizer = makeUnique<FailedFinalizer>(*this);
482 dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(m_mode));
484 // Flash a safepoint in case the GC wants some action.
485 Safepoint::Result safepointResult;
487 GraphSafepoint safepoint(dfg, safepointResult);
489 if (safepointResult.didGetCancelled())
493 FTL::State state(dfg);
494 FTL::lowerDFGToB3(state);
// This timestamp splits DFG front-end time from B3 back-end time in reporting.
496 if (UNLIKELY(computeCompileTimes()))
497 m_timeBeforeFTL = MonotonicTime::now();
// Testing hooks that force FTL failure at specific stages.
499 if (Options::b3AlwaysFailsBeforeCompile()) {
504 FTL::compile(state, safepointResult);
505 if (safepointResult.didGetCancelled())
508 if (Options::b3AlwaysFailsBeforeLink()) {
// Allocation failures during B3 compilation / linking fail the plan.
513 if (state.allocationFailed) {
520 if (state.allocationFailed) {
527 RELEASE_ASSERT_NOT_REACHED();
529 #endif // ENABLE(FTL_JIT)
533 RELEASE_ASSERT_NOT_REACHED();
// Checks, on the main thread, that nothing invalidated this compilation while
// it ran: the baseline code block must still be the replacement's baseline,
// and all desired watchpoints must still be valid.
// NOTE(review): the early-return and final `return true` lines are not visible
// in this excerpt.
540 bool Plan::isStillValid()
542 CodeBlock* replacement = m_codeBlock->replacement();
545 // FIXME: This is almost certainly not necessary. There's no way for the baseline
546 // code to be replaced during a compilation, except if we delete the plan, in which
547 // case we wouldn't be here.
548 // https://bugs.webkit.org/show_bug.cgi?id=132707
549 if (m_codeBlock->alternative() != replacement->baselineVersion())
551 if (!m_watchpoints.areStillValid())
// Commits everything the compilation decided it wants — watchpoints,
// identifiers, weak references, structure transitions, global-property
// dependencies, and recorded statuses — into the code block's CommonData.
// Called from finalizeWithoutNotifyingCallback() once the plan is known valid.
556 void Plan::reallyAdd(CommonData* commonData)
558 m_watchpoints.reallyAdd(m_codeBlock, *commonData);
559 m_identifiers.reallyAdd(*m_vm, commonData);
560 m_weakReferences.reallyAdd(*m_vm, commonData);
561 m_transitions.reallyAdd(*m_vm, commonData);
562 m_globalProperties.reallyAdd(m_codeBlock, m_identifiers, *commonData);
// Transfer ownership of the recorded statuses into the common data.
563 commonData->recordedStatuses = WTFMove(m_recordedStatuses);
// Marks the plan as having entered the compiling stage.
// NOTE(review): the body is not visible in this excerpt — presumably sets
// m_stage; confirm against the full source.
566 void Plan::notifyCompiling()
// Notifies the callback that compilation finished asynchronously and the
// result is ready to be finalized on the main thread.
571 void Plan::notifyReady()
573 m_callback->compilationDidBecomeReadyAsynchronously(m_codeBlock, m_profiledDFGCodeBlock);
// Main-thread-only validity check: verifies the desired global properties are
// still watchable given the identifiers the compilation depends on.
577 bool Plan::isStillValidOnMainThread()
579 return m_globalProperties.isStillValidOnMainThread(*m_vm, m_identifiers);
// Installs the compiled code on the main thread. Re-validates the plan, runs
// the finalizer, commits desired data via reallyAdd(), optionally validates
// that every reference in the new JITCode is tracked, and finally emits a
// write barrier on the code block. Returns the CompilationResult.
// NOTE(review): several lines (the FailPath branch, lambda plumbing, and some
// braces) are missing from this excerpt.
582 CompilationResult Plan::finalizeWithoutNotifyingCallback()
584 // We perform multiple stores before emitting a write-barrier. To ensure that no GC happens between store and write-barrier, we should ensure that
585 // GC is deferred when this function is called.
586 ASSERT(m_vm->heap.isDeferred());
588 CompilationResult result = [&] {
// Either main-thread state or compile-thread state may have been invalidated
// while the compilation was in flight.
589 if (!isStillValidOnMainThread() || !isStillValid()) {
590 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("invalidated"));
591 return CompilationInvalidated;
595 if (m_codeBlock->codeType() == FunctionCode)
596 result = m_finalizer->finalizeFunction();
598 result = m_finalizer->finalize();
601 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("failed"));
602 return CompilationFailed;
// Commit watchpoints/identifiers/references into the installed code's CommonData.
605 reallyAdd(m_codeBlock->jitCode()->dfgCommon());
// Under validation, cross-check that every reference reachable from the new
// JITCode is tracked strongly or weakly.
607 if (validationEnabled()) {
608 TrackedReferences trackedReferences;
610 for (WriteBarrier<JSCell>& reference : m_codeBlock->jitCode()->dfgCommon()->weakReferences)
611 trackedReferences.add(reference.get());
612 for (WriteBarrier<Structure>& reference : m_codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
613 trackedReferences.add(reference.get());
614 for (WriteBarrier<Unknown>& constant : m_codeBlock->constants())
615 trackedReferences.add(constant.get());
617 for (auto* inlineCallFrame : *m_inlineCallFrames) {
618 ASSERT(inlineCallFrame->baselineCodeBlock.get());
619 trackedReferences.add(inlineCallFrame->baselineCodeBlock.get());
622 // Check that any other references that we have anywhere in the JITCode are also
623 // tracked either strongly or weakly.
624 m_codeBlock->jitCode()->validateReferences(trackedReferences);
627 CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("succeeded"));
628 return CompilationSuccessful;
631 // We will establish new references from the code block to things. So, we need a barrier.
632 m_vm->heap.writeBarrier(m_codeBlock);
// Finalizes the compilation and forwards the result to the registered callback.
636 void Plan::finalizeAndNotifyCallback()
638 m_callback->compilationDidComplete(m_codeBlock, m_profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
// The plan's identity for deduplication: keyed on the baseline (alternative)
// code block plus the compilation mode.
641 CompilationKey Plan::key()
643 return CompilationKey(m_codeBlock->alternative(), m_mode);
// GC marking hook: if the plan is still live, visits everything it keeps
// alive — must-handle values, the code blocks, inline-call-frame baselines,
// and the desired weak references / transitions.
// NOTE(review): some brace/guard lines are missing from this excerpt.
646 void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
// A dead plan keeps nothing alive; bail early.
648 if (!isKnownToBeLiveDuringGC())
// Scrub stale must-handle values before handing them to the visitor.
651 cleanMustHandleValuesIfNecessary();
652 for (unsigned i = m_mustHandleValues.size(); i--;) {
653 Optional<JSValue> value = m_mustHandleValues[i];
655 visitor.appendUnbarriered(value.value());
658 m_recordedStatuses.markIfCheap(visitor);
660 visitor.appendUnbarriered(m_codeBlock);
661 visitor.appendUnbarriered(m_codeBlock->alternative());
662 visitor.appendUnbarriered(m_profiledDFGCodeBlock);
664 if (m_inlineCallFrames) {
665 for (auto* inlineCallFrame : *m_inlineCallFrames) {
666 ASSERT(inlineCallFrame->baselineCodeBlock.get());
667 visitor.appendUnbarriered(inlineCallFrame->baselineCodeBlock.get());
671 m_weakReferences.visitChildren(visitor);
672 m_transitions.visitChildren(visitor);
// GC finalization hook: prunes dead entries from the recorded statuses
// without deleting the collection itself.
675 void Plan::finalizeInGC()
678 m_recordedStatuses.finalizeWithoutDeleting(*m_vm);
// Returns whether the plan should survive this GC: a cancelled plan, or one
// whose owner executable / baseline / profiled code block is unmarked, is dead.
// NOTE(review): the `return false` / `return true` lines paired with these
// conditions are not visible in this excerpt.
681 bool Plan::isKnownToBeLiveDuringGC()
683 if (m_stage == Cancelled)
685 if (!m_vm->heap.isMarked(m_codeBlock->ownerExecutable()))
687 if (!m_vm->heap.isMarked(m_codeBlock->alternative()))
689 if (!!m_profiledDFGCodeBlock && !m_vm->heap.isMarked(m_profiledDFGCodeBlock))
// NOTE(review): this appears to be the interior of Plan::cancel() — the
// function header is not visible in this excerpt; confirm against the full
// source. It drops every reference the plan holds so a cancelled plan keeps
// nothing alive and the GC / checkLivenessAndVisitChildren see it as inert.
697 m_codeBlock = nullptr;
698 m_profiledDFGCodeBlock = nullptr;
699 m_mustHandleValues.clear();
700 m_compilation = nullptr;
701 m_finalizer = nullptr;
702 m_inlineCallFrames = nullptr;
// Reset the desired-* collections to empty, releasing their contents.
703 m_watchpoints = DesiredWatchpoints();
704 m_identifiers = DesiredIdentifiers();
705 m_globalProperties = DesiredGlobalProperties();
706 m_weakReferences = DesiredWeakReferences();
707 m_transitions = DesiredTransitions();
708 m_callback = nullptr;
// Clears must-handle values for locals that are dead at the OSR entry point,
// so marking doesn't keep garbage alive. Runs at most once per "dirty" period,
// guarded by a lock (called from both the compiler thread and GC marking).
// NOTE(review): a couple of early-return lines are missing from this excerpt.
712 void Plan::cleanMustHandleValuesIfNecessary()
714 LockHolder locker(m_mustHandleValueCleaningLock);
// Nothing to do if the values are already known clean.
716 if (!m_mustHandleValuesMayIncludeGarbage)
719 m_mustHandleValuesMayIncludeGarbage = false;
724 if (!m_mustHandleValues.numberOfLocals())
// Use baseline liveness at the OSR entry bytecode to decide which locals matter.
727 CodeBlock* alternative = m_codeBlock->alternative();
728 FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);
730 for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
731 if (!liveness[local])
// Dead local: drop its value so the GC doesn't scan it.
732 m_mustHandleValues.local(local) = WTF::nullopt;
736 } } // namespace JSC::DFG
736 } } // namespace JSC::DFG
738 #endif // ENABLE(DFG_JIT)