Refactoring: make DFG::Plan a class.
author: mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 24 Jul 2018 23:14:12 +0000 (23:14 +0000)
committer: mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 24 Jul 2018 23:14:12 +0000 (23:14 +0000)
https://bugs.webkit.org/show_bug.cgi?id=187968

Reviewed by Saam Barati.

This patch makes all the DFG::Plan fields private, and provides accessor methods
for them.  This makes it easier to reason about how these fields are used and
modified.

* dfg/DFGAbstractInterpreterInlines.h:
(JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleVarargsCall):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::handleIntrinsicCall):
(JSC::DFG::ByteCodeParser::handleDOMJITGetter):
(JSC::DFG::ByteCodeParser::handleModuleNamespaceLoad):
(JSC::DFG::ByteCodeParser::handleGetById):
(JSC::DFG::ByteCodeParser::handlePutById):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
* dfg/DFGCFAPhase.cpp:
(JSC::DFG::CFAPhase::run):
(JSC::DFG::CFAPhase::injectOSR):
* dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* dfg/DFGCommonData.cpp:
(JSC::DFG::CommonData::notifyCompilingStructureTransition):
* dfg/DFGCommonData.h:
* dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* dfg/DFGDriver.cpp:
(JSC::DFG::compileImpl):
* dfg/DFGFinalizer.h:
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
(JSC::DFG::FixupPhase::fixupCompareStrictEqAndSameValue):
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::Graph):
(JSC::DFG::Graph::watchCondition):
(JSC::DFG::Graph::inferredTypeFor):
(JSC::DFG::Graph::requiredRegisterCountForExit):
(JSC::DFG::Graph::registerFrozenValues):
(JSC::DFG::Graph::registerStructure):
(JSC::DFG::Graph::registerAndWatchStructureTransition):
(JSC::DFG::Graph::assertIsRegistered):
* dfg/DFGGraph.h:
(JSC::DFG::Graph::compilation):
(JSC::DFG::Graph::identifiers):
(JSC::DFG::Graph::watchpoints):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::JITCompiler):
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
(JSC::DFG::JITCompiler::disassemble):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::addWeakReference):
* dfg/DFGJITFinalizer.cpp:
(JSC::DFG::JITFinalizer::finalize):
(JSC::DFG::JITFinalizer::finalizeFunction):
(JSC::DFG::JITFinalizer::finalizeCommon):
* dfg/DFGOSREntrypointCreationPhase.cpp:
(JSC::DFG::OSREntrypointCreationPhase::run):
* dfg/DFGPhase.cpp:
(JSC::DFG::Phase::beginPhase):
* dfg/DFGPhase.h:
(JSC::DFG::runAndLog):
* dfg/DFGPlan.cpp:
(JSC::DFG::Plan::Plan):
(JSC::DFG::Plan::computeCompileTimes const):
(JSC::DFG::Plan::reportCompileTimes const):
(JSC::DFG::Plan::compileInThread):
(JSC::DFG::Plan::compileInThreadImpl):
(JSC::DFG::Plan::isStillValid):
(JSC::DFG::Plan::reallyAdd):
(JSC::DFG::Plan::notifyCompiling):
(JSC::DFG::Plan::notifyReady):
(JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
(JSC::DFG::Plan::finalizeAndNotifyCallback):
(JSC::DFG::Plan::key):
(JSC::DFG::Plan::checkLivenessAndVisitChildren):
(JSC::DFG::Plan::finalizeInGC):
(JSC::DFG::Plan::isKnownToBeLiveDuringGC):
(JSC::DFG::Plan::cancel):
(JSC::DFG::Plan::cleanMustHandleValuesIfNecessary):
* dfg/DFGPlan.h:
(JSC::DFG::Plan::canTierUpAndOSREnter const):
(JSC::DFG::Plan::vm const):
(JSC::DFG::Plan::codeBlock):
(JSC::DFG::Plan::mode const):
(JSC::DFG::Plan::osrEntryBytecodeIndex const):
(JSC::DFG::Plan::mustHandleValues const):
(JSC::DFG::Plan::threadData const):
(JSC::DFG::Plan::compilation const):
(JSC::DFG::Plan::finalizer const):
(JSC::DFG::Plan::setFinalizer):
(JSC::DFG::Plan::inlineCallFrames const):
(JSC::DFG::Plan::watchpoints):
(JSC::DFG::Plan::identifiers):
(JSC::DFG::Plan::weakReferences):
(JSC::DFG::Plan::transitions):
(JSC::DFG::Plan::recordedStatuses):
(JSC::DFG::Plan::willTryToTierUp const):
(JSC::DFG::Plan::setWillTryToTierUp):
(JSC::DFG::Plan::tierUpInLoopHierarchy):
(JSC::DFG::Plan::tierUpAndOSREnterBytecodes):
(JSC::DFG::Plan::stage const):
(JSC::DFG::Plan::callback const):
(JSC::DFG::Plan::setCallback):
* dfg/DFGPlanInlines.h:
(JSC::DFG::Plan::iterateCodeBlocksForGC):
* dfg/DFGPreciseLocalClobberize.h:
(JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
* dfg/DFGPredictionInjectionPhase.cpp:
(JSC::DFG::PredictionInjectionPhase::run):
* dfg/DFGSafepoint.cpp:
(JSC::DFG::Safepoint::Safepoint):
(JSC::DFG::Safepoint::~Safepoint):
(JSC::DFG::Safepoint::begin):
* dfg/DFGSafepoint.h:
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPointer):
(JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPoisonedPointer):
* dfg/DFGStackLayoutPhase.cpp:
(JSC::DFG::StackLayoutPhase::run):
* dfg/DFGStrengthReductionPhase.cpp:
(JSC::DFG::StrengthReductionPhase::handleNode):
* dfg/DFGTierUpCheckInjectionPhase.cpp:
(JSC::DFG::TierUpCheckInjectionPhase::run):
* dfg/DFGTypeCheckHoistingPhase.cpp:
(JSC::DFG::TypeCheckHoistingPhase::disableHoistingAcrossOSREntries):
* dfg/DFGWorklist.cpp:
(JSC::DFG::Worklist::isActiveForVM const):
(JSC::DFG::Worklist::compilationState):
(JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
(JSC::DFG::Worklist::removeAllReadyPlansForVM):
(JSC::DFG::Worklist::completeAllReadyPlansForVM):
(JSC::DFG::Worklist::visitWeakReferences):
(JSC::DFG::Worklist::removeDeadPlans):
(JSC::DFG::Worklist::removeNonCompilingPlansForVM):
* dfg/DFGWorklistInlines.h:
(JSC::DFG::Worklist::iterateCodeBlocksForGC):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLFail.cpp:
(JSC::FTL::fail):
* ftl/FTLJITFinalizer.cpp:
(JSC::FTL::JITFinalizer::finalizeCommon):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::compileMultiPutByOffset):
(JSC::FTL::DFG::LowerDFGToB3::buildExitArguments):
(JSC::FTL::DFG::LowerDFGToB3::addWeakReference):
* ftl/FTLState.cpp:
(JSC::FTL::State::State):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@234178 268f45cc-cd09-0410-ab3c-d52691b4dbfc

40 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
Source/JavaScriptCore/dfg/DFGClobberize.h
Source/JavaScriptCore/dfg/DFGCommonData.cpp
Source/JavaScriptCore/dfg/DFGCommonData.h
Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
Source/JavaScriptCore/dfg/DFGDriver.cpp
Source/JavaScriptCore/dfg/DFGFinalizer.h
Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
Source/JavaScriptCore/dfg/DFGGraph.cpp
Source/JavaScriptCore/dfg/DFGGraph.h
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.h
Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
Source/JavaScriptCore/dfg/DFGOSREntrypointCreationPhase.cpp
Source/JavaScriptCore/dfg/DFGPhase.cpp
Source/JavaScriptCore/dfg/DFGPhase.h
Source/JavaScriptCore/dfg/DFGPlan.cpp
Source/JavaScriptCore/dfg/DFGPlan.h
Source/JavaScriptCore/dfg/DFGPlanInlines.h
Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h
Source/JavaScriptCore/dfg/DFGPredictionInjectionPhase.cpp
Source/JavaScriptCore/dfg/DFGSafepoint.cpp
Source/JavaScriptCore/dfg/DFGSafepoint.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
Source/JavaScriptCore/dfg/DFGStackLayoutPhase.cpp
Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
Source/JavaScriptCore/dfg/DFGWorklist.cpp
Source/JavaScriptCore/dfg/DFGWorklistInlines.h
Source/JavaScriptCore/ftl/FTLCompile.cpp
Source/JavaScriptCore/ftl/FTLFail.cpp
Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
Source/JavaScriptCore/ftl/FTLLink.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/ftl/FTLOutput.h
Source/JavaScriptCore/ftl/FTLState.cpp

index e04639c..1296303 100644 (file)
@@ -1,3 +1,166 @@
+2018-07-24  Mark Lam  <mark.lam@apple.com>
+
+        Refactoring: make DFG::Plan a class.
+        https://bugs.webkit.org/show_bug.cgi?id=187968
+
+        Reviewed by Saam Barati.
+
+        This patch makes all the DFG::Plan fields private, and provide accessor methods
+        for them.  This makes it easier to reason about how these fields are used and
+        modified.
+
+        * dfg/DFGAbstractInterpreterInlines.h:
+        (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
+        * dfg/DFGByteCodeParser.cpp:
+        (JSC::DFG::ByteCodeParser::handleCall):
+        (JSC::DFG::ByteCodeParser::handleVarargsCall):
+        (JSC::DFG::ByteCodeParser::handleInlining):
+        (JSC::DFG::ByteCodeParser::handleIntrinsicCall):
+        (JSC::DFG::ByteCodeParser::handleDOMJITGetter):
+        (JSC::DFG::ByteCodeParser::handleModuleNamespaceLoad):
+        (JSC::DFG::ByteCodeParser::handleGetById):
+        (JSC::DFG::ByteCodeParser::handlePutById):
+        (JSC::DFG::ByteCodeParser::parseBlock):
+        (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+        (JSC::DFG::ByteCodeParser::parseCodeBlock):
+        (JSC::DFG::ByteCodeParser::parse):
+        * dfg/DFGCFAPhase.cpp:
+        (JSC::DFG::CFAPhase::run):
+        (JSC::DFG::CFAPhase::injectOSR):
+        * dfg/DFGClobberize.h:
+        (JSC::DFG::clobberize):
+        * dfg/DFGCommonData.cpp:
+        (JSC::DFG::CommonData::notifyCompilingStructureTransition):
+        * dfg/DFGCommonData.h:
+        * dfg/DFGConstantFoldingPhase.cpp:
+        (JSC::DFG::ConstantFoldingPhase::foldConstants):
+        * dfg/DFGDriver.cpp:
+        (JSC::DFG::compileImpl):
+        * dfg/DFGFinalizer.h:
+        * dfg/DFGFixupPhase.cpp:
+        (JSC::DFG::FixupPhase::fixupNode):
+        (JSC::DFG::FixupPhase::fixupCompareStrictEqAndSameValue):
+        * dfg/DFGGraph.cpp:
+        (JSC::DFG::Graph::Graph):
+        (JSC::DFG::Graph::watchCondition):
+        (JSC::DFG::Graph::inferredTypeFor):
+        (JSC::DFG::Graph::requiredRegisterCountForExit):
+        (JSC::DFG::Graph::registerFrozenValues):
+        (JSC::DFG::Graph::registerStructure):
+        (JSC::DFG::Graph::registerAndWatchStructureTransition):
+        (JSC::DFG::Graph::assertIsRegistered):
+        * dfg/DFGGraph.h:
+        (JSC::DFG::Graph::compilation):
+        (JSC::DFG::Graph::identifiers):
+        (JSC::DFG::Graph::watchpoints):
+        * dfg/DFGJITCompiler.cpp:
+        (JSC::DFG::JITCompiler::JITCompiler):
+        (JSC::DFG::JITCompiler::link):
+        (JSC::DFG::JITCompiler::compile):
+        (JSC::DFG::JITCompiler::compileFunction):
+        (JSC::DFG::JITCompiler::disassemble):
+        * dfg/DFGJITCompiler.h:
+        (JSC::DFG::JITCompiler::addWeakReference):
+        * dfg/DFGJITFinalizer.cpp:
+        (JSC::DFG::JITFinalizer::finalize):
+        (JSC::DFG::JITFinalizer::finalizeFunction):
+        (JSC::DFG::JITFinalizer::finalizeCommon):
+        * dfg/DFGOSREntrypointCreationPhase.cpp:
+        (JSC::DFG::OSREntrypointCreationPhase::run):
+        * dfg/DFGPhase.cpp:
+        (JSC::DFG::Phase::beginPhase):
+        * dfg/DFGPhase.h:
+        (JSC::DFG::runAndLog):
+        * dfg/DFGPlan.cpp:
+        (JSC::DFG::Plan::Plan):
+        (JSC::DFG::Plan::computeCompileTimes const):
+        (JSC::DFG::Plan::reportCompileTimes const):
+        (JSC::DFG::Plan::compileInThread):
+        (JSC::DFG::Plan::compileInThreadImpl):
+        (JSC::DFG::Plan::isStillValid):
+        (JSC::DFG::Plan::reallyAdd):
+        (JSC::DFG::Plan::notifyCompiling):
+        (JSC::DFG::Plan::notifyReady):
+        (JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
+        (JSC::DFG::Plan::finalizeAndNotifyCallback):
+        (JSC::DFG::Plan::key):
+        (JSC::DFG::Plan::checkLivenessAndVisitChildren):
+        (JSC::DFG::Plan::finalizeInGC):
+        (JSC::DFG::Plan::isKnownToBeLiveDuringGC):
+        (JSC::DFG::Plan::cancel):
+        (JSC::DFG::Plan::cleanMustHandleValuesIfNecessary):
+        * dfg/DFGPlan.h:
+        (JSC::DFG::Plan::canTierUpAndOSREnter const):
+        (JSC::DFG::Plan::vm const):
+        (JSC::DFG::Plan::codeBlock):
+        (JSC::DFG::Plan::mode const):
+        (JSC::DFG::Plan::osrEntryBytecodeIndex const):
+        (JSC::DFG::Plan::mustHandleValues const):
+        (JSC::DFG::Plan::threadData const):
+        (JSC::DFG::Plan::compilation const):
+        (JSC::DFG::Plan::finalizer const):
+        (JSC::DFG::Plan::setFinalizer):
+        (JSC::DFG::Plan::inlineCallFrames const):
+        (JSC::DFG::Plan::watchpoints):
+        (JSC::DFG::Plan::identifiers):
+        (JSC::DFG::Plan::weakReferences):
+        (JSC::DFG::Plan::transitions):
+        (JSC::DFG::Plan::recordedStatuses):
+        (JSC::DFG::Plan::willTryToTierUp const):
+        (JSC::DFG::Plan::setWillTryToTierUp):
+        (JSC::DFG::Plan::tierUpInLoopHierarchy):
+        (JSC::DFG::Plan::tierUpAndOSREnterBytecodes):
+        (JSC::DFG::Plan::stage const):
+        (JSC::DFG::Plan::callback const):
+        (JSC::DFG::Plan::setCallback):
+        * dfg/DFGPlanInlines.h:
+        (JSC::DFG::Plan::iterateCodeBlocksForGC):
+        * dfg/DFGPreciseLocalClobberize.h:
+        (JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
+        * dfg/DFGPredictionInjectionPhase.cpp:
+        (JSC::DFG::PredictionInjectionPhase::run):
+        * dfg/DFGSafepoint.cpp:
+        (JSC::DFG::Safepoint::Safepoint):
+        (JSC::DFG::Safepoint::~Safepoint):
+        (JSC::DFG::Safepoint::begin):
+        * dfg/DFGSafepoint.h:
+        * dfg/DFGSpeculativeJIT.h:
+        (JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPointer):
+        (JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPoisonedPointer):
+        * dfg/DFGStackLayoutPhase.cpp:
+        (JSC::DFG::StackLayoutPhase::run):
+        * dfg/DFGStrengthReductionPhase.cpp:
+        (JSC::DFG::StrengthReductionPhase::handleNode):
+        * dfg/DFGTierUpCheckInjectionPhase.cpp:
+        (JSC::DFG::TierUpCheckInjectionPhase::run):
+        * dfg/DFGTypeCheckHoistingPhase.cpp:
+        (JSC::DFG::TypeCheckHoistingPhase::disableHoistingAcrossOSREntries):
+        * dfg/DFGWorklist.cpp:
+        (JSC::DFG::Worklist::isActiveForVM const):
+        (JSC::DFG::Worklist::compilationState):
+        (JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
+        (JSC::DFG::Worklist::removeAllReadyPlansForVM):
+        (JSC::DFG::Worklist::completeAllReadyPlansForVM):
+        (JSC::DFG::Worklist::visitWeakReferences):
+        (JSC::DFG::Worklist::removeDeadPlans):
+        (JSC::DFG::Worklist::removeNonCompilingPlansForVM):
+        * dfg/DFGWorklistInlines.h:
+        (JSC::DFG::Worklist::iterateCodeBlocksForGC):
+        * ftl/FTLCompile.cpp:
+        (JSC::FTL::compile):
+        * ftl/FTLFail.cpp:
+        (JSC::FTL::fail):
+        * ftl/FTLJITFinalizer.cpp:
+        (JSC::FTL::JITFinalizer::finalizeCommon):
+        * ftl/FTLLink.cpp:
+        (JSC::FTL::link):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::compileMultiPutByOffset):
+        (JSC::FTL::DFG::LowerDFGToB3::buildExitArguments):
+        (JSC::FTL::DFG::LowerDFGToB3::addWeakReference):
+        * ftl/FTLState.cpp:
+        (JSC::FTL::State::State):
+
 2018-07-24  Saam Barati  <sbarati@apple.com>
 
         Make VM::canUseJIT an inlined function
index c048472..940faf6 100644 (file)
@@ -3395,8 +3395,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
                         newSet.merge(*m_graph.addStructureSet(variant.oldStructure()));
                     }
                 }
-                
-                if (status.numVariants() == 1 || isFTL(m_graph.m_plan.mode))
+
+                if (status.numVariants() == 1 || m_graph.m_plan.isFTL())
                     m_state.setFoundConstants(true);
                 
                 didFoldClobberWorld();
index e9c7fa5..54d7156 100644 (file)
@@ -1238,8 +1238,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(
     // If we have profiling information about this call, and it did not behave too polymorphically,
     // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
     if (callLinkStatus.canOptimize()) {
-        addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
-        
+        addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
+
         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
         auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
             argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
@@ -1283,8 +1283,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N
     VERBOSE_LOG("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
     
     if (callLinkStatus.canOptimize()) {
-        addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
-        
+        addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
+
         if (handleVarargsInlining(callTarget, result,
             callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments),
             firstVarArgOffset, op,
@@ -1915,7 +1915,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
     // we could improve that aspect of this by doing polymorphic inlining but having the profiling
     // also.
-    if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()) {
+    if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
         VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
         return CallOptimizationResult::DidNothing;
     }
@@ -2807,7 +2807,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin
 
     case FTLTrueIntrinsic: {
         insertChecks();
-        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode))));
+        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
         return true;
     }
         
@@ -2821,7 +2821,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin
     case IsFinalTierIntrinsic: {
         insertChecks();
         set(VirtualRegister(resultOperand),
-            jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
+            jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
         return true;
     }
         
@@ -3167,7 +3167,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin
     case CPUCpuidIntrinsic:
     case CPUPauseIntrinsic: {
 #if CPU(X86_64)
-        if (!isFTL(m_graph.m_plan.mode))
+        if (!m_graph.m_plan.isFTL())
             return false;
         insertChecks();
         set(VirtualRegister(resultOperand),
@@ -3335,7 +3335,7 @@ bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant&
     addToGraph(CheckSubClass, OpInfo(domAttribute.classInfo), thisNode);
     
     bool wasSeenInJIT = true;
-    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
+    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
 
     CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
     callDOMGetterData->customAccessorGetter = variant.customAccessorGetter();
@@ -3367,8 +3367,8 @@ bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType
     if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
         return false;
     addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
-    
-    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getById)), base);
+
+    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getById)), base);
 
     // Ideally we wouldn't have to do this Phantom. But:
     //
@@ -4029,16 +4029,16 @@ void ByteCodeParser::handleGetById(
     // GetByIdStatus. That means that the constant folder also needs to do the same!
     
     if (getByIdStatus.numVariants() > 1) {
-        if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
+        if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL()
             || !Options::usePolymorphicAccessInlining()
             || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
             set(VirtualRegister(destinationOperand),
                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
             return;
         }
-        
-        addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
-        
+
+        addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
+
         Vector<MultiGetByOffsetCase, 2> cases;
         
         // 1) Emit prototype structure checks for all chains. This could sort of maybe not be
@@ -4081,8 +4081,8 @@ void ByteCodeParser::handleGetById(
         return;
     }
 
-    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
-    
+    addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
+
     ASSERT(getByIdStatus.numVariants() == 1);
     GetByIdVariant variant = getByIdStatus[0];
     
@@ -4175,7 +4175,7 @@ void ByteCodeParser::handlePutById(
     }
     
     if (putByIdStatus.numVariants() > 1) {
-        if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
+        if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls()
             || !Options::usePolymorphicAccessInlining()
             || putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -4195,8 +4195,8 @@ void ByteCodeParser::handlePutById(
         
         if (UNLIKELY(m_graph.compilation()))
             m_graph.compilation()->noticeInlinedPutById();
-        
-        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
+        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
 
         for (const PutByIdVariant& variant : putByIdStatus.variants()) {
             m_graph.registerInferredType(variant.requiredType());
@@ -4218,8 +4218,8 @@ void ByteCodeParser::handlePutById(
     
     switch (variant.kind()) {
     case PutByIdVariant::Replace: {
-        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-        
+        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
         store(base, identifierNumber, variant, value);
         if (UNLIKELY(m_graph.compilation()))
             m_graph.compilation()->noticeInlinedPutById();
@@ -4227,8 +4227,8 @@ void ByteCodeParser::handlePutById(
     }
     
     case PutByIdVariant::Transition: {
-        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-        
+        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
         if (!check(variant.conditionSet())) {
             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -4296,8 +4296,8 @@ void ByteCodeParser::handlePutById(
     }
         
     case PutByIdVariant::Setter: {
-        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
-        
+        addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
+
         Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
         if (!loadedValue) {
             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
@@ -5648,7 +5648,7 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 NEXT_OPCODE(op_catch);
             }
 
-            if (m_graph.m_plan.mode == FTLForOSREntryMode) {
+            if (m_graph.m_plan.mode() == FTLForOSREntryMode) {
                 NEXT_OPCODE(op_catch);
             }
 
@@ -6495,8 +6495,8 @@ void ByteCodeParser::parseBlock(unsigned limit)
                 }
 
                 if (allOK) {
-                    addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addInByIdStatus(currentCodeOrigin(), status)), base);
-                    
+                    addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
+
                     Node* match = addToGraph(MatchStructure, OpInfo(data), base);
                     set(VirtualRegister(currentInstruction[1].u.operand), match);
                     NEXT_OPCODE(op_in_by_id);
@@ -6699,8 +6699,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         // Inline case.
         ASSERT(codeBlock != byteCodeParser->m_codeBlock);
         ASSERT(inlineCallFrameStart.isValid());
-        
-        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
+
+        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add();
         m_optimizedContext.inlineCallFrame = m_inlineCallFrame;
 
         // The owner is the machine code block, and we already have a barrier on that when the
@@ -6776,7 +6776,7 @@ void ByteCodeParser::parseCodeBlock()
     }
     
     if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
-        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
+        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
         if (inlineCallFrame()) {
             DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller);
             deferredSourceDump.append(dump);
@@ -6870,7 +6870,7 @@ void ByteCodeParser::parse()
     if (m_hasAnyForceOSRExits) {
         BlockSet blocksToIgnore;
         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex) {
+            if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
                 blocksToIgnore.add(block);
                 break;
             }
index a0002eb..fa9c1bc 100644 (file)
@@ -57,8 +57,8 @@ public:
         ASSERT(m_graph.m_refCountState == EverythingIsLive);
         
         m_count = 0;
-        
-        if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode)) {
+
+        if (m_verbose && !shouldDumpGraphAtEachPhase(m_graph.m_plan.mode())) {
             dataLog("Graph before CFA:\n");
             m_graph.dump();
         }
@@ -88,7 +88,7 @@ public:
                 
                 if (!block->isOSRTarget)
                     continue;
-                if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+                if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
                     continue;
                 
                 // We record that the block needs some OSR stuff, but we don't do that yet. We want to
@@ -156,9 +156,10 @@ private:
             dataLog("   Found must-handle block: ", *block, "\n");
         
         bool changed = false;
-        for (size_t i = m_graph.m_plan.mustHandleValues.size(); i--;) {
-            int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
-            JSValue value = m_graph.m_plan.mustHandleValues[i];
+        const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+        for (size_t i = mustHandleValues.size(); i--;) {
+            int operand = mustHandleValues.operandForIndex(i);
+            JSValue value = mustHandleValues[i];
             Node* node = block->variablesAtHead.operand(operand);
             if (!node) {
                 if (m_verbose)
index 2b04fff..4563481 100644 (file)
@@ -520,7 +520,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu
     case PhantomClonedArguments:
         // DFG backend requires that the locals that this reads are flushed. FTL backend can handle those
         // locals being promoted.
-        if (!isFTL(graph.m_plan.mode))
+        if (!graph.m_plan.isFTL())
             read(Stack);
         
         // Even though it's phantom, it still has the property that one can't be replaced with another.
index b11aab1..59565b5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -42,7 +42,7 @@ namespace JSC { namespace DFG {
 
 void CommonData::notifyCompilingStructureTransition(Plan& plan, CodeBlock* codeBlock, Node* node)
 {
-    plan.transitions.addLazily(
+    plan.transitions().addLazily(
         codeBlock,
         node->origin.semantic.codeOriginOwner(),
         node->transition()->previous.get(),
index f7dbe3e..cb30438 100644 (file)
@@ -48,7 +48,7 @@ class TrackedReferences;
 namespace DFG {
 
 struct Node;
-struct Plan;
+class Plan;
 
 // CommonData holds the set of data that both DFG and FTL code blocks need to know
 // about themselves.
index 108180d..fb1f6dc 100644 (file)
@@ -560,7 +560,7 @@ private:
                 auto addFilterStatus = [&] () {
                     m_insertionSet.insertNode(
                         indexInBlock, SpecNone, FilterGetByIdStatus, node->origin,
-                        OpInfo(m_graph.m_plan.recordedStatuses.addGetByIdStatus(node->origin.semantic, status)),
+                        OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(node->origin.semantic, status)),
                         Edge(child));
                 };
                 
@@ -570,8 +570,8 @@ private:
                     changed = true;
                     break;
                 }
-                
-                if (!isFTL(m_graph.m_plan.mode))
+
+                if (!m_graph.m_plan.isFTL())
                     break;
                 
                 addFilterStatus();
@@ -617,8 +617,8 @@ private:
                     break;
 
                 ASSERT(status.numVariants());
-                
-                if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
+
+                if (status.numVariants() > 1 && !m_graph.m_plan.isFTL())
                     break;
                 
                 changed = true;
@@ -650,15 +650,15 @@ private:
                 
                 m_insertionSet.insertNode(
                     indexInBlock, SpecNone, FilterPutByIdStatus, node->origin,
-                    OpInfo(m_graph.m_plan.recordedStatuses.addPutByIdStatus(node->origin.semantic, status)),
+                    OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(node->origin.semantic, status)),
                     Edge(child));
                 
                 if (status.numVariants() == 1) {
                     emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                     break;
                 }
-                
-                ASSERT(isFTL(m_graph.m_plan.mode));
+
+                ASSERT(m_graph.m_plan.isFTL());
 
                 MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
                 data->variants = status.variants();
index 0f6e0a4..e2a3457 100644 (file)
@@ -101,8 +101,8 @@ static CompilationResult compileImpl(
     
     Ref<Plan> plan = adoptRef(
         *new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
-    
-    plan->callback = WTFMove(callback);
+
+    plan->setCallback(WTFMove(callback));
     if (Options::useConcurrentJIT()) {
         Worklist& worklist = ensureGlobalWorklistFor(mode);
         if (logCompilationChanges(mode))
index f28beba..80b9242 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace JSC { namespace DFG {
 
-struct Plan;
+class Plan;
 
 class Finalizer {
     WTF_MAKE_NONCOPYABLE(Finalizer); WTF_MAKE_FAST_ALLOCATED;
index 2f0e23e..a9f0b25 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -907,7 +907,7 @@ private:
                 // conversions.
                 if (!child->shouldSpeculateInt32()
                     && !child->shouldSpeculateAnyInt()
-                    && !(child->shouldSpeculateNumberOrBoolean() && isFTL(m_graph.m_plan.mode)))
+                    && !(child->shouldSpeculateNumberOrBoolean() && m_graph.m_plan.isFTL()))
                     badNews = true;
             }
             
@@ -930,7 +930,7 @@ private:
                 else if (child->shouldSpeculateAnyInt())
                     fixEdge<Int52RepUse>(child);
                 else {
-                    RELEASE_ASSERT(child->shouldSpeculateNumberOrBoolean() && isFTL(m_graph.m_plan.mode));
+                    RELEASE_ASSERT(child->shouldSpeculateNumberOrBoolean() && m_graph.m_plan.isFTL());
                     fixDoubleOrBooleanEdge(child);
                 }
             }
@@ -3497,7 +3497,7 @@ private:
             node->setOpAndDefaultFlags(CompareStrictEq);
             return;
         }
-        if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || isFTL(m_graph.m_plan.mode))) {
+        if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 7) || m_graph.m_plan.isFTL())) {
             fixEdge<StringUse>(node->child1());
             fixEdge<StringUse>(node->child2());
             node->setOpAndDefaultFlags(CompareStrictEq);
@@ -3569,12 +3569,12 @@ private:
             node->setOpAndDefaultFlags(CompareStrictEq);
             return;
         }
-        if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
+        if (node->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || m_graph.m_plan.isFTL())) {
             fixEdge<StringUse>(node->child1());
             node->setOpAndDefaultFlags(CompareStrictEq);
             return;
         }
-        if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || isFTL(m_graph.m_plan.mode))) {
+        if (node->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters >= 8) || m_graph.m_plan.isFTL())) {
             fixEdge<StringUse>(node->child2());
             node->setOpAndDefaultFlags(CompareStrictEq);
             return;
index ca4b0e2..d4483c6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -70,7 +70,7 @@ static const char* dfgOpNames[] = {
 Graph::Graph(VM& vm, Plan& plan)
     : m_vm(vm)
     , m_plan(plan)
-    , m_codeBlock(m_plan.codeBlock)
+    , m_codeBlock(m_plan.codeBlock())
     , m_profiledBlock(m_codeBlock->alternative())
     , m_ssaCFG(std::make_unique<SSACFG>(*this))
     , m_nextMachineLocal(0)
@@ -1022,14 +1022,15 @@ bool Graph::watchCondition(const ObjectPropertyCondition& key)
 {
     if (!key.isWatchable())
         return false;
-    
-    m_plan.weakReferences.addLazily(key.object());
+
+    DesiredWeakReferences& weakReferences = m_plan.weakReferences();
+    weakReferences.addLazily(key.object());
     if (key.hasPrototype())
-        m_plan.weakReferences.addLazily(key.prototype());
+        weakReferences.addLazily(key.prototype());
     if (key.hasRequiredValue())
-        m_plan.weakReferences.addLazily(key.requiredValue());
-    
-    m_plan.watchpoints.addLazily(key);
+        weakReferences.addLazily(key.requiredValue());
+
+    m_plan.watchpoints().addLazily(key);
 
     if (key.kind() == PropertyCondition::Presence)
         m_safeToLoad.add(std::make_pair(key.object(), key.offset()));
@@ -1076,12 +1077,12 @@ InferredType::Descriptor Graph::inferredTypeFor(const PropertyTypeKey& key)
     
     m_inferredTypes.add(key, typeDescriptor);
 
-    m_plan.weakReferences.addLazily(typeObject);
+    m_plan.weakReferences().addLazily(typeObject);
     registerInferredType(typeDescriptor);
 
     // Note that we may already be watching this desired inferred type, because multiple structures may
     // point to the same InferredType instance.
-    m_plan.watchpoints.addLazily(DesiredInferredType(typeObject, typeDescriptor));
+    m_plan.watchpoints().addLazily(DesiredInferredType(typeObject, typeDescriptor));
 
     return typeDescriptor;
 }
@@ -1226,7 +1227,7 @@ unsigned Graph::stackPointerOffset()
 unsigned Graph::requiredRegisterCountForExit()
 {
     unsigned count = JIT::frameRegisterCountFor(m_profiledBlock);
-    for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+    for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames()->begin(); !!iter; ++iter) {
         InlineCallFrame* inlineCallFrame = *iter;
         CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
         unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock);
@@ -1426,11 +1427,11 @@ void Graph::registerFrozenValues()
             continue;
         
         ASSERT(value->structure());
-        ASSERT(m_plan.weakReferences.contains(value->structure()));
-        
+        ASSERT(m_plan.weakReferences().contains(value->structure()));
+
         switch (value->strength()) {
         case WeakValue: {
-            m_plan.weakReferences.addLazily(value->value().asCell());
+            m_plan.weakReferences().addLazily(value->value().asCell());
             break;
         }
         case StrongValue: {
@@ -1503,8 +1504,8 @@ void Graph::convertToStrongConstant(Node* node, JSValue value)
 
 RegisteredStructure Graph::registerStructure(Structure* structure, StructureRegistrationResult& result)
 {
-    m_plan.weakReferences.addLazily(structure);
-    if (m_plan.watchpoints.consider(structure))
+    m_plan.weakReferences().addLazily(structure);
+    if (m_plan.watchpoints().consider(structure))
         result = StructureRegisteredAndWatched;
     else
         result = StructureRegisteredNormally;
@@ -1513,8 +1514,8 @@ RegisteredStructure Graph::registerStructure(Structure* structure, StructureRegi
 
 void Graph::registerAndWatchStructureTransition(Structure* structure)
 {
-    m_plan.weakReferences.addLazily(structure);
-    m_plan.watchpoints.addLazily(structure->transitionWatchpointSet());
+    m_plan.weakReferences().addLazily(structure);
+    m_plan.watchpoints().addLazily(structure->transitionWatchpointSet());
 }
 
 void Graph::assertIsRegistered(Structure* structure)
@@ -1522,9 +1523,9 @@ void Graph::assertIsRegistered(Structure* structure)
     // It's convenient to be able to call this with a maybe-null structure.
     if (!structure)
         return;
-    
-    DFG_ASSERT(*this, nullptr, m_plan.weakReferences.contains(structure));
-    
+
+    DFG_ASSERT(*this, nullptr, m_plan.weakReferences().contains(structure));
+
     if (!structure->dfgShouldWatch())
         return;
     if (watchpoints().isWatched(structure->transitionWatchpointSet()))
index 24f1653..2a09c0f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -780,12 +780,12 @@ public:
         InlineWatchpointSet& set = globalObject->numberToStringWatchpoint();
         return isWatchingGlobalObjectWatchpoint(globalObject, set);
     }
-    
-    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
-    
-    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
-    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
-    
+
+    Profiler::Compilation* compilation() { return m_plan.compilation(); }
+
+    DesiredIdentifiers& identifiers() { return m_plan.identifiers(); }
+    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints(); }
+
     // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
     // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
     // what's going on.
index 5fd50b8..4f06be7 100644 (file)
@@ -59,8 +59,8 @@ JITCompiler::JITCompiler(Graph& dfg)
     if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
         m_disassembler = std::make_unique<Disassembler>(dfg);
 #if ENABLE(FTL_JIT)
-    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
-    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
+    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
+    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
         m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
 #endif
 }
@@ -192,9 +192,9 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
     m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
 
-    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
-        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
-    
+    if (!m_graph.m_plan.inlineCallFrames()->isEmpty())
+        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames();
+
 #if USE(JSVALUE32_64)
     m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
 #endif
@@ -406,7 +406,7 @@ void JITCompiler::compile()
 
     auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
         return;
     }
     
@@ -417,9 +417,9 @@ void JITCompiler::compile()
     codeBlock()->shrinkToFit(CodeBlock::LateShrink);
 
     disassemble(*linkBuffer);
-    
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
+
+    m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
+        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer)));
 }
 
 void JITCompiler::compileFunction()
@@ -511,7 +511,7 @@ void JITCompiler::compileFunction()
     // === Link ===
     auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
     if (linkBuffer->didFailToAllocate()) {
-        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
+        m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
         return;
     }
     link(*linkBuffer);
@@ -527,8 +527,8 @@ void JITCompiler::compileFunction()
 
     MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(arityCheck);
 
-    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
-        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
+    m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
+        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck));
 }
 
 void JITCompiler::disassemble(LinkBuffer& linkBuffer)
@@ -537,9 +537,9 @@ void JITCompiler::disassemble(LinkBuffer& linkBuffer)
         m_disassembler->dump(linkBuffer);
         linkBuffer.didAlreadyDisassemble();
     }
-    
-    if (UNLIKELY(m_graph.m_plan.compilation))
-        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
+
+    if (UNLIKELY(m_graph.m_plan.compilation()))
+        m_disassembler->reportToProfiler(m_graph.m_plan.compilation(), linkBuffer);
 }
 
 #if USE(JSVALUE32_64)
index c17b032..0bd66c7 100644 (file)
@@ -214,7 +214,7 @@ public:
     
     void addWeakReference(JSCell* target)
     {
-        m_graph.m_plan.weakReferences.addLazily(target);
+        m_graph.m_plan.weakReferences().addLazily(target);
     }
     
     void addWeakReferences(const StructureSet& structureSet)
index e46196c..73326e2 100644 (file)
@@ -56,11 +56,11 @@ size_t JITFinalizer::codeSize()
 
 bool JITFinalizer::finalize()
 {
-    MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data());
+    MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data());
     m_jitCode->initializeCodeRef(codeRef, codeRef.code());
-    
-    m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
-    
+
+    m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());
+
     finalizeCommon();
     
     return true;
@@ -70,10 +70,10 @@ bool JITFinalizer::finalizeFunction()
 {
     RELEASE_ASSERT(!m_withArityCheck.isEmptyValue());
     m_jitCode->initializeCodeRef(
-        FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::DFGJIT)).data()),
+        FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data()),
         m_withArityCheck);
-    m_plan.codeBlock->setJITCode(m_jitCode.copyRef());
-    
+    m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());
+
     finalizeCommon();
     
     return true;
@@ -82,18 +82,18 @@ bool JITFinalizer::finalizeFunction()
 void JITFinalizer::finalizeCommon()
 {
     // Some JIT finalizers may have added more constants. Shrink-to-fit those things now.
-    m_plan.codeBlock->constants().shrinkToFit();
-    m_plan.codeBlock->constantsSourceCodeRepresentation().shrinkToFit();
-    
+    m_plan.codeBlock()->constants().shrinkToFit();
+    m_plan.codeBlock()->constantsSourceCodeRepresentation().shrinkToFit();
+
 #if ENABLE(FTL_JIT)
-    m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock);
+    m_jitCode->optimizeAfterWarmUp(m_plan.codeBlock());
 #endif // ENABLE(FTL_JIT)
-    
-    if (UNLIKELY(m_plan.compilation))
-        m_plan.vm->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock, *m_plan.compilation);
-    
-    if (!m_plan.willTryToTierUp)
-        m_plan.codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
+
+    if (UNLIKELY(m_plan.compilation()))
+        m_plan.vm()->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock(), *m_plan.compilation());
+
+    if (!m_plan.willTryToTierUp())
+        m_plan.codeBlock()->baselineVersion()->m_didFailFTLCompilation = true;
 }
 
 } } // namespace JSC::DFG
index fbdbe0a..ed52c0b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -47,10 +47,10 @@ public:
     
     bool run()
     {
-        RELEASE_ASSERT(m_graph.m_plan.mode == FTLForOSREntryMode);
+        RELEASE_ASSERT(m_graph.m_plan.mode() == FTLForOSREntryMode);
         RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
-        
-        unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex;
+
+        unsigned bytecodeIndex = m_graph.m_plan.osrEntryBytecodeIndex();
         RELEASE_ASSERT(bytecodeIndex);
         RELEASE_ASSERT(bytecodeIndex != UINT_MAX);
         
index b225531..41079e1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -45,8 +45,8 @@ void Phase::beginPhase()
         m_graph.dump(out);
         m_graphDumpBeforePhase = out.toCString();
     }
-    
-    if (!shouldDumpGraphAtEachPhase(m_graph.m_plan.mode))
+
+    if (!shouldDumpGraphAtEachPhase(m_graph.m_plan.mode()))
         return;
     
     dataLog("Beginning DFG phase ", m_name, ".\n");
index 863e068..f9c7050 100644 (file)
@@ -81,7 +81,7 @@ bool runAndLog(PhaseType& phase)
     
     bool result = phase.run();
 
-    if (result && logCompilationChanges(phase.graph().m_plan.mode))
+    if (result && logCompilationChanges(phase.graph().m_plan.mode()))
         dataLogF("Phase %s changed the IR.\n", phase.name());
     return result;
 }
index f122f6e..cde4860 100644 (file)
@@ -105,7 +105,7 @@ namespace {
 void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
 {
     GraphDumpMode modeForFinalValidate = DumpGraph;
-    if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
+    if (verboseCompilationEnabled(graph.m_plan.mode()) || forceDump) {
         dataLog(text, "\n");
         graph.dump();
         modeForFinalValidate = DontDumpGraph;
@@ -136,19 +136,19 @@ Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
 Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
     CompilationMode mode, unsigned osrEntryBytecodeIndex,
     const Operands<JSValue>& mustHandleValues)
-    : vm(passedCodeBlock->vm())
-    , codeBlock(passedCodeBlock)
-    , profiledDFGCodeBlock(profiledDFGCodeBlock)
-    , mode(mode)
-    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
-    , mustHandleValues(mustHandleValues)
-    , compilation(UNLIKELY(vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(vm->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
-    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
-    , identifiers(codeBlock)
-    , weakReferences(codeBlock)
-    , stage(Preparing)
+    : m_vm(passedCodeBlock->vm())
+    , m_codeBlock(passedCodeBlock)
+    , m_profiledDFGCodeBlock(profiledDFGCodeBlock)
+    , m_mode(mode)
+    , m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
+    , m_mustHandleValues(mustHandleValues)
+    , m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
+    , m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
+    , m_identifiers(m_codeBlock)
+    , m_weakReferences(m_codeBlock)
+    , m_stage(Preparing)
 {
-    RELEASE_ASSERT(codeBlock->alternative()->jitCode());
+    RELEASE_ASSERT(m_codeBlock->alternative()->jitCode());
 }
 
 Plan::~Plan()
@@ -159,43 +159,43 @@ bool Plan::computeCompileTimes() const
 {
     return reportCompileTimes()
         || Options::reportTotalCompileTimes()
-        || (vm && vm->m_perBytecodeProfiler);
+        || (m_vm && m_vm->m_perBytecodeProfiler);
 }
 
 bool Plan::reportCompileTimes() const
 {
     return Options::reportCompileTimes()
         || Options::reportDFGCompileTimes()
-        || (Options::reportFTLCompileTimes() && isFTL(mode));
+        || (Options::reportFTLCompileTimes() && isFTL());
 }
 
 void Plan::compileInThread(ThreadData* threadData)
 {
-    this->threadData = threadData;
-    
+    m_threadData = threadData;
+
     MonotonicTime before { };
     CString codeBlockName;
     if (UNLIKELY(computeCompileTimes()))
         before = MonotonicTime::now();
     if (UNLIKELY(reportCompileTimes()))
-        codeBlockName = toCString(*codeBlock);
-    
+        codeBlockName = toCString(*m_codeBlock);
+
     CompilationScope compilationScope;
 
-    if (logCompilationChanges(mode) || Options::logPhaseTimes())
-        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
+    if (logCompilationChanges(m_mode) || Options::logPhaseTimes())
+        dataLog("DFG(Plan) compiling ", *m_codeBlock, " with ", m_mode, ", number of instructions = ", m_codeBlock->instructionCount(), "\n");
 
     CompilationPath path = compileInThreadImpl();
 
-    RELEASE_ASSERT(path == CancelPath || finalizer);
-    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));
-    
+    RELEASE_ASSERT(path == CancelPath || m_finalizer);
+    RELEASE_ASSERT((path == CancelPath) == (m_stage == Cancelled));
+
     MonotonicTime after { };
     if (UNLIKELY(computeCompileTimes())) {
         after = MonotonicTime::now();
     
         if (Options::reportTotalCompileTimes()) {
-            if (isFTL(mode)) {
+            if (isFTL()) {
                 totalFTLCompileTime += after - before;
                 totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                 totalFTLB3CompileTime += after - m_timeBeforeFTL;
@@ -221,14 +221,14 @@ void Plan::compileInThread(ThreadData* threadData)
         RELEASE_ASSERT_NOT_REACHED();
         break;
     }
-    if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
+    if (m_codeBlock) { // m_codeBlock will be null if the compilation was cancelled.
         if (path == FTLPath)
-            CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
+            CODEBLOCK_LOG_EVENT(m_codeBlock, "ftlCompile", ("took ", (after - before).milliseconds(), " ms (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ") with ", pathName));
         else
-            CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
+            CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgCompile", ("took ", (after - before).milliseconds(), " ms with ", pathName));
     }
     if (UNLIKELY(reportCompileTimes())) {
-        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
+        dataLog("Optimized ", codeBlockName, " using ", m_mode, " with ", pathName, " into ", m_finalizer ? m_finalizer->codeSize() : 0, " bytes in ", (after - before).milliseconds(), " ms");
         if (path == FTLPath)
             dataLog(" (DFG: ", (m_timeBeforeFTL - before).milliseconds(), ", B3: ", (after - m_timeBeforeFTL).milliseconds(), ")");
         dataLog(".\n");
@@ -238,17 +238,17 @@ void Plan::compileInThread(ThreadData* threadData)
 Plan::CompilationPath Plan::compileInThreadImpl()
 {
     cleanMustHandleValuesIfNecessary();
-    
-    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
+
+    if (verboseCompilationEnabled(m_mode) && m_osrEntryBytecodeIndex != UINT_MAX) {
         dataLog("\n");
-        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
+        dataLog("Compiler must handle OSR entry from bc#", m_osrEntryBytecodeIndex, " with values: ", m_mustHandleValues, "\n");
         dataLog("\n");
     }
-    
-    Graph dfg(*vm, *this);
+
+    Graph dfg(*m_vm, *this);
     parse(dfg);
 
-    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
+    m_codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
 
     bool changed = false;
 
@@ -270,7 +270,7 @@ Plan::CompilationPath Plan::compileInThreadImpl()
     // in the CodeBlock. This is a good time to perform an early shrink, which is more
     // powerful than a late one. It's safe to do so because we haven't generated any code
     // that references any of the tables directly, yet.
-    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
+    m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
 
     if (validationEnabled())
         validate(dfg);
@@ -290,11 +290,11 @@ Plan::CompilationPath Plan::compileInThreadImpl()
     RUN_PHASE(performPredictionInjection);
     
     RUN_PHASE(performStaticExecutionCountEstimation);
-    
-    if (mode == FTLForOSREntryMode) {
+
+    if (m_mode == FTLForOSREntryMode) {
         bool result = performOSREntrypointCreation(dfg);
         if (!result) {
-            finalizer = std::make_unique<FailedFinalizer>(*this);
+            m_finalizer = std::make_unique<FailedFinalizer>(*this);
             return FailPath;
         }
         RUN_PHASE(performCPSRethreading);
@@ -308,9 +308,9 @@ Plan::CompilationPath Plan::compileInThreadImpl()
     RUN_PHASE(performFixup);
     RUN_PHASE(performInvalidationPointInjection);
     RUN_PHASE(performTypeCheckHoisting);
-    
+
     dfg.m_fixpointState = FixpointNotConverged;
-    
+
     // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
     // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
     // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
@@ -331,7 +331,7 @@ Plan::CompilationPath Plan::compileInThreadImpl()
         validate(dfg);
     
     RUN_PHASE(performCPSRethreading);
-    if (!isFTL(mode)) {
+    if (!isFTL()) {
         // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
         // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
         // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
@@ -365,10 +365,10 @@ Plan::CompilationPath Plan::compileInThreadImpl()
         dfg.ensureCPSNaturalLoops();
     }
 
-    switch (mode) {
+    switch (m_mode) {
     case DFGMode: {
         dfg.m_fixpointState = FixpointConverged;
-    
+
         RUN_PHASE(performTierUpCheckInjection);
 
         RUN_PHASE(performFastStoreBarrierInsertion);
@@ -383,7 +383,7 @@ Plan::CompilationPath Plan::compileInThreadImpl()
         dumpAndVerifyGraph(dfg, "Graph after optimization:");
         
         JITCompiler dataFlowJIT(dfg);
-        if (codeBlock->codeType() == FunctionCode)
+        if (m_codeBlock->codeType() == FunctionCode)
             dataFlowJIT.compileFunction();
         else
             dataFlowJIT.compile();
@@ -395,7 +395,7 @@ Plan::CompilationPath Plan::compileInThreadImpl()
     case FTLForOSREntryMode: {
 #if ENABLE(FTL_JIT)
         if (FTL::canCompile(dfg) == FTL::CannotCompile) {
-            finalizer = std::make_unique<FailedFinalizer>(*this);
+            m_finalizer = std::make_unique<FailedFinalizer>(*this);
             return FailPath;
         }
         
@@ -454,11 +454,11 @@ Plan::CompilationPath Plan::compileInThreadImpl()
         RUN_PHASE(performCleanUp);
         RUN_PHASE(performIntegerCheckCombining);
         RUN_PHASE(performGlobalCSE);
-        
+
         // At this point we're not allowed to do any further code motion because our reasoning
         // about code motion assumes that it's OK to insert GC points in random places.
         dfg.m_fixpointState = FixpointConverged;
-        
+
         RUN_PHASE(performLivenessAnalysis);
         RUN_PHASE(performCFA);
         RUN_PHASE(performGlobalStoreBarrierInsertion);
@@ -473,11 +473,11 @@ Plan::CompilationPath Plan::compileInThreadImpl()
         RUN_PHASE(performWatchpointCollection);
         
         if (FTL::canCompile(dfg) == FTL::CannotCompile) {
-            finalizer = std::make_unique<FailedFinalizer>(*this);
+            m_finalizer = std::make_unique<FailedFinalizer>(*this);
             return FailPath;
         }
 
-        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));
+        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(m_mode));
 
         // Flash a safepoint in case the GC wants some action.
         Safepoint::Result safepointResult;
@@ -536,95 +536,95 @@ Plan::CompilationPath Plan::compileInThreadImpl()
 
 bool Plan::isStillValid()
 {
-    CodeBlock* replacement = codeBlock->replacement();
+    CodeBlock* replacement = m_codeBlock->replacement();
     if (!replacement)
         return false;
     // FIXME: This is almost certainly not necessary. There's no way for the baseline
     // code to be replaced during a compilation, except if we delete the plan, in which
     // case we wouldn't be here.
     // https://bugs.webkit.org/show_bug.cgi?id=132707
-    if (codeBlock->alternative() != replacement->baselineVersion())
+    if (m_codeBlock->alternative() != replacement->baselineVersion())
         return false;
-    if (!watchpoints.areStillValid())
+    if (!m_watchpoints.areStillValid())
         return false;
     return true;
 }
 
 void Plan::reallyAdd(CommonData* commonData)
 {
-    watchpoints.reallyAdd(codeBlock, *commonData);
-    identifiers.reallyAdd(*vm, commonData);
-    weakReferences.reallyAdd(*vm, commonData);
-    transitions.reallyAdd(*vm, commonData);
-    commonData->recordedStatuses = WTFMove(recordedStatuses);
+    m_watchpoints.reallyAdd(m_codeBlock, *commonData);
+    m_identifiers.reallyAdd(*m_vm, commonData);
+    m_weakReferences.reallyAdd(*m_vm, commonData);
+    m_transitions.reallyAdd(*m_vm, commonData);
+    commonData->recordedStatuses = WTFMove(m_recordedStatuses);
 }
 
 void Plan::notifyCompiling()
 {
-    stage = Compiling;
+    m_stage = Compiling;
 }
 
 void Plan::notifyReady()
 {
-    callback->compilationDidBecomeReadyAsynchronously(codeBlock, profiledDFGCodeBlock);
-    stage = Ready;
+    m_callback->compilationDidBecomeReadyAsynchronously(m_codeBlock, m_profiledDFGCodeBlock);
+    m_stage = Ready;
 }
 
 CompilationResult Plan::finalizeWithoutNotifyingCallback()
 {
     // We will establish new references from the code block to things. So, we need a barrier.
-    vm->heap.writeBarrier(codeBlock);
-    
+    m_vm->heap.writeBarrier(m_codeBlock);
+
     if (!isStillValid()) {
-        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("invalidated"));
+        CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("invalidated"));
         return CompilationInvalidated;
     }
 
     bool result;
-    if (codeBlock->codeType() == FunctionCode)
-        result = finalizer->finalizeFunction();
+    if (m_codeBlock->codeType() == FunctionCode)
+        result = m_finalizer->finalizeFunction();
     else
-        result = finalizer->finalize();
-    
+        result = m_finalizer->finalize();
+
     if (!result) {
-        CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("failed"));
+        CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("failed"));
         return CompilationFailed;
     }
-    
-    reallyAdd(codeBlock->jitCode()->dfgCommon());
-    
+
+    reallyAdd(m_codeBlock->jitCode()->dfgCommon());
+
     if (validationEnabled()) {
         TrackedReferences trackedReferences;
-        
-        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
+
+        for (WriteBarrier<JSCell>& reference : m_codeBlock->jitCode()->dfgCommon()->weakReferences)
             trackedReferences.add(reference.get());
-        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
+        for (WriteBarrier<Structure>& reference : m_codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
             trackedReferences.add(reference.get());
-        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
+        for (WriteBarrier<Unknown>& constant : m_codeBlock->constants())
             trackedReferences.add(constant.get());
 
-        for (auto* inlineCallFrame : *inlineCallFrames) {
+        for (auto* inlineCallFrame : *m_inlineCallFrames) {
             ASSERT(inlineCallFrame->baselineCodeBlock.get());
             trackedReferences.add(inlineCallFrame->baselineCodeBlock.get());
         }
-        
+
         // Check that any other references that we have anywhere in the JITCode are also
         // tracked either strongly or weakly.
-        codeBlock->jitCode()->validateReferences(trackedReferences);
+        m_codeBlock->jitCode()->validateReferences(trackedReferences);
     }
-    
-    CODEBLOCK_LOG_EVENT(codeBlock, "dfgFinalize", ("succeeded"));
+
+    CODEBLOCK_LOG_EVENT(m_codeBlock, "dfgFinalize", ("succeeded"));
     return CompilationSuccessful;
 }
 
 void Plan::finalizeAndNotifyCallback()
 {
-    callback->compilationDidComplete(codeBlock, profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
+    m_callback->compilationDidComplete(m_codeBlock, m_profiledDFGCodeBlock, finalizeWithoutNotifyingCallback());
 }
 
 CompilationKey Plan::key()
 {
-    return CompilationKey(codeBlock->alternative(), mode);
+    return CompilationKey(m_codeBlock->alternative(), m_mode);
 }
 
 void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
@@ -633,82 +633,82 @@ void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor)
         return;
 
     cleanMustHandleValuesIfNecessary();
-    for (unsigned i = mustHandleValues.size(); i--;)
-        visitor.appendUnbarriered(mustHandleValues[i]);
-    
-    recordedStatuses.markIfCheap(visitor);
+    for (unsigned i = m_mustHandleValues.size(); i--;)
+        visitor.appendUnbarriered(m_mustHandleValues[i]);
+
+    m_recordedStatuses.markIfCheap(visitor);
 
-    visitor.appendUnbarriered(codeBlock);
-    visitor.appendUnbarriered(codeBlock->alternative());
-    visitor.appendUnbarriered(profiledDFGCodeBlock);
+    visitor.appendUnbarriered(m_codeBlock);
+    visitor.appendUnbarriered(m_codeBlock->alternative());
+    visitor.appendUnbarriered(m_profiledDFGCodeBlock);
 
-    if (inlineCallFrames) {
-        for (auto* inlineCallFrame : *inlineCallFrames) {
+    if (m_inlineCallFrames) {
+        for (auto* inlineCallFrame : *m_inlineCallFrames) {
             ASSERT(inlineCallFrame->baselineCodeBlock.get());
             visitor.appendUnbarriered(inlineCallFrame->baselineCodeBlock.get());
         }
     }
 
-    weakReferences.visitChildren(visitor);
-    transitions.visitChildren(visitor);
+    m_weakReferences.visitChildren(visitor);
+    m_transitions.visitChildren(visitor);
 }
 
 void Plan::finalizeInGC()
 {
-    recordedStatuses.finalizeWithoutDeleting();
+    m_recordedStatuses.finalizeWithoutDeleting();
 }
 
 bool Plan::isKnownToBeLiveDuringGC()
 {
-    if (stage == Cancelled)
+    if (m_stage == Cancelled)
         return false;
-    if (!Heap::isMarked(codeBlock->ownerExecutable()))
+    if (!Heap::isMarked(m_codeBlock->ownerExecutable()))
         return false;
-    if (!Heap::isMarked(codeBlock->alternative()))
+    if (!Heap::isMarked(m_codeBlock->alternative()))
         return false;
-    if (!!profiledDFGCodeBlock && !Heap::isMarked(profiledDFGCodeBlock))
+    if (!!m_profiledDFGCodeBlock && !Heap::isMarked(m_profiledDFGCodeBlock))
         return false;
     return true;
 }
 
 void Plan::cancel()
 {
-    vm = nullptr;
-    codeBlock = nullptr;
-    profiledDFGCodeBlock = nullptr;
-    mustHandleValues.clear();
-    compilation = nullptr;
-    finalizer = nullptr;
-    inlineCallFrames = nullptr;
-    watchpoints = DesiredWatchpoints();
-    identifiers = DesiredIdentifiers();
-    weakReferences = DesiredWeakReferences();
-    transitions = DesiredTransitions();
-    callback = nullptr;
-    stage = Cancelled;
+    m_vm = nullptr;
+    m_codeBlock = nullptr;
+    m_profiledDFGCodeBlock = nullptr;
+    m_mustHandleValues.clear();
+    m_compilation = nullptr;
+    m_finalizer = nullptr;
+    m_inlineCallFrames = nullptr;
+    m_watchpoints = DesiredWatchpoints();
+    m_identifiers = DesiredIdentifiers();
+    m_weakReferences = DesiredWeakReferences();
+    m_transitions = DesiredTransitions();
+    m_callback = nullptr;
+    m_stage = Cancelled;
 }
 
 void Plan::cleanMustHandleValuesIfNecessary()
 {
-    LockHolder locker(mustHandleValueCleaningLock);
-    
-    if (!mustHandleValuesMayIncludeGarbage)
+    LockHolder locker(m_mustHandleValueCleaningLock);
+
+    if (!m_mustHandleValuesMayIncludeGarbage)
         return;
-    
-    mustHandleValuesMayIncludeGarbage = false;
-    
-    if (!codeBlock)
+
+    m_mustHandleValuesMayIncludeGarbage = false;
+
+    if (!m_codeBlock)
         return;
-    
-    if (!mustHandleValues.numberOfLocals())
+
+    if (!m_mustHandleValues.numberOfLocals())
         return;
-    
-    CodeBlock* alternative = codeBlock->alternative();
-    FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, osrEntryBytecodeIndex);
-    
-    for (unsigned local = mustHandleValues.numberOfLocals(); local--;) {
+
+    CodeBlock* alternative = m_codeBlock->alternative();
+    FastBitVector liveness = alternative->livenessAnalysis().getLivenessInfoAtBytecodeOffset(alternative, m_osrEntryBytecodeIndex);
+
+    for (unsigned local = m_mustHandleValues.numberOfLocals(); local--;) {
         if (!liveness[local])
-            mustHandleValues.local(local) = jsUndefined();
+            m_mustHandleValues.local(local) = jsUndefined();
     }
 }
 
index ef4ab5c..3da11bd 100644 (file)
@@ -51,7 +51,8 @@ class ThreadData;
 
 #if ENABLE(DFG_JIT)
 
-struct Plan : public ThreadSafeRefCounted<Plan> {
+class Plan : public ThreadSafeRefCounted<Plan> {
+public:
     Plan(
         CodeBlock* codeBlockToCompile, CodeBlock* profiledDFGCodeBlock,
         CompilationMode, unsigned osrEntryBytecodeIndex,
@@ -76,47 +77,43 @@ struct Plan : public ThreadSafeRefCounted<Plan> {
     void finalizeInGC();
     void cancel();
 
-    bool canTierUpAndOSREnter() const { return !tierUpAndOSREnterBytecodes.isEmpty(); }
-    
+    bool canTierUpAndOSREnter() const { return !m_tierUpAndOSREnterBytecodes.isEmpty(); }
+
     void cleanMustHandleValuesIfNecessary();
-    
-    // Warning: pretty much all of the pointer fields in this object get nulled by cancel(). So, if
-    // you're writing code that is callable on the cancel path, be sure to null check everything!
-    
-    VM* vm;
 
-    // These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren.
-    CodeBlock* codeBlock;
-    CodeBlock* profiledDFGCodeBlock;
-
-    CompilationMode mode;
-    const unsigned osrEntryBytecodeIndex;
-    Operands<JSValue> mustHandleValues;
-    bool mustHandleValuesMayIncludeGarbage { true };
-    Lock mustHandleValueCleaningLock;
-    
-    ThreadData* threadData;
+    VM* vm() const { return m_vm; }
 
-    RefPtr<Profiler::Compilation> compilation;
+    CodeBlock* codeBlock() { return m_codeBlock; }
 
-    std::unique_ptr<Finalizer> finalizer;
-    
-    RefPtr<InlineCallFrameSet> inlineCallFrames;
-    DesiredWatchpoints watchpoints;
-    DesiredIdentifiers identifiers;
-    DesiredWeakReferences weakReferences;
-    DesiredTransitions transitions;
-    RecordedStatuses recordedStatuses;
-    
-    bool willTryToTierUp { false };
+    bool isFTL() const { return DFG::isFTL(m_mode); }
+    CompilationMode mode() const { return m_mode; }
+    unsigned osrEntryBytecodeIndex() const { return m_osrEntryBytecodeIndex; }
+    const Operands<JSValue>& mustHandleValues() const { return m_mustHandleValues; }
+
+    ThreadData* threadData() const { return m_threadData; }
+    Profiler::Compilation* compilation() const { return m_compilation.get(); }
+
+    Finalizer* finalizer() const { return m_finalizer.get(); }
+    void setFinalizer(std::unique_ptr<Finalizer>&& finalizer) { m_finalizer = WTFMove(finalizer); }
 
-    HashMap<unsigned, Vector<unsigned>> tierUpInLoopHierarchy;
-    Vector<unsigned> tierUpAndOSREnterBytecodes;
+    RefPtr<InlineCallFrameSet> inlineCallFrames() const { return m_inlineCallFrames; }
+    DesiredWatchpoints& watchpoints() { return m_watchpoints; }
+    DesiredIdentifiers& identifiers() { return m_identifiers; }
+    DesiredWeakReferences& weakReferences() { return m_weakReferences; }
+    DesiredTransitions& transitions() { return m_transitions; }
+    RecordedStatuses& recordedStatuses() { return m_recordedStatuses; }
+
+    bool willTryToTierUp() const { return m_willTryToTierUp; }
+    void setWillTryToTierUp(bool willTryToTierUp) { m_willTryToTierUp = willTryToTierUp; }
+
+    HashMap<unsigned, Vector<unsigned>>& tierUpInLoopHierarchy() { return m_tierUpInLoopHierarchy; }
+    Vector<unsigned>& tierUpAndOSREnterBytecodes() { return m_tierUpAndOSREnterBytecodes; }
 
     enum Stage { Preparing, Compiling, Ready, Cancelled };
-    Stage stage;
+    Stage stage() const { return m_stage; }
 
-    RefPtr<DeferredCompilationCallback> callback;
+    DeferredCompilationCallback* callback() const { return m_callback.get(); }
+    void setCallback(Ref<DeferredCompilationCallback>&& callback) { m_callback = WTFMove(callback); }
 
 private:
     bool computeCompileTimes() const;
@@ -128,6 +125,43 @@ private:
     bool isStillValid();
     void reallyAdd(CommonData*);
 
+    // Warning: pretty much all of the pointer fields in this object get nulled by cancel(). So, if
+    // you're writing code that is callable on the cancel path, be sure to null check everything!
+
+    VM* m_vm;
+
+    // These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren.
+    CodeBlock* m_codeBlock;
+    CodeBlock* m_profiledDFGCodeBlock;
+
+    CompilationMode m_mode;
+    const unsigned m_osrEntryBytecodeIndex;
+    Operands<JSValue> m_mustHandleValues;
+    bool m_mustHandleValuesMayIncludeGarbage { true };
+    Lock m_mustHandleValueCleaningLock;
+
+    ThreadData* m_threadData;
+
+    RefPtr<Profiler::Compilation> m_compilation;
+
+    std::unique_ptr<Finalizer> m_finalizer;
+
+    RefPtr<InlineCallFrameSet> m_inlineCallFrames;
+    DesiredWatchpoints m_watchpoints;
+    DesiredIdentifiers m_identifiers;
+    DesiredWeakReferences m_weakReferences;
+    DesiredTransitions m_transitions;
+    RecordedStatuses m_recordedStatuses;
+
+    bool m_willTryToTierUp { false };
+
+    HashMap<unsigned, Vector<unsigned>> m_tierUpInLoopHierarchy;
+    Vector<unsigned> m_tierUpAndOSREnterBytecodes;
+
+    Stage m_stage;
+
+    RefPtr<DeferredCompilationCallback> m_callback;
+
     MonotonicTime m_timeBeforeFTL;
 };
 
index 151f089..37cd046 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -41,10 +41,10 @@ void Plan::iterateCodeBlocksForGC(const Func& func)
     // an explicit barrier. So, we need to be pessimistic and assume that
     // all our CodeBlocks must be visited during GC.
 
-    func(codeBlock);
-    func(codeBlock->alternative());
-    if (profiledDFGCodeBlock)
-        func(profiledDFGCodeBlock);
+    func(m_codeBlock);
+    func(m_codeBlock->alternative());
+    if (m_profiledDFGCodeBlock)
+        func(m_profiledDFGCodeBlock);
 }
 
 #endif // ENABLE(DFG_JIT)
index 0556640..0d8bffe 100644 (file)
@@ -179,8 +179,8 @@ private:
             default:
                 break;
             }
-            
-            if (isPhantomNode && isFTL(m_graph.m_plan.mode))
+
+            if (isPhantomNode && m_graph.m_plan.isFTL())
                 break;
             
             if (isForwardingNode && m_node->hasArgumentsChild() && m_node->argumentsChild()
index 6ce864b..d0c720d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -68,16 +68,17 @@ public:
                 continue;
             if (!block->isOSRTarget)
                 continue;
-            if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+            if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
                 continue;
-            for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
-                int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
+            const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+            for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+                int operand = mustHandleValues.operandForIndex(i);
                 Node* node = block->variablesAtHead.operand(operand);
                 if (!node)
                     continue;
                 ASSERT(node->accessesStack(m_graph));
                 node->variableAccessData()->predict(
-                    speculationFromValue(m_graph.m_plan.mustHandleValues[i]));
+                    speculationFromValue(mustHandleValues[i]));
             }
         }
         
index 948c057..0ef0318 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -47,7 +47,7 @@ bool Safepoint::Result::didGetCancelled()
 }
 
 Safepoint::Safepoint(Plan& plan, Result& result)
-    : m_vm(plan.vm)
+    : m_vm(plan.vm())
     , m_plan(plan)
     , m_didCallBegin(false)
     , m_result(result)
@@ -60,7 +60,7 @@ Safepoint::Safepoint(Plan& plan, Result& result)
 Safepoint::~Safepoint()
 {
     RELEASE_ASSERT(m_didCallBegin);
-    if (ThreadData* data = m_plan.threadData) {
+    if (ThreadData* data = m_plan.threadData()) {
         RELEASE_ASSERT(data->m_safepoint == this);
         data->m_rightToRun.lock();
         data->m_safepoint = nullptr;
@@ -77,7 +77,7 @@ void Safepoint::begin()
 {
     RELEASE_ASSERT(!m_didCallBegin);
     m_didCallBegin = true;
-    if (ThreadData* data = m_plan.threadData) {
+    if (ThreadData* data = m_plan.threadData()) {
         RELEASE_ASSERT(!data->m_safepoint);
         data->m_safepoint = this;
         data->m_rightToRun.unlockFairly();
index f46ccfd..720d0e3 100644 (file)
@@ -36,8 +36,8 @@ class VM;
 
 namespace DFG {
 
+class Plan;
 class Scannable;
-struct Plan;
 
 class Safepoint {
 public:
index 65be2d1..8bb6a54 100644 (file)
@@ -145,14 +145,14 @@ public:
 
         static TrustedImmPtr weakPointer(Graph& graph, JSCell* cell)
         {     
-            graph.m_plan.weakReferences.addLazily(cell);
+            graph.m_plan.weakReferences().addLazily(cell);
             return TrustedImmPtr(bitwise_cast<size_t>(cell));
         }
 
         template<typename Key>
         static TrustedImmPtr weakPoisonedPointer(Graph& graph, JSCell* cell)
         {     
-            graph.m_plan.weakReferences.addLazily(cell);
+            graph.m_plan.weakReferences().addLazily(cell);
             return TrustedImmPtr(bitwise_cast<size_t>(cell) ^ Key::key());
         }
 
index 1aeec68..e864d45 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -103,8 +103,8 @@ public:
                 }
             }
         }
-        
-        for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
+
+        for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames()->begin(); !!iter; ++iter) {
             InlineCallFrame* inlineCallFrame = *iter;
             
             if (inlineCallFrame->isVarargs()) {
index 9ab4e53..4467d5f 100644 (file)
@@ -922,9 +922,9 @@ private:
                         Graph::parameterSlotsForArgCount(numAllocatedArgs));
                 }
             }
-            
-            m_graph.m_plan.recordedStatuses.addCallLinkStatus(m_node->origin.semantic, CallLinkStatus(callVariant));
-            
+
+            m_graph.m_plan.recordedStatuses().addCallLinkStatus(m_node->origin.semantic, CallLinkStatus(callVariant));
+
             m_node->convertToDirectCall(m_graph.freeze(executable));
             m_changed = true;
             break;
index a0c2e82..c5067ef 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -61,8 +61,8 @@ public:
     
     bool run()
     {
-        RELEASE_ASSERT(m_graph.m_plan.mode == DFGMode);
-        
+        RELEASE_ASSERT(m_graph.m_plan.mode() == DFGMode);
+
         if (!Options::useFTLJIT())
             return false;
         
@@ -110,7 +110,7 @@ public:
 
                 unsigned bytecodeIndex = origin.semantic.bytecodeIndex;
                 if (canOSREnter)
-                    m_graph.m_plan.tierUpAndOSREnterBytecodes.append(bytecodeIndex);
+                    m_graph.m_plan.tierUpAndOSREnterBytecodes().append(bytecodeIndex);
 
                 if (const NaturalLoop* loop = naturalLoops.innerMostLoopOf(block)) {
                     LoopHintDescriptor descriptor;
@@ -147,9 +147,9 @@ public:
             }
 
             if (!tierUpCandidates.isEmpty())
-                m_graph.m_plan.tierUpInLoopHierarchy.add(entry.key, WTFMove(tierUpCandidates));
+                m_graph.m_plan.tierUpInLoopHierarchy().add(entry.key, WTFMove(tierUpCandidates));
         }
-        m_graph.m_plan.willTryToTierUp = true;
+        m_graph.m_plan.setWillTryToTierUp(true);
         return true;
 #else // ENABLE(FTL_JIT)
         RELEASE_ASSERT_NOT_REACHED();
index bacdc1d..9ff9e92 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -442,10 +442,11 @@ private:
             ASSERT(block->isReachable);
             if (!block->isOSRTarget)
                 continue;
-            if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex)
+            if (block->bytecodeBegin != m_graph.m_plan.osrEntryBytecodeIndex())
                 continue;
-            for (size_t i = 0; i < m_graph.m_plan.mustHandleValues.size(); ++i) {
-                int operand = m_graph.m_plan.mustHandleValues.operandForIndex(i);
+            const Operands<JSValue>& mustHandleValues = m_graph.m_plan.mustHandleValues();
+            for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+                int operand = mustHandleValues.operandForIndex(i);
                 Node* node = block->variablesAtHead.operand(operand);
                 if (!node)
                     continue;
@@ -455,7 +456,7 @@ private:
                     continue;
                 if (!TypeCheck::isValidToHoist(iter->value))
                     continue;
-                JSValue value = m_graph.m_plan.mustHandleValues[i];
+                JSValue value = mustHandleValues[i];
                 if (!value || !value.isCell() || TypeCheck::isContravenedByValue(iter->value, value)) {
                     TypeCheck::disableHoisting(iter->value);
                     continue;
index 68f6aac..b305b71 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -66,7 +66,7 @@ protected:
             }
             return PollResult::Stop;
         }
-        RELEASE_ASSERT(m_plan->stage == Plan::Preparing);
+        RELEASE_ASSERT(m_plan->stage() == Plan::Preparing);
         m_worklist.m_numberOfActiveThreads++;
         return PollResult::Work;
     }
@@ -100,7 +100,7 @@ protected:
         LockHolder locker(m_data.m_rightToRun);
         {
             LockHolder locker(*m_worklist.m_lock);
-            if (m_plan->stage == Plan::Cancelled)
+            if (m_plan->stage() == Plan::Cancelled)
                 return WorkResult::Continue;
             m_plan->notifyCompiling();
         }
@@ -109,13 +109,13 @@ protected:
             dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n");
         
         // There's no way for the GC to be safepointing since we own rightToRun.
-        if (m_plan->vm->heap.worldIsStopped()) {
+        if (m_plan->vm()->heap.worldIsStopped()) {
             dataLog("Heap is stoped but here we are! (1)\n");
             RELEASE_ASSERT_NOT_REACHED();
         }
         m_plan->compileInThread(&m_data);
-        if (m_plan->stage != Plan::Cancelled) {
-            if (m_plan->vm->heap.worldIsStopped()) {
+        if (m_plan->stage() != Plan::Cancelled) {
+            if (m_plan->vm()->heap.worldIsStopped()) {
                 dataLog("Heap is stopped but here we are! (2)\n");
                 RELEASE_ASSERT_NOT_REACHED();
             }
@@ -123,7 +123,7 @@ protected:
         
         {
             LockHolder locker(*m_worklist.m_lock);
-            if (m_plan->stage == Plan::Cancelled)
+            if (m_plan->stage() == Plan::Cancelled)
                 return WorkResult::Continue;
             
             m_plan->notifyReady();
@@ -134,8 +134,8 @@ protected:
             }
             
             m_worklist.m_readyPlans.append(m_plan);
-            
-            RELEASE_ASSERT(!m_plan->vm->heap.worldIsStopped());
+
+            RELEASE_ASSERT(!m_plan->vm()->heap.worldIsStopped());
             m_worklist.m_planCompiled.notifyAll();
         }
         
@@ -223,7 +223,7 @@ bool Worklist::isActiveForVM(VM& vm) const
     LockHolder locker(*m_lock);
     PlanMap::const_iterator end = m_plans.end();
     for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
-        if (iter->value->vm == &vm)
+        if (iter->value->vm() == &vm)
             return true;
     }
     return false;
@@ -248,7 +248,7 @@ Worklist::State Worklist::compilationState(CompilationKey key)
     PlanMap::iterator iter = m_plans.find(key);
     if (iter == m_plans.end())
         return NotKnown;
-    return iter->value->stage == Plan::Ready ? Compiled : Compiling;
+    return iter->value->stage() == Plan::Ready ? Compiled : Compiling;
 }
 
 void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
@@ -278,9 +278,9 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
         bool allAreCompiled = true;
         PlanMap::iterator end = m_plans.end();
         for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
-            if (iter->value->vm != &vm)
+            if (iter->value->vm() != &vm)
                 continue;
-            if (iter->value->stage != Plan::Ready) {
+            if (iter->value->stage() != Plan::Ready) {
                 allAreCompiled = false;
                 break;
             }
@@ -299,9 +299,9 @@ void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReady
     LockHolder locker(*m_lock);
     for (size_t i = 0; i < m_readyPlans.size(); ++i) {
         RefPtr<Plan> plan = m_readyPlans[i];
-        if (plan->vm != &vm)
+        if (plan->vm() != &vm)
             continue;
-        if (plan->stage != Plan::Ready)
+        if (plan->stage() != Plan::Ready)
             continue;
         myReadyPlans.append(plan);
         m_readyPlans[i--] = m_readyPlans.last();
@@ -331,9 +331,9 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ
         
         if (Options::verboseCompilationQueue())
             dataLog(*this, ": Completing ", currentKey, "\n");
-        
-        RELEASE_ASSERT(plan->stage == Plan::Ready);
-        
+
+        RELEASE_ASSERT(plan->stage() == Plan::Ready);
+
         plan->finalizeAndNotifyCallback();
         
         if (currentKey == requestedKey)
@@ -377,7 +377,7 @@ void Worklist::visitWeakReferences(SlotVisitor& visitor)
         LockHolder locker(*m_lock);
         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
             Plan* plan = iter->value.get();
-            if (plan->vm != vm)
+            if (plan->vm() != vm)
                 continue;
             plan->checkLivenessAndVisitChildren(visitor);
         }
@@ -401,13 +401,13 @@ void Worklist::removeDeadPlans(VM& vm)
         HashSet<CompilationKey> deadPlanKeys;
         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
             Plan* plan = iter->value.get();
-            if (plan->vm != &vm)
+            if (plan->vm() != &vm)
                 continue;
             if (plan->isKnownToBeLiveDuringGC()) {
                 plan->finalizeInGC();
                 continue;
             }
-            RELEASE_ASSERT(plan->stage != Plan::Cancelled); // Should not be cancelled, yet.
+            RELEASE_ASSERT(plan->stage() != Plan::Cancelled); // Should not be cancelled, yet.
             ASSERT(!deadPlanKeys.contains(plan->key()));
             deadPlanKeys.add(plan->key());
         }
@@ -417,12 +417,12 @@ void Worklist::removeDeadPlans(VM& vm)
             Deque<RefPtr<Plan>> newQueue;
             while (!m_queue.isEmpty()) {
                 RefPtr<Plan> plan = m_queue.takeFirst();
-                if (plan->stage != Plan::Cancelled)
+                if (plan->stage() != Plan::Cancelled)
                     newQueue.append(plan);
             }
             m_queue.swap(newQueue);
             for (unsigned i = 0; i < m_readyPlans.size(); ++i) {
-                if (m_readyPlans[i]->stage != Plan::Cancelled)
+                if (m_readyPlans[i]->stage() != Plan::Cancelled)
                     continue;
                 m_readyPlans[i--] = m_readyPlans.last();
                 m_readyPlans.removeLast();
@@ -451,9 +451,9 @@ void Worklist::removeNonCompilingPlansForVM(VM& vm)
     Vector<RefPtr<Plan>> deadPlans;
     for (auto& entry : m_plans) {
         Plan* plan = entry.value.get();
-        if (plan->vm != &vm)
+        if (plan->vm() != &vm)
             continue;
-        if (plan->stage == Plan::Compiling)
+        if (plan->stage() == Plan::Compiling)
             continue;
         deadPlanKeys.add(plan->key());
         deadPlans.append(plan);
index 8a5bfec..d264711 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -47,7 +47,7 @@ void Worklist::iterateCodeBlocksForGC(VM& vm, const Func& func)
     LockHolder locker(*m_lock);
     for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
         Plan* plan = iter->value.get();
-        if (plan->vm != &vm)
+        if (plan->vm() != &vm)
             continue;
         plan->iterateCodeBlocksForGC(func);
     }
index ca87da3..f8c6ee6 100644 (file)
@@ -168,7 +168,7 @@ void compile(State& state, Safepoint::Result& safepointResult)
     if (B3::Air::Disassembler* disassembler = state.proc->code().disassembler()) {
         PrintStream& out = WTF::dataFile();
 
-        out.print("Generated ", state.graph.m_plan.mode, " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");
+        out.print("Generated ", state.graph.m_plan.mode(), " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");
 
         LinkBuffer& linkBuffer = *state.finalizer->b3CodeLinkBuffer;
         B3::Value* currentB3Value = nullptr;
index 5c6426a..1653350 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -37,7 +37,7 @@ using namespace DFG;
 
 void fail(State& state)
 {
-    state.graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(state.graph.m_plan);
+    state.graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(state.graph.m_plan));
 }
 
 } } // namespace JSC::FTL
index 0919f0b..7643eda 100644 (file)
@@ -75,21 +75,21 @@ bool JITFinalizer::finalizeCommon()
     
     MacroAssemblerCodeRef<JSEntryPtrTag> b3CodeRef =
         FINALIZE_CODE_IF(dumpDisassembly, *b3CodeLinkBuffer, JSEntryPtrTag,
-            "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data());
+            "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data());
 
     MacroAssemblerCodeRef<JSEntryPtrTag> arityCheckCodeRef = entrypointLinkBuffer
         ? FINALIZE_CODE_IF(dumpDisassembly, *entrypointLinkBuffer, JSEntryPtrTag,
-            "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data(), function)
+            "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data(), function)
         : MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(b3CodeRef.code());
 
     jitCode->initializeB3Code(b3CodeRef);
     jitCode->initializeArityCheckEntrypoint(arityCheckCodeRef);
 
-    m_plan.codeBlock->setJITCode(*jitCode);
+    m_plan.codeBlock()->setJITCode(*jitCode);
+
+    if (UNLIKELY(m_plan.compilation()))
+        m_plan.vm()->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock(), *m_plan.compilation());
 
-    if (UNLIKELY(m_plan.compilation))
-        m_plan.vm->m_perBytecodeProfiler->addCompilation(m_plan.codeBlock, *m_plan.compilation);
-    
     return true;
 }
 
index 174fce9..3fca11e 100644 (file)
@@ -53,9 +53,9 @@ void link(State& state)
 
     state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit();
     
-    if (!graph.m_plan.inlineCallFrames->isEmpty())
-        state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames;
-    
+    if (!graph.m_plan.inlineCallFrames()->isEmpty())
+        state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames();
+
     graph.registerFrozenValues();
 
     // Create the entrypoint. Note that we use this entrypoint totally differently
@@ -125,8 +125,8 @@ void link(State& state)
         
         state.jitCode->common.compilation = compilation;
     }
-    
-    switch (graph.m_plan.mode) {
+
+    switch (graph.m_plan.mode()) {
     case FTLMode: {
         bool requiresArityFixup = codeBlock->numParameters() != 1;
         if (codeBlock->codeType() == FunctionCode && requiresArityFixup) {
index 3ba87e1..761687b 100644 (file)
@@ -6669,7 +6669,7 @@ private:
                     storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
             } else {
                 DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
-                m_graph.m_plan.transitions.addLazily(
+                m_graph.m_plan.transitions().addLazily(
                     codeBlock(), m_node->origin.semantic.codeOriginOwner(),
                     variant.oldStructureForTransition(), variant.newStructure());
                 
@@ -16108,8 +16108,8 @@ private:
             Availability availability = availabilityMap.m_locals[i];
             
             if (Options::validateFTLOSRExitLiveness()
-                && m_graph.m_plan.mode != FTLForOSREntryMode) {
-                
+                && m_graph.m_plan.mode() != FTLForOSREntryMode) {
+
                 if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
                     DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
             }
@@ -16384,7 +16384,7 @@ private:
     
     void addWeakReference(JSCell* target)
     {
-        m_graph.m_plan.weakReferences.addLazily(target);
+        m_graph.m_plan.weakReferences().addLazily(target);
     }
 
     LValue loadStructure(LValue value)
index 588edd8..9f59369 100644 (file)
@@ -109,7 +109,7 @@ public:
 
     LValue weakPointer(DFG::Graph& graph, JSCell* cell)
     {
-        ASSERT(graph.m_plan.weakReferences.contains(cell));
+        ASSERT(graph.m_plan.weakReferences().contains(cell));
 
         return constIntPtr(bitwise_cast<intptr_t>(cell));
     }
@@ -117,7 +117,7 @@ public:
     template<typename Key>
     LValue weakPoisonedPointer(DFG::Graph& graph, JSCell* cell)
     {
-        ASSERT(graph.m_plan.weakReferences.contains(cell));
+        ASSERT(graph.m_plan.weakReferences().contains(cell));
 
         return constIntPtr(bitwise_cast<intptr_t>(cell) ^ Key::key());
     }
index 9670129..653ec6f 100644 (file)
@@ -43,7 +43,7 @@ using namespace DFG;
 State::State(Graph& graph)
     : graph(graph)
 {
-    switch (graph.m_plan.mode) {
+    switch (graph.m_plan.mode()) {
     case FTLMode: {
         jitCode = adoptRef(new JITCode());
         break;
@@ -51,7 +51,7 @@ State::State(Graph& graph)
     case FTLForOSREntryMode: {
         RefPtr<ForOSREntryJITCode> code = adoptRef(new ForOSREntryJITCode());
         code->initializeEntryBuffer(graph.m_vm, graph.m_profiledBlock->numCalleeLocals());
-        code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex);
+        code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex());
         jitCode = code;
         break;
     }
@@ -60,8 +60,8 @@ State::State(Graph& graph)
         break;
     }
 
-    graph.m_plan.finalizer = std::make_unique<JITFinalizer>(graph.m_plan);
-    finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer.get());
+    graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(graph.m_plan));
+    finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer());
 
     proc = std::make_unique<Procedure>();