Support compiling catch in the FTL
https://bugs.webkit.org/show_bug.cgi?id=175396

Reviewed by Filip Pizlo.

This patch implements op_catch in the FTL. It extends the DFG implementation
by supporting multiple entrypoints in DFG-SSA. This patch implements this
by introducing an EntrySwitch node. When converting to SSA, we introduce a new
root block with an EntrySwitch that has the previous DFG entrypoints as its
successors. By convention, we pick the zeroth entry point index to be the
op_enter entrypoint. Like in B3, in DFG-SSA, EntrySwitch just acts like a
switch over the entrypoint index argument. DFG::EntrySwitch in the FTL
simply lowers to B3::EntrySwitch. The EntrySwitch in the root block that
SSAConversion creates cannot exit because we would not know where in the
program to exit to: we would not have valid OSR exit state. This design also
mandates that anything we hoist above the EntrySwitch in the new root block
cannot exit, since it also does not have valid OSR exit state.
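
As a minimal sketch of what the lowering looks like (this mirrors the
compileEntrySwitch hunk in FTLLowerDFGToB3.cpp below), each DFG case block
becomes a B3 successor and B3 dispatches on the entrypoint index:

    void compileEntrySwitch()
    {
        // One FTL/B3 successor per DFG::EntrySwitch case; B3's EntrySwitch
        // (and later Air's lowerEntrySwitch) picks the successor that
        // corresponds to the entrypoint being entered.
        Vector<LBasicBlock> successors;
        for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
            successors.append(lowBlock(successor));
        m_out.entrySwitch(successors);
    }
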
This patch also adds a new metadata node named InitializeEntrypointArguments.
It initializes the flush format for the arguments at a given entrypoint: for
a given entrypoint index, this node
tells AI and OSRAvailabilityAnalysis what the flush format for each argument
is. This allows each individual entrypoint to have an independent set of
argument types. Currently, this won't happen in practice because ArgumentPosition
unifies flush formats, but this is an implementation detail we probably want
to modify in the future. SSAConversion will add InitializeEntrypointArguments
to the beginning of each of the original DFG entrypoint blocks.
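
For example, AI handles the new node roughly as follows (abridged from the
DFGAbstractInterpreterInlines.h hunk below): it seeds each argument's abstract
value from the flush format recorded for that entrypoint index:

    case InitializeEntrypointArguments: {
        // Establish the proven type of each argument for this entrypoint.
        unsigned entrypointIndex = node->entrypointIndex();
        const Vector<FlushFormat>& formats = m_graph.m_argumentFormats[entrypointIndex];
        for (unsigned argument = 0; argument < formats.size(); ++argument) {
            AbstractValue& value = m_state.variables().argument(argument);
            switch (formats[argument]) {
            case FlushedInt32:   value.setType(SpecInt32Only); break;
            case FlushedBoolean: value.setType(SpecBoolean); break;
            case FlushedCell:    value.setType(m_graph, SpecCell); break;
            case FlushedJSValue: value.makeBytecodeTop(); break;
            default: DFG_CRASH(m_graph, node, "Bad flush format for argument"); break;
            }
        }
        break;
    }
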
This patch also adds the ability to specify custom prologue code generators in Air.
This allows the FTL to specify a custom prologue for catch entrypoints that
matches the op_catch OSR entry calling convention that the DFG uses. This way,
the baseline JIT code OSR enters into op_catch in the same way for both the DFG
and the FTL. In the future, we can use this same mechanism to perform stack
overflow checks instead of using a patchpoint.
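
Concretely, a client registers a SharedTask per entrypoint index and
Air::generate() runs it in place of the default prologue. The FTL uses it for
catch entrypoints like this (abridged from the FTLLowerDFGToB3.cpp hunk below):

    RefPtr<B3::Air::PrologueGenerator> catchPrologueGenerator =
        createSharedTask<B3::Air::PrologueGeneratorFunction>(
            [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                // Unlike the default prologue, this does not emitFunctionPrologue():
                // op_catch OSR entry jumps here with the frame already set up, so
                // we only materialize the stack pointer, save callee saves, and
                // store the CodeBlock into the call frame header.
                AllowMacroScratchRegisterUsage allowScratch(jit);
                jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
            });

    for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys())
        m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator);
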
* b3/air/AirCode.cpp:
(JSC::B3::Air::Code::isEntrypoint):
(JSC::B3::Air::Code::entrypointIndex):
* b3/air/AirCode.h:
(JSC::B3::Air::Code::setPrologueForEntrypoint):
(JSC::B3::Air::Code::prologueGeneratorForEntrypoint):
* b3/air/AirGenerate.cpp:
(JSC::B3::Air::generate):
* dfg/DFGAbstractInterpreterInlines.h:
(JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
* dfg/DFGBasicBlock.h:
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::parse):
* dfg/DFGCFG.h:
(JSC::DFG::selectCFG):
* dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* dfg/DFGClobbersExitState.cpp:
(JSC::DFG::clobbersExitState):
* dfg/DFGCommonData.cpp:
(JSC::DFG::CommonData::shrinkToFit):
(JSC::DFG::CommonData::finalizeCatchEntrypoints):
* dfg/DFGCommonData.h:
(JSC::DFG::CommonData::catchOSREntryDataForBytecodeIndex):
(JSC::DFG::CommonData::appendCatchEntrypoint):
* dfg/DFGDoesGC.cpp:
(JSC::DFG::doesGC):
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::dump):
(JSC::DFG::Graph::invalidateCFG):
(JSC::DFG::Graph::ensureCPSCFG):
(JSC::DFG::Graph::methodOfGettingAValueProfileFor):
* dfg/DFGGraph.h:
(JSC::DFG::Graph::isEntrypoint):
* dfg/DFGInPlaceAbstractState.cpp:
(JSC::DFG::InPlaceAbstractState::initialize):
(JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
* dfg/DFGJITCode.cpp:
(JSC::DFG::JITCode::shrinkToFit):
(JSC::DFG::JITCode::finalizeOSREntrypoints):
* dfg/DFGJITCode.h:
(JSC::DFG::JITCode::catchOSREntryDataForBytecodeIndex): Deleted.
(JSC::DFG::JITCode::appendCatchEntrypoint): Deleted.
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::noticeCatchEntrypoint):
(JSC::DFG::JITCompiler::makeCatchOSREntryBuffer):
* dfg/DFGMayExit.cpp:
* dfg/DFGNode.h:
(JSC::DFG::Node::isEntrySwitch):
(JSC::DFG::Node::isTerminal):
(JSC::DFG::Node::entrySwitchData):
(JSC::DFG::Node::numSuccessors):
(JSC::DFG::Node::successor):
(JSC::DFG::Node::entrypointIndex):
* dfg/DFGNodeType.h:
* dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
(JSC::DFG::OSRAvailabilityAnalysisPhase::run):
(JSC::DFG::LocalOSRAvailabilityCalculator::executeNode):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::prepareCatchOSREntry):
* dfg/DFGOSREntry.h:
* dfg/DFGOSREntrypointCreationPhase.cpp:
(JSC::DFG::OSREntrypointCreationPhase::run):
* dfg/DFGPredictionPropagationPhase.cpp:
* dfg/DFGSSAConversionPhase.cpp:
(JSC::DFG::SSAConversionPhase::SSAConversionPhase):
(JSC::DFG::SSAConversionPhase::run):
* dfg/DFGSafeToExecute.h:
(JSC::DFG::safeToExecute):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::linkOSREntries):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGStaticExecutionCountEstimationPhase.cpp:
(JSC::DFG::StaticExecutionCountEstimationPhase::run):
* dfg/DFGValidate.cpp:
* ftl/FTLCapabilities.cpp:
(JSC::FTL::canCompile):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::lower):
(JSC::FTL::DFG::LowerDFGToB3::compileNode):
(JSC::FTL::DFG::LowerDFGToB3::compileExtractCatchLocal):
(JSC::FTL::DFG::LowerDFGToB3::compileGetStack):
(JSC::FTL::DFG::LowerDFGToB3::compileEntrySwitch):
(JSC::FTL::DFG::LowerDFGToB3::speculate):
(JSC::FTL::DFG::LowerDFGToB3::appendOSRExitDescriptor):
(JSC::FTL::DFG::LowerDFGToB3::appendOSRExit):
(JSC::FTL::DFG::LowerDFGToB3::blessSpeculation):
* ftl/FTLOutput.cpp:
(JSC::FTL::Output::entrySwitch):
* ftl/FTLOutput.h:
* jit/JITOperations.cpp:
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@221602 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2017-09-04 Saam Barati <sbarati@apple.com>
+
+ Support compiling catch in the FTL
+ https://bugs.webkit.org/show_bug.cgi?id=175396
+
+ Reviewed by Filip Pizlo.
+
+ This patch implements op_catch in the FTL. It extends the DFG implementation
+ by supporting multiple entrypoints in DFG-SSA. This patch implements this
+ by introducing an EntrySwitch node. When converting to SSA, we introduce a new
+ root block with an EntrySwitch that has the previous DFG entrypoints as its
+ successors. By convention, we pick the zeroth entry point index to be the
+ op_enter entrypoint. Like in B3, in DFG-SSA, EntrySwitch just acts like a
+ switch over the entrypoint index argument. DFG::EntrySwitch in the FTL
+ simply lowers to B3::EntrySwitch. The EntrySwitch in the root block that
+ SSAConversion creates cannot exit because we would not know where in the
+ program to exit to: we would not have valid OSR exit state. This design also
+ mandates that anything we hoist above the EntrySwitch in the new root block
+ cannot exit, since it also does not have valid OSR exit state.
+
+ This patch also adds a new metadata node named InitializeEntrypointArguments.
+ It initializes the flush format for the arguments at a given entrypoint: for
+ a given entrypoint index, this node
+ tells AI and OSRAvailabilityAnalysis what the flush format for each argument
+ is. This allows each individual entrypoint to have an independent set of
+ argument types. Currently, this won't happen in practice because ArgumentPosition
+ unifies flush formats, but this is an implementation detail we probably want
+ to modify in the future. SSAConversion will add InitializeEntrypointArguments
+ to the beginning of each of the original DFG entrypoint blocks.
+
+ This patch also adds the ability to specify custom prologue code generators in Air.
+ This allows the FTL to specify a custom prologue for catch entrypoints that
+ matches the op_catch OSR entry calling convention that the DFG uses. This way,
+ the baseline JIT code OSR enters into op_catch in the same way for both the DFG
+ and the FTL. In the future, we can use this same mechanism to perform stack
+ overflow checks instead of using a patchpoint.
+
+ * b3/air/AirCode.cpp:
+ (JSC::B3::Air::Code::isEntrypoint):
+ (JSC::B3::Air::Code::entrypointIndex):
+ * b3/air/AirCode.h:
+ (JSC::B3::Air::Code::setPrologueForEntrypoint):
+ (JSC::B3::Air::Code::prologueGeneratorForEntrypoint):
+ * b3/air/AirGenerate.cpp:
+ (JSC::B3::Air::generate):
+ * dfg/DFGAbstractInterpreterInlines.h:
+ (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
+ * dfg/DFGBasicBlock.h:
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ (JSC::DFG::ByteCodeParser::parse):
+ * dfg/DFGCFG.h:
+ (JSC::DFG::selectCFG):
+ * dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * dfg/DFGClobbersExitState.cpp:
+ (JSC::DFG::clobbersExitState):
+ * dfg/DFGCommonData.cpp:
+ (JSC::DFG::CommonData::shrinkToFit):
+ (JSC::DFG::CommonData::finalizeCatchEntrypoints):
+ * dfg/DFGCommonData.h:
+ (JSC::DFG::CommonData::catchOSREntryDataForBytecodeIndex):
+ (JSC::DFG::CommonData::appendCatchEntrypoint):
+ * dfg/DFGDoesGC.cpp:
+ (JSC::DFG::doesGC):
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::Graph::dump):
+ (JSC::DFG::Graph::invalidateCFG):
+ (JSC::DFG::Graph::ensureCPSCFG):
+ (JSC::DFG::Graph::methodOfGettingAValueProfileFor):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::isEntrypoint):
+ * dfg/DFGInPlaceAbstractState.cpp:
+ (JSC::DFG::InPlaceAbstractState::initialize):
+ (JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
+ * dfg/DFGJITCode.cpp:
+ (JSC::DFG::JITCode::shrinkToFit):
+ (JSC::DFG::JITCode::finalizeOSREntrypoints):
+ * dfg/DFGJITCode.h:
+ (JSC::DFG::JITCode::catchOSREntryDataForBytecodeIndex): Deleted.
+ (JSC::DFG::JITCode::appendCatchEntrypoint): Deleted.
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::noticeCatchEntrypoint):
+ (JSC::DFG::JITCompiler::makeCatchOSREntryBuffer):
+ * dfg/DFGMayExit.cpp:
+ * dfg/DFGNode.h:
+ (JSC::DFG::Node::isEntrySwitch):
+ (JSC::DFG::Node::isTerminal):
+ (JSC::DFG::Node::entrySwitchData):
+ (JSC::DFG::Node::numSuccessors):
+ (JSC::DFG::Node::successor):
+ (JSC::DFG::Node::entrypointIndex):
+ * dfg/DFGNodeType.h:
+ * dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
+ (JSC::DFG::OSRAvailabilityAnalysisPhase::run):
+ (JSC::DFG::LocalOSRAvailabilityCalculator::executeNode):
+ * dfg/DFGOSREntry.cpp:
+ (JSC::DFG::prepareCatchOSREntry):
+ * dfg/DFGOSREntry.h:
+ * dfg/DFGOSREntrypointCreationPhase.cpp:
+ (JSC::DFG::OSREntrypointCreationPhase::run):
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ * dfg/DFGSSAConversionPhase.cpp:
+ (JSC::DFG::SSAConversionPhase::SSAConversionPhase):
+ (JSC::DFG::SSAConversionPhase::run):
+ * dfg/DFGSafeToExecute.h:
+ (JSC::DFG::safeToExecute):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::linkOSREntries):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGStaticExecutionCountEstimationPhase.cpp:
+ (JSC::DFG::StaticExecutionCountEstimationPhase::run):
+ * dfg/DFGValidate.cpp:
+ * ftl/FTLCapabilities.cpp:
+ (JSC::FTL::canCompile):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::compile):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::lower):
+ (JSC::FTL::DFG::LowerDFGToB3::compileNode):
+ (JSC::FTL::DFG::LowerDFGToB3::compileExtractCatchLocal):
+ (JSC::FTL::DFG::LowerDFGToB3::compileGetStack):
+ (JSC::FTL::DFG::LowerDFGToB3::compileEntrySwitch):
+ (JSC::FTL::DFG::LowerDFGToB3::speculate):
+ (JSC::FTL::DFG::LowerDFGToB3::appendOSRExitDescriptor):
+ (JSC::FTL::DFG::LowerDFGToB3::appendOSRExit):
+ (JSC::FTL::DFG::LowerDFGToB3::blessSpeculation):
+ * ftl/FTLOutput.cpp:
+ (JSC::FTL::Output::entrySwitch):
+ * ftl/FTLOutput.h:
+ * jit/JITOperations.cpp:
+
2017-09-03 Yusuke Suzuki <utatane.tea@gmail.com>
[DFG][FTL] Efficiently execute number#toString()
bool Code::isEntrypoint(BasicBlock* block) const
{
+ // Note: This function must work both before and after LowerEntrySwitch.
+
if (m_entrypoints.isEmpty())
return !block->index();
return false;
}
+std::optional<unsigned> Code::entrypointIndex(BasicBlock* block) const
+{
+ RELEASE_ASSERT(m_entrypoints.size());
+ for (unsigned i = 0; i < m_entrypoints.size(); ++i) {
+ if (m_entrypoints[i].block() == block)
+ return i;
+ }
+ return std::nullopt;
+}
+
void Code::setCalleeSaveRegisterAtOffsetList(RegisterAtOffsetList&& registerAtOffsetList, StackSlot* slot)
{
m_uncorrectedCalleeSaveRegisterAtOffsetList = WTFMove(registerAtOffsetList);
class BlockInsertionSet;
class CCallSpecial;
class CFG;
+class Code;
class Disassembler;
typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg);
typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+typedef void PrologueGeneratorFunction(CCallHelpers&, Code&);
+typedef SharedTask<PrologueGeneratorFunction> PrologueGenerator;
+
// This is an IR that is very close to the bare metal. It requires about 40x more bytes than the
// generated machine code - for example if you're generating 1MB of machine code, you need about
// 40MB of Air.
const Vector<FrequentedBlock>& entrypoints() const { return m_entrypoints; }
const FrequentedBlock& entrypoint(unsigned index) const { return m_entrypoints[index]; }
bool isEntrypoint(BasicBlock*) const;
-
+ // Note: It is only valid to call this function after LowerEntrySwitch.
+ std::optional<unsigned> entrypointIndex(BasicBlock*) const;
+ void setPrologueForEntrypoint(unsigned entrypointIndex, RefPtr<PrologueGenerator> generator)
+ {
+ // Note: We allow this to be called even before we set m_entrypoints just for convenience to users of this API.
+ m_entrypointIndexToGenerator.set(entrypointIndex, generator);
+ }
+ RefPtr<PrologueGenerator> prologueGeneratorForEntrypoint(unsigned entrypointIndex)
+ {
+ return m_entrypointIndexToGenerator.get(entrypointIndex);
+ }
+
// This is used by lowerEntrySwitch().
template<typename Vector>
void setEntrypoints(Vector&& vector)
StackSlot* m_calleeSaveStackSlot { nullptr };
Vector<FrequentedBlock> m_entrypoints; // This is empty until after lowerEntrySwitch().
Vector<CCallHelpers::Label> m_entrypointLabels; // This is empty until code generation.
+ HashMap<unsigned, RefPtr<PrologueGenerator>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_entrypointIndexToGenerator;
RefPtr<WasmBoundsCheckGenerator> m_wasmBoundsCheckGenerator;
const char* m_lastPhaseName;
std::unique_ptr<Disassembler> m_disassembler;
if (disassembler)
disassembler->startBlock(block, jit);
- if (code.isEntrypoint(block)) {
+ if (std::optional<unsigned> entrypointIndex = code.entrypointIndex(block)) {
+ ASSERT(code.isEntrypoint(block));
+
if (disassembler)
disassembler->startEntrypoint(jit);
- jit.emitFunctionPrologue();
- if (code.frameSize()) {
- AllowMacroScratchRegisterUsageIf allowScratch(jit, isARM64());
- jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);
+ if (RefPtr<PrologueGenerator> prologueGenerator = code.prologueGeneratorForEntrypoint(*entrypointIndex))
+ prologueGenerator->run(jit, code);
+ else {
+ jit.emitFunctionPrologue();
+ if (code.frameSize()) {
+ AllowMacroScratchRegisterUsageIf allowScratch(jit, isARM64());
+ jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);
+ }
+
+ jit.emitSave(code.calleeSaveRegisterAtOffsetList());
}
-
- jit.emitSave(code.calleeSaveRegisterAtOffsetList());
if (disassembler)
disassembler->endEntrypoint(jit);
- }
+ } else
+ ASSERT(!code.isEntrypoint(block));
ASSERT(block->size() >= 1);
for (unsigned i = 0; i < block->size() - 1; ++i) {
// DFG execution.
break;
}
-
+
case KillStack: {
// This is just a hint telling us that the OSR state of the local is no longer inside the
// flushed data.
// non-clear value.
ASSERT(!m_state.variables().operand(node->local()).isClear());
break;
-
+
+ case InitializeEntrypointArguments: {
+ unsigned entrypointIndex = node->entrypointIndex();
+ const Vector<FlushFormat>& argumentFormats = m_graph.m_argumentFormats[entrypointIndex];
+ for (unsigned argument = 0; argument < argumentFormats.size(); ++argument) {
+ AbstractValue& value = m_state.variables().argument(argument);
+ switch (argumentFormats[argument]) {
+ case FlushedInt32:
+ value.setType(SpecInt32Only);
+ break;
+ case FlushedBoolean:
+ value.setType(SpecBoolean);
+ break;
+ case FlushedCell:
+ value.setType(m_graph, SpecCell);
+ break;
+ case FlushedJSValue:
+ value.makeBytecodeTop();
+ break;
+ default:
+ DFG_CRASH(m_graph, node, "Bad flush format for argument");
+ break;
+ }
+ }
+ break;
+ }
+
case LoadVarargs:
case ForwardVarargs: {
// FIXME: ForwardVarargs should check if the count becomes known, and if it does, it should turn
break;
}
+ case EntrySwitch:
+ break;
+
case Return:
m_state.setIsValid(false);
break;
float executionCount;
- // These fields are reserved for NaturalLoops.
-
struct SSAData {
WTF_MAKE_FAST_ALLOCATED;
public:
NEXT_OPCODE(op_catch);
}
- if (isFTL(m_graph.m_plan.mode)) {
- // FIXME: Support catch in the FTL.
- // https://bugs.webkit.org/show_bug.cgi?id=175396
+ if (m_graph.m_plan.mode == FTLForOSREntryMode) {
NEXT_OPCODE(op_catch);
}
m_graph.determineReachability();
m_graph.killUnreachableBlocks();
- m_graph.m_cpsCFG = std::make_unique<CPSCFG>(m_graph);
-
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
template <typename T, typename = typename std::enable_if<std::is_same<T, CPSCFG>::value>::type>
CPSCFG& selectCFG(Graph& graph)
{
- RELEASE_ASSERT(graph.m_cpsCFG);
- return *graph.m_cpsCFG;
+ return graph.ensureCPSCFG();
}
template <typename T, typename = typename std::enable_if<std::is_same<T, SSACFG>::value>::type>
case Jump:
case Branch:
case Switch:
+ case EntrySwitch:
case ForceOSRExit:
case CheckBadCell:
case Return:
case ProfileType:
case ProfileControlFlow:
case PutHint:
+ case InitializeEntrypointArguments:
write(SideState);
return;
// There are certain nodes whose effect on the exit state has nothing to do with what they
// normally clobber.
switch (node->op()) {
+ case InitializeEntrypointArguments:
case MovHint:
case ZombieHint:
case PutHint:
codeOrigins.shrinkToFit();
weakReferences.shrinkToFit();
transitions.shrinkToFit();
+ catchEntrypoints.shrinkToFit();
}
static StaticLock pcCodeBlockMapLock;
watchpoint->key().validateReferences(trackedReferences);
}
+void CommonData::finalizeCatchEntrypoints()
+{
+ std::sort(catchEntrypoints.begin(), catchEntrypoints.end(),
+ [] (const CatchEntrypointData& a, const CatchEntrypointData& b) { return a.bytecodeIndex < b.bytecodeIndex; });
+
+#if !ASSERT_DISABLED
+ for (unsigned i = 0; i + 1 < catchEntrypoints.size(); ++i)
+ ASSERT(catchEntrypoints[i].bytecodeIndex <= catchEntrypoints[i + 1].bytecodeIndex);
+#endif
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
#include "DFGAdaptiveInferredPropertyValueWatchpoint.h"
#include "DFGAdaptiveStructureWatchpoint.h"
#include "DFGJumpReplacement.h"
+#include "DFGOSREntry.h"
#include "InlineCallFrameSet.h"
#include "JSCell.h"
#include "ProfilerCompilation.h"
void installVMTrapBreakpoints(CodeBlock* owner);
bool isVMTrapBreakpoint(void* address);
+ CatchEntrypointData* catchOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+ {
+ return tryBinarySearch<CatchEntrypointData, unsigned>(
+ catchEntrypoints, catchEntrypoints.size(), bytecodeIndex,
+ [] (const CatchEntrypointData* item) { return item->bytecodeIndex; });
+ }
+
+ void appendCatchEntrypoint(unsigned bytecodeIndex, void* machineCode, Vector<FlushFormat>&& argumentFormats)
+ {
+ catchEntrypoints.append(CatchEntrypointData { machineCode, WTFMove(argumentFormats), bytecodeIndex });
+ }
+
+ void finalizeCatchEntrypoints();
+
unsigned requiredRegisterCountForExecutionAndExit() const
{
return std::max(frameRegisterCount, requiredRegisterCountForExit);
Vector<WeakReferenceTransition> transitions;
Vector<WriteBarrier<JSCell>> weakReferences;
Vector<WriteBarrier<Structure>> weakStructureReferences;
+ Vector<CatchEntrypointData> catchEntrypoints;
Bag<CodeBlockJettisoningWatchpoint> watchpoints;
Bag<AdaptiveStructureWatchpoint> adaptiveStructureWatchpoints;
Bag<AdaptiveInferredPropertyValueWatchpoint> adaptiveInferredPropertyValueWatchpoints;
Vector<JumpReplacement> jumpReplacements;
+ ScratchBuffer* catchOSREntryBuffer;
RefPtr<Profiler::Compilation> compilation;
bool livenessHasBeenProved; // Initialized and used on every GC.
bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
case GetLocal:
case SetLocal:
case MovHint:
+ case InitializeEntrypointArguments:
case ZombieHint:
case ExitOK:
case Phantom:
case Jump:
case Branch:
case Switch:
+ case EntrySwitch:
case Return:
case TailCall:
case DirectTailCall:
case Phi:
case Upsilon:
+ case EntrySwitch:
case GetIndexedPropertyStorage:
case LastNodeType:
case CheckTierUpInLoop:
case ExtractCatchLocal:
case LoopHint:
case MovHint:
+ case InitializeEntrypointArguments:
case ZombieHint:
case ExitOK:
case BottomValue:
out.print(comma, inContext(data->cases[i].value, context), ":", data->cases[i].target);
out.print(comma, "default:", data->fallThrough);
}
+ if (node->isEntrySwitch()) {
+ EntrySwitchData* data = node->entrySwitchData();
+ for (unsigned i = 0; i < data->cases.size(); ++i)
+ out.print(comma, BranchTarget(data->cases[i]));
+ }
ClobberSet reads;
ClobberSet writes;
addReadsAndWrites(*this, node, reads, writes);
out.print("\n");
out.print("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n");
out.print(" Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n");
- if (m_form == SSA)
- out.print(" Argument formats: ", listDump(m_argumentFormats), "\n");
+ if (m_form == SSA) {
+ for (unsigned entrypointIndex = 0; entrypointIndex < m_argumentFormats.size(); ++entrypointIndex)
+ out.print(" Argument formats for entrypoint index: ", entrypointIndex, " : ", listDump(m_argumentFormats[entrypointIndex]), "\n");
+ }
else {
for (auto pair : m_entrypointToArguments)
out.print(" Arguments for block#", pair.key->index, ": ", listDump(pair.value), "\n");
m_controlEquivalenceAnalysis = nullptr;
m_backwardsDominators = nullptr;
m_backwardsCFG = nullptr;
+ m_cpsCFG = nullptr;
}
void Graph::invalidateNodeLiveness()
logDFGAssertionFailure(*this, toCString("While handling block ", pointerDump(block), "\n\n"), file, line, function, assertion);
}
+CPSCFG& Graph::ensureCPSCFG()
+{
+ RELEASE_ASSERT(m_form != SSA && !m_isInSSAConversion);
+ if (!m_cpsCFG)
+ m_cpsCFG = std::make_unique<CPSCFG>(*this);
+ return *m_cpsCFG;
+}
+
CPSDominators& Graph::ensureCPSDominators()
{
RELEASE_ASSERT(m_form != SSA && !m_isInSSAConversion);
CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic);
if (node->accessesStack(*this)) {
- ValueProfile* result = [&] () -> ValueProfile* {
- if (!node->local().isArgument())
- return nullptr;
+ if (m_form != SSA && node->local().isArgument()) {
int argument = node->local().toArgument();
+ Node* argumentNode = m_entrypointToArguments.find(block(0))->value[argument];
// FIXME: We should match SetArgument nodes at other entrypoints as well:
// https://bugs.webkit.org/show_bug.cgi?id=175841
- Node* argumentNode = m_entrypointToArguments.find(block(0))->value[argument];
- if (!argumentNode)
- return nullptr;
- if (node->variableAccessData() != argumentNode->variableAccessData())
- return nullptr;
- return &profiledBlock->valueProfileForArgument(argument);
- }();
- if (result)
- return result;
+ if (argumentNode && node->variableAccessData() == argumentNode->variableAccessData())
+ return &profiledBlock->valueProfileForArgument(argument);
+ }
if (node->op() == GetLocal) {
return MethodOfGettingAValueProfile::fromLazyOperand(
BackwardsCFG& ensureBackwardsCFG();
BackwardsDominators& ensureBackwardsDominators();
ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
+ CPSCFG& ensureCPSCFG();
// These functions only makes sense to call after bytecode parsing
// because it queries the m_hasExceptionHandlers boolean whose value
bool isEntrypoint(BasicBlock* block) const
{
+ ASSERT_WITH_MESSAGE(!m_isInSSAConversion, "This is not written to work during SSA conversion.");
+
+ if (m_form == SSA) {
+ ASSERT(m_entrypoints.size() == 1);
+ ASSERT(m_entrypoints.contains(this->block(0)));
+ return block == this->block(0);
+ }
+
if (m_entrypoints.size() <= 4) {
bool result = m_entrypoints.contains(block);
ASSERT(result == m_entrypointToArguments.contains(block));
// In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
// that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
- // nodes.
+ // nodes. In SSA, this has no meaning. It's empty.
+ HashMap<BasicBlock*, ArgumentsVector> m_entrypointToArguments;
+
+ // In SSA, this is the argument speculation that we've locked in for an entrypoint block.
//
- // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
- // may have some speculation in the prologue and survived DCE. Note that to get the speculation
- // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
- // even if the argument got killed. For example:
+ // We must speculate on the argument types at each entrypoint even if operations involving
+ // arguments get killed. For example:
//
// function foo(x) {
// var tmp = x + 1;
//
// If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
// effects.
- HashMap<BasicBlock*, ArgumentsVector> m_entrypointToArguments;
-
- // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
- Vector<FlushFormat> m_argumentFormats;
+ //
+ // By convention, entrypoint index 0 is used for the CodeBlock's op_enter entrypoint.
+ // So argumentFormats[0] are the argument formats for the normal call entrypoint.
+ Vector<Vector<FlushFormat>> m_argumentFormats;
+
+ // This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
+ // it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
+ HashMap<unsigned, unsigned> m_entrypointIndexToCatchBytecodeOffset;
+
+ // This is the number of logical entrypoints that we're compiling. This is only used
+ // in SSA. Each EntrySwitch node must have numberOfEntrypoints cases. Note, this is
+ // not the same as m_entrypoints.size(). m_entrypoints.size() represents the number
+ // of roots in the CFG. In SSA, m_entrypoints.size() == 1.
+ unsigned m_numberOfEntrypoints { UINT_MAX };
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
std::optional<uint32_t> m_maxLocalsForCatchOSREntry;
std::unique_ptr<FlowIndexing> m_indexingCache;
std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
+ Bag<EntrySwitchData> m_entrySwitchData;
RegisteredStructure stringStructure;
RegisteredStructure symbolStructure;
entrypoint->cfaStructureClobberStateAtHead = StructuresAreWatched;
entrypoint->cfaStructureClobberStateAtTail = StructuresAreWatched;
- for (size_t i = 0; i < entrypoint->valuesAtHead.numberOfArguments(); ++i) {
- entrypoint->valuesAtTail.argument(i).clear();
-
- FlushFormat format;
- if (m_graph.m_form == SSA) {
- // FIXME: When supporting multiple entrypoints in the FTL, we need to change
- // what we do here: https://bugs.webkit.org/show_bug.cgi?id=175396
- format = m_graph.m_argumentFormats[i];
- } else {
- Node* node = m_graph.m_entrypointToArguments.find(entrypoint)->value[i];
+ if (m_graph.m_form == SSA) {
+ for (size_t i = 0; i < entrypoint->valuesAtHead.numberOfArguments(); ++i) {
+ entrypoint->valuesAtHead.argument(i).clear();
+ entrypoint->valuesAtTail.argument(i).clear();
+ }
+ } else {
+ const ArgumentsVector& arguments = m_graph.m_entrypointToArguments.find(entrypoint)->value;
+ for (size_t i = 0; i < entrypoint->valuesAtHead.numberOfArguments(); ++i) {
+ entrypoint->valuesAtTail.argument(i).clear();
+
+ FlushFormat format;
+ Node* node = arguments[i];
if (!node)
format = FlushedJSValue;
else {
ASSERT(node->op() == SetArgument);
format = node->variableAccessData()->flushFormat();
}
- }
-
- switch (format) {
- case FlushedInt32:
- entrypoint->valuesAtHead.argument(i).setType(SpecInt32Only);
- break;
- case FlushedBoolean:
- entrypoint->valuesAtHead.argument(i).setType(SpecBoolean);
- break;
- case FlushedCell:
- entrypoint->valuesAtHead.argument(i).setType(m_graph, SpecCell);
- break;
- case FlushedJSValue:
- entrypoint->valuesAtHead.argument(i).makeBytecodeTop();
- break;
- default:
- DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
- break;
+
+ switch (format) {
+ case FlushedInt32:
+ entrypoint->valuesAtHead.argument(i).setType(SpecInt32Only);
+ break;
+ case FlushedBoolean:
+ entrypoint->valuesAtHead.argument(i).setType(SpecBoolean);
+ break;
+ case FlushedCell:
+ entrypoint->valuesAtHead.argument(i).setType(m_graph, SpecCell);
+ break;
+ case FlushedJSValue:
+ entrypoint->valuesAtHead.argument(i).makeBytecodeTop();
+ break;
+ default:
+ DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
+ break;
+ }
}
}
+
for (size_t i = 0; i < entrypoint->valuesAtHead.numberOfLocals(); ++i) {
entrypoint->valuesAtHead.local(i).clear();
entrypoint->valuesAtTail.local(i).clear();
block->valuesAtTail.local(i).clear();
}
}
+
if (m_graph.m_form == SSA) {
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
changed |= merge(basicBlock, data->cases[i].target.block);
return changed;
}
+
+ case EntrySwitch: {
+ EntrySwitchData* data = terminal->entrySwitchData();
+ bool changed = false;
+ for (unsigned i = data->cases.size(); i--;)
+ changed |= merge(basicBlock, data->cases[i]);
+ return changed;
+ }
case Return:
case TailCall:
common.shrinkToFit();
osrEntry.shrinkToFit();
osrExit.shrinkToFit();
- catchEntrypoints.shrinkToFit();
speculationRecovery.shrinkToFit();
minifiedDFG.prepareAndShrink();
variableEventStream.shrinkToFit();
auto comparator = [] (const auto& a, const auto& b) {
return a.m_bytecodeIndex < b.m_bytecodeIndex;
};
- std::sort(catchEntrypoints.begin(), catchEntrypoints.end(), comparator);
std::sort(osrEntry.begin(), osrEntry.end(), comparator);
#if !ASSERT_DISABLED
for (unsigned i = 0; i + 1 < osrVector.size(); ++i)
ASSERT(osrVector[i].m_bytecodeIndex <= osrVector[i + 1].m_bytecodeIndex);
};
- verifyIsSorted(catchEntrypoints);
verifyIsSorted(osrEntry);
#endif
}
getOSREntryDataBytecodeIndex);
}
- CatchEntrypointData* catchOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
- {
- return tryBinarySearch<CatchEntrypointData, unsigned>(
- catchEntrypoints, catchEntrypoints.size(), bytecodeIndex,
- [] (const CatchEntrypointData* item) { return item->m_bytecodeIndex; });
- }
-
void finalizeOSREntrypoints();
- void appendCatchEntrypoint(unsigned bytecodeIndex, unsigned machineCodeOffset, Vector<FlushFormat>&& argumentFormats)
- {
- catchEntrypoints.append(CatchEntrypointData { bytecodeIndex, machineCodeOffset, WTFMove(argumentFormats) });
- }
-
unsigned appendOSRExit(const OSRExit& exit)
{
unsigned result = osrExit.size();
public:
CommonData common;
Vector<DFG::OSREntryData> osrEntry;
- Vector<CatchEntrypointData> catchEntrypoints;
SegmentedVector<DFG::OSRExit, 8> osrExit;
Vector<DFG::SpeculationRecovery> speculationRecovery;
DFG::VariableEventStream variableEventStream;
DFG::MinifiedGraph minifiedDFG;
- ScratchBuffer* catchOSREntryBuffer;
#if ENABLE(FTL_JIT)
uint8_t neverExecutedEntry { 1 };
{
RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
- m_jitCode->appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead), WTFMove(argumentFormats));
+ m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf(blockHead).executableAddress(), WTFMove(argumentFormats));
}
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
if (m_graph.m_maxLocalsForCatchOSREntry) {
uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
- m_jitCode->catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
+ m_jitCode->common.catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
}
}
case LazyJSConstant:
case Int52Constant:
case MovHint:
+ case InitializeEntrypointArguments:
case SetLocal:
case Flush:
case Phantom:
case PhantomLocal:
case CountExecution:
case Jump:
+ case EntrySwitch:
case Branch:
case Unreachable:
case DoubleRep:
bool didUseJumpTable;
};
+struct EntrySwitchData {
+ Vector<BasicBlock*> cases;
+};
+
struct CallVarargsData {
int firstVarArgOffset;
};
return op() == Switch;
}
+ bool isEntrySwitch() const
+ {
+ return op() == EntrySwitch;
+ }
+
bool isTerminal()
{
switch (op()) {
case Jump:
case Branch:
case Switch:
+ case EntrySwitch:
case Return:
case TailCall:
case DirectTailCall:
ASSERT(isSwitch());
return m_opInfo.as<SwitchData*>();
}
+
+ EntrySwitchData* entrySwitchData()
+ {
+ ASSERT(isEntrySwitch());
+ return m_opInfo.as<EntrySwitchData*>();
+ }
unsigned numSuccessors()
{
return 2;
case Switch:
return switchData()->cases.size() + 1;
+ case EntrySwitch:
+ return entrySwitchData()->cases.size();
default:
return 0;
}
return switchData()->cases[index].target.block;
RELEASE_ASSERT(index == switchData()->cases.size());
return switchData()->fallThrough.block;
- }
+ } else if (isEntrySwitch())
+ return entrySwitchData()->cases[index];
switch (index) {
case 0:
return m_opInfo.as<Profiler::ExecutionCounter*>();
}
+ unsigned entrypointIndex()
+ {
+ ASSERT(op() == InitializeEntrypointArguments);
+ return m_opInfo.as<unsigned>();
+ }
+
bool shouldGenerate()
{
return m_refCount;
macro(Jump, NodeMustGenerate) \
macro(Branch, NodeMustGenerate) \
macro(Switch, NodeMustGenerate) \
+ macro(EntrySwitch, NodeMustGenerate) \
macro(Return, NodeMustGenerate) \
macro(TailCall, NodeMustGenerate | NodeHasVarArgs) \
macro(DirectTailCall, NodeMustGenerate | NodeHasVarArgs) \
/* Nodes for DOM JIT */\
macro(CallDOMGetter, NodeResultJS | NodeMustGenerate) \
macro(CallDOM, NodeResultJS | NodeMustGenerate) \
+ /* Metadata node that initializes the state for flushed argument types at an entrypoint in the program. */ \
+ /* Currently, we only use this for the blocks an EntrySwitch branches to at the root of the program. */ \
+ /* This is only used in SSA. */ \
+ macro(InitializeEntrypointArguments, NodeMustGenerate)
// This enum generates a monotonically increasing id for all Node types,
// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
BasicBlock* root = m_graph.block(0);
root->ssa->availabilityAtHead.m_locals.fill(Availability::unavailable());
- for (unsigned argument = m_graph.m_argumentFormats.size(); argument--;) {
- FlushedAt flushedAt = FlushedAt(
- m_graph.m_argumentFormats[argument],
- virtualRegisterForArgument(argument));
- root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability(flushedAt);
- }
+
+ for (unsigned argument = 0; argument < m_graph.block(0)->valuesAtHead.numberOfArguments(); ++argument)
+ root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability::unavailable();
// This could be made more efficient by processing blocks in reverse postorder.
block->ssa->availabilityAtTail = calculator.m_availability;
changed = true;
-
+
for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
BasicBlock* successor = block->successor(successorIndex);
successor->ssa->availabilityAtHead.merge(calculator.m_availability);
+ }
+
+ for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
+ BasicBlock* successor = block->successor(successorIndex);
successor->ssa->availabilityAtHead.pruneByLiveness(
m_graph, successor->at(0)->origin.forExit);
}
m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
break;
}
+
+ case InitializeEntrypointArguments: {
+ unsigned entrypointIndex = node->entrypointIndex();
+ const Vector<FlushFormat>& argumentFormats = m_graph.m_argumentFormats[entrypointIndex];
+ for (unsigned argument = argumentFormats.size(); argument--; ) {
+ FlushedAt flushedAt = FlushedAt(argumentFormats[argument], virtualRegisterForArgument(argument));
+ m_availability.m_locals.argument(argument) = Availability(flushedAt);
+ }
+ break;
+ }
case LoadVarargs:
case ForwardVarargs: {
}
void* prepareCatchOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
-{
- if (!Options::useOSREntryToDFG())
+{
+ ASSERT(codeBlock->jitType() == JITCode::DFGJIT || codeBlock->jitType() == JITCode::FTLJIT);
+
+ if (!Options::useOSREntryToDFG() && codeBlock->jitCode()->jitType() == JITCode::DFGJIT)
+ return nullptr;
+ if (!Options::useOSREntryToFTL() && codeBlock->jitCode()->jitType() == JITCode::FTLJIT)
return nullptr;
VM& vm = exec->vm();
- ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
- DFG::JITCode* jitCode = codeBlock->jitCode()->dfg();
- RELEASE_ASSERT(jitCode);
- DFG::CatchEntrypointData* catchEntrypoint = jitCode->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
+ CommonData* dfgCommon = codeBlock->jitCode()->dfgCommon();
+ RELEASE_ASSERT(dfgCommon);
+ DFG::CatchEntrypointData* catchEntrypoint = dfgCommon->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
if (!catchEntrypoint) {
// This can be null under some circumstances. The most common is that we didn't
// compile this op_catch as an entrypoint since it had never executed when starting
}
// We're only allowed to OSR enter if we've proven we have compatible argument types.
- for (unsigned argument = 0; argument < catchEntrypoint->m_argumentFormats.size(); ++argument) {
+ for (unsigned argument = 0; argument < catchEntrypoint->argumentFormats.size(); ++argument) {
JSValue value = exec->uncheckedR(virtualRegisterForArgument(argument)).jsValue();
- switch (catchEntrypoint->m_argumentFormats[argument]) {
+ switch (catchEntrypoint->argumentFormats[argument]) {
case DFG::FlushedInt32:
if (!value.isInt32())
return nullptr;
}
}
- unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
+ unsigned frameSizeForCheck = dfgCommon->requiredRegisterCountForExecutionAndExit();
if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
return nullptr;
ASSERT(Interpreter::getOpcodeID(exec->codeBlock()->instructions()[exec->bytecodeOffset()].u.opcode) == op_catch);
ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(exec->codeBlock()->instructions()[exec->bytecodeOffset() + 3].u.pointer);
- JSValue* dataBuffer = reinterpret_cast<JSValue*>(jitCode->catchOSREntryBuffer->dataBuffer());
+ JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
unsigned index = 0;
buffer->forEach([&] (ValueProfileAndOperand& profile) {
if (!VirtualRegister(profile.m_operand).isLocal())
++index;
});
- jitCode->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);
-
- return jitCode->executableAddressAtOffset(catchEntrypoint->m_machineCodeOffset);
+ dfgCommon->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);
+ return catchEntrypoint->machineCode;
}
} } // namespace JSC::DFG
}
struct CatchEntrypointData {
- unsigned m_bytecodeIndex;
- unsigned m_machineCodeOffset;
// We use this when doing OSR entry at catch. We prove the arguments
// are of the expected type before entering at a catch block.
- Vector<FlushFormat> m_argumentFormats;
+ void* machineCode;
+ Vector<FlushFormat> argumentFormats;
+ unsigned bytecodeIndex;
};
// Returns a pointer to a data buffer that the OSR entry thunk will recognize and
m_graph.m_entrypointToArguments.clear();
m_graph.m_entrypointToArguments.add(newRoot, newArguments);
- m_graph.m_cpsCFG = std::make_unique<CPSCFG>(m_graph);
-
+ m_graph.invalidateCFG();
m_graph.resetReachability();
m_graph.killUnreachableBlocks();
RELEASE_ASSERT_NOT_REACHED();
break;
+ case EntrySwitch:
case Upsilon:
// These don't get inserted until we go into SSA.
RELEASE_ASSERT_NOT_REACHED();
case ForwardVarargs:
case PutDynamicVar:
case NukeStructureAndSetButterfly:
+ case InitializeEntrypointArguments:
break;
// This gets ignored because it only pretends to produce a value.
#if ENABLE(DFG_JIT)
#include "DFGBasicBlockInlines.h"
+#include "DFGBlockInsertionSet.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
public:
SSAConversionPhase(Graph& graph)
: Phase(graph, "SSA conversion")
- , m_calculator(graph)
, m_insertionSet(graph)
{
}
bool run()
{
RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
+ RELEASE_ASSERT(!m_graph.m_isInSSAConversion);
+ m_graph.m_isInSSAConversion = true;
m_graph.clearReplacements();
m_graph.clearCPSCFGData();
+
+ HashMap<unsigned, BasicBlock*, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> entrypointIndexToArgumentsBlock;
+
+ {
+ m_graph.m_numberOfEntrypoints = m_graph.m_entrypoints.size();
+
+ BlockInsertionSet blockInsertionSet(m_graph);
+ BasicBlock* newRoot = blockInsertionSet.insert(0, 1.0f);
+
+ EntrySwitchData* entrySwitchData = m_graph.m_entrySwitchData.add();
+ for (unsigned entrypointIndex = 0; entrypointIndex < m_graph.m_numberOfEntrypoints; ++entrypointIndex) {
+ BasicBlock* oldRoot = m_graph.m_entrypoints[entrypointIndex];
+ entrypointIndexToArgumentsBlock.add(entrypointIndex, oldRoot);
+ entrySwitchData->cases.append(oldRoot);
+
+ ASSERT(oldRoot->predecessors.isEmpty());
+ oldRoot->predecessors.append(newRoot);
+
+ if (oldRoot->isCatchEntrypoint) {
+ ASSERT(!!entrypointIndex);
+ m_graph.m_entrypointIndexToCatchBytecodeOffset.add(entrypointIndex, oldRoot->bytecodeBegin);
+ }
+
+ NodeOrigin origin = oldRoot->at(0)->origin;
+ m_insertionSet.insertNode(
+ 0, SpecNone, InitializeEntrypointArguments, origin, OpInfo(entrypointIndex));
+ m_insertionSet.insertNode(
+ 0, SpecNone, ExitOK, origin);
+ m_insertionSet.execute(oldRoot);
+ }
+
+ RELEASE_ASSERT(entrySwitchData->cases[0] == m_graph.block(0)); // We strongly assume the normal call entrypoint is the first item in the list.
+
+ m_graph.m_argumentFormats.resize(m_graph.m_numberOfEntrypoints);
+
+ const bool exitOK = false;
+ NodeOrigin origin { CodeOrigin(0), CodeOrigin(0), exitOK };
+ newRoot->appendNode(
+ m_graph, SpecNone, EntrySwitch, origin, OpInfo(entrySwitchData));
+
+ m_graph.m_entrypoints.clear();
+ m_graph.m_entrypoints.append(newRoot);
+
+ blockInsertionSet.execute();
+ }
+
+ SSACalculator calculator(m_graph);
+
m_graph.ensureSSADominators();
if (verbose) {
if (!variable.isRoot())
continue;
- SSACalculator::Variable* ssaVariable = m_calculator.newVariable();
+ SSACalculator::Variable* ssaVariable = calculator.newVariable();
ASSERT(ssaVariable->index() == m_variableForSSAIndex.size());
m_variableForSSAIndex.append(&variable);
m_ssaVariableForVariable.add(&variable, ssaVariable);
m_argumentMapping.add(node, childNode);
}
- m_calculator.newDef(
+ calculator.newDef(
m_ssaVariableForVariable.get(variable), block, childNode);
}
// Decide where Phis are to be inserted. This creates the Phi's but doesn't insert them
// yet. We will later know where to insert based on where SSACalculator tells us to.
- m_calculator.computePhis(
+ calculator.computePhis(
[&] (SSACalculator::Variable* ssaVariable, BasicBlock* block) -> Node* {
VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
for (unsigned i = 0; i < m_variableForSSAIndex.size(); ++i)
dataLog(" ", i, ": ", VariableAccessDataDump(m_graph, m_variableForSSAIndex[i]), "\n");
dataLog("\n");
- dataLog("SSA calculator: ", m_calculator, "\n");
+ dataLog("SSA calculator: ", calculator, "\n");
}
// Do the bulk of the SSA conversion. For each block, this tracks the operand->Node
dataLog("Considering live variable ", VariableAccessDataDump(m_graph, variable), " at head of block ", *block, "\n");
SSACalculator::Variable* ssaVariable = m_ssaVariableForVariable.get(variable);
- SSACalculator::Def* def = m_calculator.reachingDefAtHead(block, ssaVariable);
+ SSACalculator::Def* def = calculator.reachingDefAtHead(block, ssaVariable);
if (!def) {
// If we are required to insert a Phi, then we won't have a reaching def
// at head.
// valueForOperand with those Phis. For Phis associated with variables that are not
// flushed, we also insert a MovHint.
size_t phiInsertionPoint = 0;
- for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(block)) {
+ for (SSACalculator::Def* phiDef : calculator.phisForBlock(block)) {
VariableAccessData* variable = m_variableForSSAIndex[phiDef->variable()->index()];
m_insertionSet.insert(phiInsertionPoint, phiDef->value());
NodeOrigin upsilonOrigin = terminal.node->origin;
for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
BasicBlock* successorBlock = block->successor(successorIndex);
- for (SSACalculator::Def* phiDef : m_calculator.phisForBlock(successorBlock)) {
+ for (SSACalculator::Def* phiDef : calculator.phisForBlock(successorBlock)) {
Node* phiNode = phiDef->value();
SSACalculator::Variable* ssaVariable = phiDef->variable();
VariableAccessData* variable = m_variableForSSAIndex[ssaVariable->index()];
block->ssa = std::make_unique<BasicBlock::SSAData>(block);
}
- // FIXME: Support multiple entrypoints in DFG SSA:
- // https://bugs.webkit.org/show_bug.cgi?id=175396
- RELEASE_ASSERT(m_graph.m_entrypoints.size() == 1);
- auto& arguments = m_graph.m_entrypointToArguments.find(m_graph.block(0))->value;
- m_graph.m_argumentFormats.resize(arguments.size());
- for (unsigned i = arguments.size(); i--;) {
- FlushFormat format = FlushedJSValue;
-
- Node* node = m_argumentMapping.get(arguments[i]);
-
- RELEASE_ASSERT(node);
- format = node->stackAccessData()->format;
-
- m_graph.m_argumentFormats[i] = format;
- arguments[i] = node; // Record the load that loads the arguments for the benefit of exit profiling.
+ for (auto& pair : entrypointIndexToArgumentsBlock) {
+ unsigned entrypointIndex = pair.key;
+ BasicBlock* oldRoot = pair.value;
+ ArgumentsVector& arguments = m_graph.m_entrypointToArguments.find(oldRoot)->value;
+ Vector<FlushFormat> argumentFormats;
+ argumentFormats.reserveInitialCapacity(arguments.size());
+ for (unsigned i = 0; i < arguments.size(); ++i) {
+ Node* node = m_argumentMapping.get(arguments[i]);
+ RELEASE_ASSERT(node);
+ argumentFormats.uncheckedAppend(node->stackAccessData()->format);
+ }
+ m_graph.m_argumentFormats[entrypointIndex] = WTFMove(argumentFormats);
}
-
+
+ m_graph.m_entrypointToArguments.clear();
+
+ RELEASE_ASSERT(m_graph.m_isInSSAConversion);
+ m_graph.m_isInSSAConversion = false;
+
m_graph.m_form = SSA;
if (verbose) {
}
private:
- SSACalculator m_calculator;
InsertionSet m_insertionSet;
HashMap<VariableAccessData*, SSACalculator::Variable*> m_ssaVariableForVariable;
HashMap<Node*, Node*> m_argumentMapping;
bool performSSAConversion(Graph& graph)
{
- RELEASE_ASSERT(!graph.m_isInSSAConversion);
- graph.m_isInSSAConversion = true;
bool result = runPhase<SSAConversionPhase>(graph);
- RELEASE_ASSERT(graph.m_isInSSAConversion);
- graph.m_isInSSAConversion = false;
return result;
}
case Jump:
case Branch:
case Switch:
+ case EntrySwitch:
case Return:
case TailCall:
case DirectTailCall:
case AtomicsSub:
case AtomicsXor:
case AtomicsIsLockFree:
+ case InitializeEntrypointArguments:
return true;
case ArraySlice:
}
m_jit.jitCode()->finalizeOSREntrypoints();
+ m_jit.jitCode()->common.finalizeCatchEntrypoints();
ASSERT(osrEntryIndex == m_osrEntryHeads.size());
GPRReg tagGPR = tag.gpr();
GPRReg payloadGPR = payload.gpr();
- JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
+ JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
m_jit.move(CCallHelpers::TrustedImmPtr(ptr), tempGPR);
m_jit.load32(CCallHelpers::Address(tempGPR, TagOffset), tagGPR);
m_jit.load32(CCallHelpers::Address(tempGPR, PayloadOffset), payloadGPR);
case AtomicsSub:
case AtomicsXor:
case IdentityWithProfile:
+ case InitializeEntrypointArguments:
+ case EntrySwitch:
DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend");
break;
}
break;
case ExtractCatchLocal: {
- JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
+ JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
m_jit.move(CCallHelpers::TrustedImmPtr(ptr), tempGPR);
#endif // ENABLE(FTL_JIT)
case LastNodeType:
+ case EntrySwitch:
+ case InitializeEntrypointArguments:
case Phi:
case Upsilon:
case ExtractOSREntryLocal:
applyCounts(data->fallThrough);
break;
}
+
+ case EntrySwitch: {
+ DFG_CRASH(m_graph, terminal, "Unexpected EntrySwitch in CPS form.");
+ break;
+ }
default:
break;
// in release builds.
VALIDATE((m_graph.block(0)), m_graph.isEntrypoint(m_graph.block(0)));
+ VALIDATE((m_graph.block(0)), m_graph.block(0) == m_graph.m_entrypoints[0]);
+
+ for (BasicBlock* block : m_graph.m_entrypoints)
+ VALIDATE((block), block->predecessors.isEmpty());
// Validate that all local variables at the head of all entrypoints are dead.
for (BasicBlock* entrypoint : m_graph.m_entrypoints) {
case PutStack:
case KillStack:
case GetStack:
+ case EntrySwitch:
+ case InitializeEntrypointArguments:
VALIDATE((node), !"unexpected node type in CPS");
break;
case MaterializeNewObject: {
// FIXME: Add more things here.
// https://bugs.webkit.org/show_bug.cgi?id=123471
+ VALIDATE((), m_graph.m_entrypoints.size() == 1);
+ VALIDATE((), m_graph.m_entrypoints[0] == m_graph.block(0));
+ VALIDATE((), !m_graph.m_argumentFormats.isEmpty()); // We always have at least one entrypoint.
+
+ for (unsigned entrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys())
+ VALIDATE((), entrypointIndex > 0); // By convention, 0 is the entrypoint index for the op_enter entrypoint, which can not be in a catch.
+
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
break;
}
+ case EntrySwitch:
+ VALIDATE((node), node->entrySwitchData()->cases.size() == m_graph.m_numberOfEntrypoints);
+ break;
+
+ case InitializeEntrypointArguments:
+ VALIDATE((node), node->entrypointIndex() < m_graph.m_numberOfEntrypoints);
+ break;
+
default:
m_graph.doToChildren(
node,
getLocalPositions.operand(operand) < setLocalPositions.operand(operand));
}
+ void reportValidationContext() { }
+
void reportValidationContext(Node* node)
{
dataLogF("@%u", node->index());
case Phi:
case Upsilon:
case ExtractOSREntryLocal:
+ case ExtractCatchLocal:
case LoopHint:
case SkipScope:
case GetGlobalObject:
case GetMyArgumentByVal:
case GetMyArgumentByValOutOfBounds:
case ForwardVarargs:
+ case EntrySwitch:
case Switch:
case TypeOf:
case PutById:
case AtomicsSub:
case AtomicsXor:
case AtomicsIsLockFree:
+ case InitializeEntrypointArguments:
// These are OK.
break;
});
state.finalizer->b3CodeLinkBuffer = std::make_unique<LinkBuffer>(jit, codeBlock, JITCompilationCanFail);
+
if (state.finalizer->b3CodeLinkBuffer->didFailToAllocate()) {
state.allocationFailed = true;
return;
codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(PCToCodeOriginMapBuilder(vm, WTFMove(originMap)), *state.finalizer->b3CodeLinkBuffer));
state.generatedFunction = bitwise_cast<GeneratedFunction>(
- state.finalizer->b3CodeLinkBuffer->entrypoint().executableAddress());
+ state.finalizer->b3CodeLinkBuffer->locationOf(state.proc->entrypointLabel(0)));
state.jitCode->initializeB3Byproducts(state.proc->releaseByproducts());
+ for (auto pair : state.graph.m_entrypointIndexToCatchBytecodeOffset) {
+ unsigned catchBytecodeOffset = pair.value;
+ unsigned entrypointIndex = pair.key;
+ Vector<FlushFormat> argumentFormats = state.graph.m_argumentFormats[entrypointIndex];
+ state.jitCode->common.appendCatchEntrypoint(
+ catchBytecodeOffset, state.finalizer->b3CodeLinkBuffer->locationOf(state.proc->entrypointLabel(entrypointIndex)).executableAddress(), WTFMove(argumentFormats));
+ }
+ state.jitCode->common.finalizeCatchEntrypoints();
+
if (B3::Air::Disassembler* disassembler = state.proc->code().disassembler()) {
PrintStream& out = WTF::dataFile();
#if ENABLE(FTL_JIT)
+#include "AirCode.h"
#include "AirGenerationContext.h"
#include "AllowMacroScratchRegisterUsage.h"
#include "AtomicsObject.h"
"_", codeBlock()->hash());
} else
name = "jsBody";
+
+ {
+ m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
+ CodeBlock* codeBlock = m_graph.m_codeBlock;
+
+ RefPtr<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
+ [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+ jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+ jit.emitSave(code.calleeSaveRegisterAtOffsetList());
+ jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
+ });
+
+ for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
+ RELEASE_ASSERT(catchEntrypointIndex != 0);
+ m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator);
+ }
+
+ if (m_graph.m_maxLocalsForCatchOSREntry) {
+ uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
+ m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
+ }
+ }
m_graph.ensureSSADominators();
// We use prologue frequency for all of the initialization code.
m_out.setFrequency(1);
- m_prologue = m_out.newBlock();
+ LBasicBlock prologue = m_out.newBlock();
+ LBasicBlock callEntrypointArgumentSpeculations = m_out.newBlock();
m_handleExceptions = m_out.newBlock();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
// Back to prologue frequency for any bocks that get sneakily created in the initialization code.
m_out.setFrequency(1);
- m_out.appendTo(m_prologue, m_handleExceptions);
- m_out.initializeConstants(m_proc, m_prologue);
+ m_out.appendTo(prologue, callEntrypointArgumentSpeculations);
+ m_out.initializeConstants(m_proc, prologue);
createPhiVariables();
size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
});
LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
- // Check Arguments.
- availabilityMap().clear();
- availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
- for (unsigned i = codeBlock()->numParameters(); i--;) {
- availabilityMap().m_locals.argument(i) =
- Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
- }
- m_node = nullptr;
- m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
- auto& arguments = m_graph.m_entrypointToArguments.find(m_graph.block(0))->value;
- for (unsigned i = codeBlock()->numParameters(); i--;) {
- Node* node = arguments[i];
- m_out.setOrigin(node);
- VirtualRegister operand = virtualRegisterForArgument(i);
-
- LValue jsValue = m_out.load64(addressFor(operand));
-
- if (node) {
- DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
-
- // This is a hack, but it's an effective one. It allows us to do CSE on the
- // primordial load of arguments. This assumes that the GetLocal that got put in
- // place of the original SetArgument doesn't have any effects before it. This
- // should hold true.
- m_loadedArgumentValues.add(node, jsValue);
+
+ {
+ Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
+ successors[0] = callEntrypointArgumentSpeculations;
+ for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
+ // Currently, the only other entrypoint is an op_catch entrypoint.
+ // We do OSR entry at op_catch, and we prove argument formats before
+ // jumping to FTL code, so we don't need to check argument types here
+ // for these entrypoints.
+ successors[i] = firstDFGBasicBlock;
}
-
- switch (m_graph.m_argumentFormats[i]) {
- case FlushedInt32:
- speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
- break;
- case FlushedBoolean:
- speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
- break;
- case FlushedCell:
- speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
- break;
- case FlushedJSValue:
- break;
- default:
- DFG_CRASH(m_graph, node, "Bad flush format for argument");
- break;
+
+ m_out.entrySwitch(successors);
+ m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
+
+ m_node = nullptr;
+ m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
+
+ // Check Arguments.
+ availabilityMap().clear();
+ availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ availabilityMap().m_locals.argument(i) =
+ Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
+ }
+
+ for (unsigned i = codeBlock()->numParameters(); i--;) {
+ MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
+ VirtualRegister operand = virtualRegisterForArgument(i);
+ LValue jsValue = m_out.load64(addressFor(operand));
+
+ switch (m_graph.m_argumentFormats[0][i]) {
+ case FlushedInt32:
+ speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
+ break;
+ case FlushedBoolean:
+ speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
+ break;
+ case FlushedCell:
+ speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
+ break;
+ case FlushedJSValue:
+ break;
+ default:
+ DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
+ break;
+ }
}
+ m_out.jump(firstDFGBasicBlock);
}
- m_out.jump(firstDFGBasicBlock);
+
m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
case ExtractOSREntryLocal:
compileExtractOSREntryLocal();
break;
+ case ExtractCatchLocal:
+ compileExtractCatchLocal();
+ break;
case GetStack:
compileGetStack();
break;
case DFG::Switch:
compileSwitch();
break;
+ case DFG::EntrySwitch:
+ compileEntrySwitch();
+ break;
case DFG::Return:
compileReturn();
break;
case PutHint:
case BottomValue:
case KillStack:
+ case InitializeEntrypointArguments:
break;
default:
DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
}
+
+ void compileExtractCatchLocal()
+ {
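+ // Load this node's slot out of the shared catchOSREntryBuffer allocated in the prologue.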
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
+ setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
+ }
void compileGetStack()
{
- // GetLocals arise only for captured variables and arguments. For arguments, we might have
- // already loaded it.
- if (LValue value = m_loadedArgumentValues.get(m_node)) {
- setJSValue(value);
- return;
- }
-
StackAccessData* data = m_node->stackAccessData();
AbstractValue& value = m_state.variables().operand(data->local);
DFG_CRASH(m_graph, m_node, "Bad switch kind");
}
+
+ void compileEntrySwitch()
+ {
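+ // Lower DFG::EntrySwitch directly to a B3 EntrySwitch over the lowered successor blocks.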
+ Vector<LBasicBlock> successors;
+ for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
+ successors.append(lowBlock(successor));
+ m_out.entrySwitch(successors);
+ }
void compileReturn()
{
{
appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
}
+
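+ // Variant of speculate() for callers that have no Node* to derive a value profile from,
+ // such as the prologue argument checks, which run with m_node set to nullptr.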
+ void speculate(
+ ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
+ {
+ appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
+ }
void terminate(ExitKind kind)
{
OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
{
+ return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
+ }
+
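+ // Overload for callers that already hold a MethodOfGettingAValueProfile rather than a Node*.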
+ OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
+ {
return &m_ftlState.jitCode->osrExitDescriptors.alloc(
- lowValue.format(), m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
+ lowValue.format(), profile,
availabilityMap().m_locals.numberOfArguments(),
availabilityMap().m_locals.numberOfLocals());
}
-
+
void appendOSRExit(
ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
NodeOrigin origin, bool isExceptionHandler = false)
{
+ return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
+ failCondition, origin, isExceptionHandler);
+ }
+
+ void appendOSRExit(
+ ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
+ NodeOrigin origin, bool isExceptionHandler = false)
+ {
if (verboseCompilationEnabled()) {
dataLog(" OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n");
if (!m_availableRecoveries.isEmpty())
return;
blessSpeculation(
- m_out.speculate(failCondition), kind, lowValue, highValue, origin);
+ m_out.speculate(failCondition), kind, lowValue, profile, origin);
}
void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
{
- OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, highValue);
+ blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
+ }
+
+ void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
+ {
+ OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);
value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));
Output m_out;
Procedure& m_proc;
- LBasicBlock m_prologue;
LBasicBlock m_handleExceptions;
HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;
HashMap<Node*, LoweredNodeValue> m_storageValues;
HashMap<Node*, LoweredNodeValue> m_doubleValues;
- // This is a bit of a hack. It prevents B3 from having to do CSE on loading of arguments.
- // It's nice to have these optimizations on our end because we can guarantee them a bit better.
- // Probably also saves B3 compile time.
- HashMap<Node*, LValue> m_loadedArgumentValues;
-
HashMap<Node*, LValue> m_phis;
LocalOSRAvailabilityCalculator m_availabilityCalculator;
value.value()->as<B3::UpsilonValue>()->setPhi(phi);
}
+void Output::entrySwitch(const Vector<LBasicBlock>& cases)
+{
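+ // Append a B3 EntrySwitch terminator; the number of cases must match the procedure's entrypoint count.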
+ RELEASE_ASSERT(cases.size() == m_proc.numEntrypoints());
+ m_block->appendNew<Value>(m_proc, EntrySwitch, origin());
+ for (LBasicBlock block : cases)
+ m_block->appendSuccessor(FrequentedBlock(block));
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
}
}
+ void entrySwitch(const Vector<LBasicBlock>&);
+
void ret(LValue);
void unreachable();
#include "ErrorHandlingScope.h"
#include "EvalCodeBlock.h"
#include "ExceptionFuzz.h"
+#include "FTLOSREntry.h"
#include "FrameTracers.h"
#include "FunctionCodeBlock.h"
#include "GetterSetter.h"
NativeCallFrameTracer tracer(&vm, exec);
CodeBlock* optimizedReplacement = exec->codeBlock()->replacement();
- if (optimizedReplacement->jitType() != JITCode::DFGJIT)
- return nullptr;
-
- return static_cast<char*>(DFG::prepareCatchOSREntry(exec, optimizedReplacement, bytecodeIndex));
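+ // Catch OSR entry is prepared the same way whether the optimized replacement is DFG or FTL code.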
+ switch (optimizedReplacement->jitType()) {
+ case JITCode::DFGJIT:
+ case JITCode::FTLJIT:
+ return static_cast<char*>(DFG::prepareCatchOSREntry(exec, optimizedReplacement, bytecodeIndex));
+ default:
+ break;
+ }
+ return nullptr;
}
char* JIT_OPERATION operationTryOSREnterAtCatchAndValueProfile(ExecState* exec, uint32_t bytecodeIndex)
CodeBlock* codeBlock = exec->codeBlock();
CodeBlock* optimizedReplacement = codeBlock->replacement();
- if (optimizedReplacement->jitType() == JITCode::DFGJIT)
+
+ switch (optimizedReplacement->jitType()) {
+ case JITCode::DFGJIT:
+ case JITCode::FTLJIT:
return static_cast<char*>(DFG::prepareCatchOSREntry(exec, optimizedReplacement, bytecodeIndex));
+ default:
+ break;
+ }
codeBlock->ensureCatchLivenessIsComputedForBytecodeOffset(bytecodeIndex);
ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(codeBlock->instructions()[bytecodeIndex + 3].u.pointer);