+2015-09-28 Michael Saboff <msaboff@apple.com>
+
+ [ES6] Implement tail calls in the FTL
+ https://bugs.webkit.org/show_bug.cgi?id=148664
+
+ Reviewed by Filip Pizlo.
+
+ Added one new tail call test and enabled other tail call tests.
+
+ * js/caller-property-expected.txt:
+ * js/script-tests/caller-property.js:
+ (string_appeared_here.nonStrictCallee):
+ (strictCallee):
+ (nonStrictCaller):
+ (strictCaller):
+ (strictTailCaller):
+ (getFooGetter):
+
2015-09-28 Ryosuke Niwa <rniwa@webkit.org>
relatedNode should be retargeted respecting slots
PASS nonStrictCaller(strictCallee) threw exception TypeError: Type error.
PASS strictCaller(nonStrictCallee) threw exception TypeError: Function.caller used to retrieve strict caller.
PASS strictCaller(strictCallee) threw exception TypeError: Type error.
+PASS strictTailCaller(nonStrictCallee) is null
+PASS strictTailCaller(strictCallee) threw exception TypeError: Type error.
PASS nonStrictCaller(boundNonStrictCallee) is nonStrictCaller
PASS nonStrictCaller(boundStrictCallee) threw exception TypeError: Type error.
PASS strictCaller(boundNonStrictCallee) threw exception TypeError: Function.caller used to retrieve strict caller.
PASS strictCaller(boundStrictCallee) threw exception TypeError: Type error.
+PASS strictTailCaller(boundNonStrictCallee) is null
+PASS strictTailCaller(boundStrictCallee) threw exception TypeError: Type error.
PASS nonStrictGetter(nonStrictAccessor) is nonStrictGetter
PASS nonStrictSetter(nonStrictAccessor) is true
PASS nonStrictGetter(strictAccessor) threw exception TypeError: Type error.
function nonStrictCallee() { return nonStrictCallee.caller; }
function strictCallee() { "use strict"; return strictCallee.caller; }
function nonStrictCaller(x) { return x(); }
-function strictCaller(x) { "use strict"; return x(); }
+// Tail calls leak and show our caller's caller, which is null here
+function strictCaller(x) { "use strict"; var result = x(); return result; }
+function strictTailCaller(x) { "use strict"; return x(); }
shouldBe("nonStrictCaller(nonStrictCallee)", "nonStrictCaller");
shouldThrow("nonStrictCaller(strictCallee)", '"TypeError: Type error"');
shouldThrow("strictCaller(nonStrictCallee)", '"TypeError: Function.caller used to retrieve strict caller"');
shouldThrow("strictCaller(strictCallee)", '"TypeError: Type error"');
+shouldBe("strictTailCaller(nonStrictCallee)", "null");
+shouldThrow("strictTailCaller(strictCallee)", '"TypeError: Type error"');
// .caller within a bound function reaches the caller, ignoring the binding.
var boundNonStrictCallee = nonStrictCallee.bind();
shouldThrow("nonStrictCaller(boundStrictCallee)", '"TypeError: Type error"');
shouldThrow("strictCaller(boundNonStrictCallee)", '"TypeError: Function.caller used to retrieve strict caller"');
shouldThrow("strictCaller(boundStrictCallee)", '"TypeError: Type error"');
+shouldBe("strictTailCaller(boundNonStrictCallee)", "null");
+shouldThrow("strictTailCaller(boundStrictCallee)", '"TypeError: Type error"');
// Check that .caller works (or throws) as expected, over an accessor call.
function getFooGetter(x) { return Object.getOwnPropertyDescriptor(x, 'foo').get; }
ftl/FTLJSCall.cpp
ftl/FTLJSCallBase.cpp
ftl/FTLJSCallVarargs.cpp
+ ftl/FTLJSTailCall.cpp
ftl/FTLLink.cpp
ftl/FTLLocation.cpp
ftl/FTLLowerDFGToLLVM.cpp
+2015-09-28 Basile Clement <basile_clement@apple.com>
+
+ [ES6] Implement tail calls in the FTL
+ https://bugs.webkit.org/show_bug.cgi?id=148664
+
+ Reviewed by Filip Pizlo.
+
+ This patch implements the tail call opcodes in the FTL, making tail
+ calls available through all tiers. The changes are relatively
+ straightforward, although the frame shuffler had to be extended to
+ handle the possibility of running out of stack when spilling or
+ building a slow path frame. The other tiers always ensure that we have
+ enough stack space to build the new frame at the bottom of the old one,
+ but that is not true for the FTL.
+
+ Moreover, for efficiency, this adds to the shuffler the ability to
+ record the state of the TagTypeNumber, and to re-use the same register
+ when doing several consecutive integer boxings with no spilling in
+ between.
+
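+ As a rough illustration (a sketch in the spirit of the es6 tests enabled
+ below, not code taken from this patch), strict-mode mutual recursion such as
+
+     function even(n) { "use strict"; return n == 0 ? true : odd(n - 1); }
+     function odd(n) { "use strict"; return n == 1 ? true : even(n - 1); }
+     even(1000000);
+
+ now runs to completion in the FTL without growing the stack.
+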
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * bytecode/ValueRecovery.h:
+ (JSC::ValueRecovery::inRegister):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::handleInlining):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * dfg/DFGNode.h:
+ (JSC::DFG::Node::isFunctionTerminal):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ * dfg/DFGTierUpCheckInjectionPhase.cpp:
+ (JSC::DFG::TierUpCheckInjectionPhase::run):
+ * ftl/FTLCapabilities.cpp:
+ (JSC::FTL::canCompile):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::mmAllocateDataSection):
+ * ftl/FTLInlineCacheSize.cpp:
+ (JSC::FTL::sizeOfTailCallVarargs):
+ (JSC::FTL::sizeOfTailCallForwardVarargs):
+ (JSC::FTL::sizeOfICFor):
+ * ftl/FTLInlineCacheSize.h:
+ * ftl/FTLJSCall.cpp:
+ (JSC::FTL::JSCall::JSCall):
+ * ftl/FTLJSCallBase.cpp:
+ (JSC::FTL::JSCallBase::emit):
+ (JSC::FTL::JSCallBase::link):
+ * ftl/FTLJSCallBase.h:
+ * ftl/FTLJSCallVarargs.cpp:
+ (JSC::FTL::JSCallVarargs::JSCallVarargs):
+ (JSC::FTL::JSCallVarargs::emit):
+ * ftl/FTLJSTailCall.cpp: Added.
+ (JSC::FTL::getRegisterWithAddend):
+ (JSC::FTL::recoveryFor):
+ (JSC::FTL::sizeFor):
+ (JSC::FTL::JSTailCall::JSTailCall):
+ (JSC::FTL::m_instructionOffset):
+ (JSC::FTL::JSTailCall::emit):
+ * ftl/FTLJSTailCall.h: Copied from Source/JavaScriptCore/ftl/FTLJSCallBase.h.
+ (JSC::FTL::JSTailCall::stackmapID):
+ (JSC::FTL::JSTailCall::estimatedSize):
+ (JSC::FTL::JSTailCall::numArguments):
+ (JSC::FTL::JSTailCall::operator<):
+ * ftl/FTLLocation.h:
+ (JSC::FTL::Location::operator bool):
+ (JSC::FTL::Location::operator!):
+ * ftl/FTLLowerDFGToLLVM.cpp:
+ (JSC::FTL::DFG::LowerDFGToLLVM::lower):
+ (JSC::FTL::DFG::LowerDFGToLLVM::compileNode):
+ (JSC::FTL::DFG::LowerDFGToLLVM::compileTailCall):
+ (JSC::FTL::DFG::LowerDFGToLLVM::compileCallOrConstructVarargs):
+ (JSC::FTL::DFG::LowerDFGToLLVM::callPreflight):
+ (JSC::FTL::DFG::LowerDFGToLLVM::exitValueForTailCall):
+ * ftl/FTLState.h:
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::emitExceptionCheck):
+ * jit/CallFrameShuffleData.h:
+ * jit/CallFrameShuffler.cpp:
+ (JSC::CallFrameShuffler::CallFrameShuffler):
+ (JSC::CallFrameShuffler::dump):
+ (JSC::CallFrameShuffler::spill):
+ (JSC::CallFrameShuffler::extendFrameIfNeeded):
+ (JSC::CallFrameShuffler::prepareForSlowPath):
+ (JSC::CallFrameShuffler::prepareAny):
+ * jit/CallFrameShuffler.h:
+ (JSC::CallFrameShuffler::restoreGPR):
+ (JSC::CallFrameShuffler::getFreeRegister):
+ (JSC::CallFrameShuffler::getFreeTempGPR):
+ (JSC::CallFrameShuffler::ensureTempGPR):
+ (JSC::CallFrameShuffler::addNew):
+ * jit/CallFrameShuffler64.cpp:
+ (JSC::CallFrameShuffler::emitBox):
+ (JSC::CallFrameShuffler::tryAcquireTagTypeNumber):
+ * jit/JITCall.cpp:
+ (JSC::JIT::compileOpCall):
+ * jit/Reg.h:
+ (JSC::Reg::Reg):
+ (JSC::Reg::isHashTableDeletedValue):
+ (JSC::Reg::deleted):
+ (JSC::RegHash::hash):
+ (JSC::RegHash::equal):
+ * test/es6.yaml:
+
2015-09-28 Keith Miller <keith_miller@apple.com>
ObjectPropertyConditionSet::mergedWith does not produce a minimal intersection.
<ClCompile Include="..\ftl\FTLJSCall.cpp" />
<ClCompile Include="..\ftl\FTLJSCallBase.cpp" />
<ClCompile Include="..\ftl\FTLJSCallVarargs.cpp" />
+ <ClCompile Include="..\ftl\FTLJSTailCall.cpp" />
<ClCompile Include="..\ftl\FTLLink.cpp" />
<ClCompile Include="..\ftl\FTLLocation.cpp" />
<ClCompile Include="..\ftl\FTLLowerDFGToLLVM.cpp" />
<ClInclude Include="..\ftl\FTLJSCall.h" />
<ClInclude Include="..\ftl\FTLJSCallBase.h" />
<ClInclude Include="..\ftl\FTLJSCallVarargs.h" />
+ <ClInclude Include="..\ftl\FTLJSTailCall.h" />
<ClInclude Include="..\ftl\FTLLink.h" />
<ClInclude Include="..\ftl\FTLLocation.h" />
<ClInclude Include="..\ftl\FTLLowerDFGToLLVM.h" />
<ClCompile Include="..\ftl\FTLJSCall.cpp">
<Filter>ftl</Filter>
</ClCompile>
+ <ClCompile Include="..\ftl\FTLJSTailCall.cpp">
+ <Filter>ftl</Filter>
+ </ClCompile>
<ClCompile Include="..\ftl\FTLLink.cpp">
<Filter>ftl</Filter>
</ClCompile>
<ClInclude Include="..\ftl\FTLJSCall.h">
<Filter>ftl</Filter>
</ClInclude>
+ <ClInclude Include="..\ftl\FTLJSTailCall.h">
+ <Filter>ftl</Filter>
+ </ClInclude>
<ClInclude Include="..\ftl\FTLLink.h">
<Filter>ftl</Filter>
</ClInclude>
623A37EC1B87A7C000754209 /* RegisterMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 623A37EB1B87A7BD00754209 /* RegisterMap.h */; settings = {ATTRIBUTES = (Private, ); }; };
627673231B680C1E00FD9F2E /* CallMode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 627673211B680C1E00FD9F2E /* CallMode.cpp */; };
627673241B680C1E00FD9F2E /* CallMode.h in Headers */ = {isa = PBXBuildFile; fileRef = 627673221B680C1E00FD9F2E /* CallMode.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 62774DAA1B8D4B190006F05A /* FTLJSTailCall.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62774DA81B8D4B190006F05A /* FTLJSTailCall.cpp */; };
+ 62774DAB1B8D4B190006F05A /* FTLJSTailCall.h in Headers */ = {isa = PBXBuildFile; fileRef = 62774DA91B8D4B190006F05A /* FTLJSTailCall.h */; };
62D2D38F1ADF103F000206C1 /* FunctionRareData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */; };
62D2D3901ADF103F000206C1 /* FunctionRareData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */; settings = {ATTRIBUTES = (Private, ); }; };
62D755D41B84FB3D001801FA /* CallFrameShuffler64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D755D31B84FB39001801FA /* CallFrameShuffler64.cpp */; };
623A37EB1B87A7BD00754209 /* RegisterMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RegisterMap.h; sourceTree = "<group>"; };
627673211B680C1E00FD9F2E /* CallMode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallMode.cpp; sourceTree = "<group>"; };
627673221B680C1E00FD9F2E /* CallMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallMode.h; sourceTree = "<group>"; };
+ 62774DA81B8D4B190006F05A /* FTLJSTailCall.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = FTLJSTailCall.cpp; path = ftl/FTLJSTailCall.cpp; sourceTree = "<group>"; };
+ 62774DA91B8D4B190006F05A /* FTLJSTailCall.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLJSTailCall.h; path = ftl/FTLJSTailCall.h; sourceTree = "<group>"; };
62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGLazyNode.cpp; path = dfg/DFGLazyNode.cpp; sourceTree = "<group>"; };
62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGLazyNode.h; path = dfg/DFGLazyNode.h; sourceTree = "<group>"; };
62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionRareData.cpp; sourceTree = "<group>"; };
0FD1202E1A8AED12000F5280 /* FTLJSCallBase.h */,
0FD120311A8C85BD000F5280 /* FTLJSCallVarargs.cpp */,
0FD120321A8C85BD000F5280 /* FTLJSCallVarargs.h */,
+ 62774DA81B8D4B190006F05A /* FTLJSTailCall.cpp */,
+ 62774DA91B8D4B190006F05A /* FTLJSTailCall.h */,
0F8F2B93172E049E007DBDA5 /* FTLLink.cpp */,
0F8F2B94172E049E007DBDA5 /* FTLLink.h */,
0FCEFADD180738C000472CE4 /* FTLLocation.cpp */,
0F6B1CB6185FC9E900845D97 /* FTLJSCall.h in Headers */,
0FD120301A8AED12000F5280 /* FTLJSCallBase.h in Headers */,
0FD120341A8C85BD000F5280 /* FTLJSCallVarargs.h in Headers */,
+ 62774DAB1B8D4B190006F05A /* FTLJSTailCall.h in Headers */,
0F8F2B96172E04A3007DBDA5 /* FTLLink.h in Headers */,
0FCEFAE0180738C000472CE4 /* FTLLocation.h in Headers */,
0FEA0A10170513DB00BB722C /* FTLLowerDFGToLLVM.h in Headers */,
0F6B1CB5185FC9E900845D97 /* FTLJSCall.cpp in Sources */,
0FD1202F1A8AED12000F5280 /* FTLJSCallBase.cpp in Sources */,
0FD120331A8C85BD000F5280 /* FTLJSCallVarargs.cpp in Sources */,
+ 62774DAA1B8D4B190006F05A /* FTLJSTailCall.cpp in Sources */,
0F8F2B95172E04A0007DBDA5 /* FTLLink.cpp in Sources */,
0FCEFADF180738C000472CE4 /* FTLLocation.cpp in Sources */,
0FEA0A0F170513DB00BB722C /* FTLLowerDFGToLLVM.cpp in Sources */,
m_currentIndex = nextOffset;
m_exitOK = true;
processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
- addToGraph(Jump);
+ if (Node* terminal = m_currentBlock->terminal())
+ ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs);
+ else {
+ addToGraph(Jump);
+ landingBlocks.append(m_currentBlock);
+ }
if (verbose)
dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
m_currentBlock->didLink();
- landingBlocks.append(m_currentBlock);
if (verbose)
dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
m_currentIndex = nextOffset;
m_exitOK = true; // Origin changed, so it's fine to exit again.
processSetLocalQueue();
- addToGraph(Jump);
- landingBlocks.append(m_currentBlock);
+ if (Node* terminal = m_currentBlock->terminal())
+ ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs);
+ else {
+ addToGraph(Jump);
+ landingBlocks.append(m_currentBlock);
+ }
RefPtr<BasicBlock> continuationBlock = adoptRef(
new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
// We could be the dummy jump to a return after a non-inlined, non-emulated tail call in a ternary operator
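// (For instance, assuming the usual bytecode for a ternary in tail position, `return b ? f() : g();` in strict code emits a tail call for the first arm followed by a jump to the shared op_ret; that jump is dead once the block already ends in a TailCall.)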
Node* terminal = m_currentBlock->terminal();
ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs);
- LAST_OPCODE(op_ret);
+ LAST_OPCODE(op_jmp);
}
int relativeOffset = currentInstruction[1].u.operand;
addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
case ThrowReferenceError:
write(SideState);
- read(HeapObjectCount);
- write(HeapObjectCount);
return;
case CountExecution:
}
}
+ bool isFunctionTerminal()
+ {
+ if (isTerminal() && !numSuccessors())
+ return true;
+
+ return false;
+ }
+
unsigned targetBytecodeOffsetDuringParsing()
{
ASSERT(isJump());
calleeGPR = callee.gpr();
callee.use();
+ shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
shuffleData.numLocals = m_jit.graph().frameRegisterCount();
shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
shuffleData.args.resize(numPassedArgs);
}
NodeAndIndex terminal = block->findTerminal();
- if (terminal.node->op() == Return) {
+ if (terminal.node->isFunctionTerminal()) {
insertionSet.insertNode(
terminal.index, SpecNone, CheckTierUpAtReturn, terminal.node->origin);
}
case NotifyWrite:
case StoreBarrier:
case Call:
+ case TailCall:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
- case CallForwardVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
+ case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructForwardVarargs:
case LoadVarargs:
case ValueToInt32:
call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
});
}
+
+ adjustCallICsForStackmaps(state.jsTailCalls, recordMap);
+
+ for (unsigned i = state.jsTailCalls.size(); i--;) {
+ JSTailCall& call = state.jsTailCalls[i];
+
+ CCallHelpers fastPathJIT(&vm, codeBlock);
+ call.emit(*state.jitCode.get(), fastPathJIT);
+
+ char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
+ size_t sizeOfIC = call.estimatedSize();
+
+ generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
+ call.link(vm, linkBuffer);
+ });
+ }
auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
// It's sort of remotely possible that we won't have an in-band exception handling
#endif
}
+size_t sizeOfTailCallVarargs()
+{
+#if CPU(ARM64)
+ return 188 + sizeOfCallVarargs();
+#else
+ return 151 + sizeOfCallVarargs();
+#endif
+}
+
size_t sizeOfCallForwardVarargs()
{
#if CPU(ARM64)
#endif
}
+size_t sizeOfTailCallForwardVarargs()
+{
+#if CPU(ARM64)
+ return 188 + sizeOfCallForwardVarargs();
+#else
+ return 151 + sizeOfCallForwardVarargs();
+#endif
+}
+
size_t sizeOfConstructVarargs()
{
return sizeOfCallVarargs(); // Should be the same size.
case Construct:
return sizeOfCall();
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
return sizeOfCallVarargs();
+ case TailCallVarargs:
+ return sizeOfTailCallVarargs();
case CallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
return sizeOfCallForwardVarargs();
+ case TailCallForwardVarargs:
+ return sizeOfTailCallForwardVarargs();
case ConstructVarargs:
return sizeOfConstructVarargs();
case ConstructForwardVarargs:
case In:
return sizeOfIn();
default:
- return 0;
+ RELEASE_ASSERT_NOT_REACHED();
}
}
size_t sizeOfPutById();
size_t sizeOfCall();
size_t sizeOfCallVarargs();
+size_t sizeOfTailCallVarargs();
size_t sizeOfCallForwardVarargs();
+size_t sizeOfTailCallForwardVarargs();
size_t sizeOfConstructVarargs();
size_t sizeOfConstructForwardVarargs();
size_t sizeOfIn();
, m_stackmapID(stackmapID)
, m_instructionOffset(0)
{
- ASSERT(node->op() == Call || node->op() == Construct);
+ ASSERT(node->op() == Call || node->op() == Construct || node->op() == TailCallInlinedCaller);
}
void JSCall::emit(CCallHelpers& jit, unsigned stackSizeForLocals)
CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
CCallHelpers::TrustedImmPtr(0));
-
- m_fastCall = jit.nearCall();
- CCallHelpers::Jump done = jit.jump();
-
+
+ CCallHelpers::Jump done;
+
+ if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail) {
+ jit.emitRestoreCalleeSaves();
+ jit.prepareForTailCallSlow();
+ m_fastCall = jit.nearTailCall();
+ } else {
+ m_fastCall = jit.nearCall();
+ done = jit.jump();
+ }
+
slowPath.link(&jit);
-
+
jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
m_slowCall = jit.nearCall();
-
- done.link(&jit);
+
+ if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail)
+ jit.abortWithReason(JITDidReturnFromTailCall);
+ else
+ done.link(&jit);
+
+ m_callLinkInfo->setUpCall(m_type, m_origin, GPRInfo::regT0);
}
void JSCallBase::link(VM& vm, LinkBuffer& linkBuffer)
linkBuffer.link(
m_slowCall, FunctionPtr(vm.getCTIStub(linkCallThunkGenerator).code().executableAddress()));
- m_callLinkInfo->setUpCallFromFTL(m_type, m_origin, linkBuffer.locationOfNearCall(m_slowCall),
- linkBuffer.locationOf(m_targetToCheck), linkBuffer.locationOfNearCall(m_fastCall),
- GPRInfo::regT0);
+ m_callLinkInfo->setCallLocations(linkBuffer.locationOfNearCall(m_slowCall),
+ linkBuffer.locationOf(m_targetToCheck), linkBuffer.locationOfNearCall(m_fastCall));
}
} } // namespace JSC::FTL
void emit(CCallHelpers&);
void link(VM&, LinkBuffer&);
-private:
+protected:
CallLinkInfo::CallType m_type;
CodeOrigin m_origin;
CCallHelpers::DataLabelPtr m_targetToCheck;
, m_node(node)
, m_callBase(
(node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
- ? CallLinkInfo::ConstructVarargs : CallLinkInfo::CallVarargs,
+ ? CallLinkInfo::ConstructVarargs : (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
+ ? CallLinkInfo::TailCallVarargs : CallLinkInfo::CallVarargs,
node->origin.semantic)
, m_instructionOffset(0)
{
ASSERT(
node->op() == CallVarargs || node->op() == CallForwardVarargs
+ || node->op() == TailCallVarargsInlinedCaller || node->op() == TailCallForwardVarargsInlinedCaller
+ || node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs
|| node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs);
}
switch (m_node->op()) {
case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
argumentsGPR = GPRInfo::argumentGPR1;
thisGPR = GPRInfo::argumentGPR2;
break;
case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructForwardVarargs:
thisGPR = GPRInfo::argumentGPR1;
forwarding = true;
// Henceforth we make the call. The base FTL call machinery expects the callee in regT0 and for the
// stack frame to already be set up, which it is.
jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));
-
+
m_callBase.emit(jit);
// Undo the damage we've done.
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLJSTailCall.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "CallFrameShuffler.h"
+#include "DFGNode.h"
+#include "FTLJITCode.h"
+#include "FTLLocation.h"
+#include "FTLStackMaps.h"
+#include "JSCJSValueInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace FTL {
+
+using namespace DFG;
+
+namespace {
+
+FTL::Location getRegisterWithAddend(const ExitValue& value, StackMaps::Record& record, StackMaps& stackmaps)
+{
+ if (value.kind() != ExitValueArgument)
+ return { };
+
+ auto location =
+ FTL::Location::forStackmaps(&stackmaps, record.locations[value.exitArgument().argument()]);
+
+ if (location.kind() != Location::Register || !location.addend())
+ return { };
+
+ RELEASE_ASSERT(location.isGPR());
+ return location;
+}
+
+ValueRecovery recoveryFor(const ExitValue& value, StackMaps::Record& record, StackMaps& stackmaps)
+{
+ switch (value.kind()) {
+ case ExitValueConstant:
+ return ValueRecovery::constant(value.constant());
+
+ case ExitValueArgument: {
+ auto location =
+ FTL::Location::forStackmaps(&stackmaps, record.locations[value.exitArgument().argument()]);
+ auto format = value.exitArgument().format();
+
+ switch (location.kind()) {
+ case Location::Register:
+ // We handle the addend outside
+ return ValueRecovery::inRegister(location.dwarfReg().reg(), format);
+
+ case Location::Indirect:
+ // Oh LLVM, you crazy...
+ RELEASE_ASSERT(location.dwarfReg().reg() == Reg(MacroAssembler::framePointerRegister));
+ RELEASE_ASSERT(!(location.offset() % sizeof(void*)));
+ return ValueRecovery::displacedInJSStack(VirtualRegister { static_cast<int>(location.offset() / sizeof(void*)) }, format);
+
+ case Location::Constant:
+ return ValueRecovery::constant(JSValue::decode(location.constant()));
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ case ExitValueInJSStack:
+ return ValueRecovery::displacedInJSStack(value.virtualRegister(), DataFormatJS);
+
+ case ExitValueInJSStackAsInt32:
+ return ValueRecovery::displacedInJSStack(value.virtualRegister(), DataFormatInt32);
+
+ case ExitValueInJSStackAsInt52:
+ return ValueRecovery::displacedInJSStack(value.virtualRegister(), DataFormatInt52);
+
+ case ExitValueInJSStackAsDouble:
+ return ValueRecovery::displacedInJSStack(value.virtualRegister(), DataFormatDouble);
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+// This computes an estimated size (in bytes) for the sequence of
+// instructions required to load, box, and store a value of a given
+// type, assuming no spilling is required.
+uint32_t sizeFor(DataFormat format)
+{
+ switch (format) {
+ case DataFormatInt32:
+ // Boxing is zero-extending and tagging
+#if CPU(X86_64)
+ return 6 + sizeFor(DataFormatJS);
+#elif CPU(ARM64)
+ return 8 + sizeFor(DataFormatJS);
+#else
+ return sizeOfZeroExtend32 + sizeOfOrImm64 + sizeFor(DataFormatJS);
+#endif
+
+ case DataFormatInt52:
+ // Boxing is first a conversion to StrictInt52, then
+ // StrictInt52 boxing
+#if CPU(X86_64)
+ return 4 + sizeFor(DataFormatStrictInt52);
+#elif CPU(ARM64)
+ return 4 + sizeFor(DataFormatStrictInt52);
+#else
+ return sizeOfShiftImm32 + sizeFor(DataFormatStrictInt52);
+#endif
+
+ case DataFormatStrictInt52:
+ // Boxing is first a conversion to double, then double boxing
+#if CPU(X86_64)
+ return 8 + sizeFor(DataFormatDouble);
+#elif CPU(ARM64)
+ return 4 + sizeFor(DataFormatDouble);
+#else
+ return sizeOfConvertInt64ToDouble + sizeFor(DataFormatDouble);
+#endif
+
+ case DataFormatDouble:
+ // Boxing is purifying, moving to a GPR, and tagging
+#if CPU(X86_64)
+ return 38 + sizeFor(DataFormatJS);
+#elif CPU(ARM64)
+ return 28 + sizeFor(DataFormatJS);
+#else
+ return sizeOfPurifyNaN + sizeOfSubImm64 + sizeOfMoveDoubleTo64 + sizeFor(DataFormatJS);
+#endif
+
+ case DataFormatBoolean:
+ // Boxing is adding ValueFalse
+#if CPU(X86_64)
+ return 4 + sizeFor(DataFormatJS);
+#elif CPU(ARM64)
+ return 4 + sizeFor(DataFormatJS);
+#else
+ return sizeOfAddImm32 + sizeFor(DataFormatJS);
+#endif
+
+ case DataFormatJS:
+ // We will load (in a GPR or FPR) then store the value
+#if CPU(X86_64)
+ return 8;
+#elif CPU(ARM64)
+ return 8;
+#else
+ return sizeOfLoad + sizeOfStore;
+#endif
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+} // anonymous namespace
+
+JSTailCall::JSTailCall(unsigned stackmapID, Node* node, Vector<ExitValue> arguments)
+ : JSCallBase(CallLinkInfo::TailCall, node->origin.semantic)
+ , m_stackmapID(stackmapID)
+ , m_arguments { WTF::move(arguments) }
+ , m_instructionOffset(0)
+{
+ ASSERT(node->op() == TailCall);
+ ASSERT(numArguments() == node->numChildren() - 1);
+
+ // Estimate the size of the inline cache, assuming that every
+ // value goes from the stack to the stack (in practice, this will
+ // seldom be true, giving us some amount of leeway) and that no
+ // spilling will occur (in practice, this will almost always be
+ // true).
+
+ // We first compute the new frame base and load the fp/lr
+ // registers final values. On debug builds, we also need to
+ // account for the fp-sp delta check (twice: fast and slow path).
+#if CPU(X86_64)
+ m_estimatedSize = 56;
+#if !ASSERT_DISABLED
+ m_estimatedSize += 26;
+# endif
+#elif CPU(ARM64)
+ m_estimatedSize = 44;
+#if !ASSERT_DISABLED
+ m_estimatedSize += 24;
+# endif
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+
+ // Arguments will probably be loaded & stored twice (fast & slow)
+ for (ExitValue& arg : m_arguments)
+ m_estimatedSize += 2 * sizeFor(arg.dataFormat());
+
+ // We also have the slow path check, the two calls, and the
+ // CallLinkInfo load for the slow path
+#if CPU(X86_64)
+ m_estimatedSize += 55;
+#elif CPU(ARM64)
+ m_estimatedSize += 44;
+#else
+ m_estimatedSize += sizeOfCall + sizeOfJump + sizeOfLoad + sizeOfSlowPathCheck;
+#endif
+}
+
+void JSTailCall::emit(JITCode& jitCode, CCallHelpers& jit)
+{
+ StackMaps::Record* record { nullptr };
+
+ for (unsigned i = jitCode.stackmaps.records.size(); i--;) {
+ record = &jitCode.stackmaps.records[i];
+ if (record->patchpointID == m_stackmapID)
+ break;
+ }
+
+ RELEASE_ASSERT(record->patchpointID == m_stackmapID);
+
+ m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+
+ CallFrameShuffleData shuffleData;
+
+ // The callee was the first passed argument, and must be in a GPR because
+ // we used the "anyregcc" calling convention
+ auto calleeLocation =
+ FTL::Location::forStackmaps(nullptr, record->locations[0]);
+ GPRReg calleeGPR = calleeLocation.directGPR();
+ shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
+
+ // The tag type number was the second argument, if there was one
+ auto tagTypeNumberLocation =
+ FTL::Location::forStackmaps(&jitCode.stackmaps, record->locations[1]);
+ if (tagTypeNumberLocation.isGPR() && !tagTypeNumberLocation.addend())
+ shuffleData.tagTypeNumber = tagTypeNumberLocation.directGPR();
+
+ shuffleData.args.grow(numArguments());
+ HashMap<Reg, Vector<std::pair<ValueRecovery*, int32_t>>> withAddend;
+ size_t numAddends { 0 };
+ for (size_t i = 0; i < numArguments(); ++i) {
+ shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);
+ if (FTL::Location addend = getRegisterWithAddend(m_arguments[i], *record, jitCode.stackmaps)) {
+ withAddend.add(
+ addend.dwarfReg().reg(),
+ Vector<std::pair<ValueRecovery*, int32_t>>()).iterator->value.append(
+ std::make_pair(&shuffleData.args[i], addend.addend()));
+ numAddends++;
+ }
+ }
+
+ numAddends = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numAddends);
+
+ shuffleData.numLocals = static_cast<int64_t>(jitCode.stackmaps.stackSizeForLocals()) / sizeof(void*) + numAddends;
+
+ ASSERT(!numAddends == withAddend.isEmpty());
+
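+    // Some arguments come out of the stackmap as (register + addend). Each such
+    // value is materialized below into one of the extra slots counted in numLocals,
+    // and its recovery is rewritten to point at that slot, so the shuffler only
+    // ever sees plain register or stack recoveries.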
+ if (!withAddend.isEmpty()) {
+ jit.subPtr(MacroAssembler::TrustedImm32(numAddends * sizeof(void*)), MacroAssembler::stackPointerRegister);
+ VirtualRegister spillBase { 1 - static_cast<int>(shuffleData.numLocals) };
+ for (auto entry : withAddend) {
+ for (auto pair : entry.value) {
+ ASSERT(numAddends > 0);
+ VirtualRegister spillSlot { spillBase + --numAddends };
+ ASSERT(entry.key.isGPR());
+ jit.addPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
+ jit.storePtr(entry.key.gpr(), CCallHelpers::addressFor(spillSlot));
+ jit.subPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
+ *pair.first = ValueRecovery::displacedInJSStack(spillSlot, pair.first->dataFormat());
+ }
+ }
+ ASSERT(numAddends < stackAlignmentRegisters());
+ }
+
+ shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
+
+ CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, calleeGPR, m_targetToCheck,
+ CCallHelpers::TrustedImmPtr(0));
+
+ m_callLinkInfo->setFrameShuffleData(shuffleData);
+ CallFrameShuffler(jit, shuffleData).prepareForTailCall();
+
+ m_fastCall = jit.nearTailCall();
+
+ slowPath.link(&jit);
+
+ CallFrameShuffler slowPathShuffler(jit, shuffleData);
+ slowPathShuffler.setCalleeJSValueRegs(JSValueRegs { GPRInfo::regT0 });
+ slowPathShuffler.prepareForSlowPath();
+
+ jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
+
+ m_slowCall = jit.nearCall();
+
+ jit.abortWithReason(JITDidReturnFromTailCall);
+
+ m_callLinkInfo->setUpCall(m_type, m_origin, calleeGPR);
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FTLJSTailCall_h
+#define FTLJSTailCall_h
+
+#if ENABLE(FTL_JIT)
+
+#include "FTLExitArgumentList.h"
+#include "FTLExitValue.h"
+#include "FTLJSCallBase.h"
+
+namespace JSC {
+
+namespace DFG {
+struct Node;
+}
+
+namespace FTL {
+
+class JSTailCall : public JSCallBase {
+public:
+ JSTailCall(unsigned stackmapID, DFG::Node*, Vector<ExitValue> arguments);
+
+ void emit(JITCode&, CCallHelpers&);
+
+ unsigned stackmapID() const { return m_stackmapID; }
+
+ unsigned estimatedSize() const { return m_estimatedSize; }
+
+ unsigned numArguments() const { return m_arguments.size(); }
+
+ bool operator<(const JSTailCall& other) const
+ {
+ return m_instructionOffset < other.m_instructionOffset;
+ }
+
+private:
+ unsigned m_stackmapID;
+ Vector<ExitValue> m_arguments;
+ unsigned m_estimatedSize;
+
+public:
+ uint32_t m_instructionOffset;
+};
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
+#endif // FTLJSTailCall_h
+
return u.constant;
}
- bool operator!() const { return kind() == Unprocessed && !u.variable.offset; }
+ explicit operator bool() const { return kind() != Unprocessed || u.variable.offset; }
+
+ bool operator!() const { return !static_cast<bool>(*this); }
bool isHashTableDeletedValue() const { return kind() == Unprocessed && u.variable.offset; }
for (Node* node : *block) {
switch (node->op()) {
case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructVarargs:
case ConstructForwardVarargs:
hasVarargs = true;
compileLogicalNot();
break;
case Call:
+ case TailCallInlinedCaller:
case Construct:
compileCallOrConstruct();
break;
+ case TailCall:
+ compileTailCall();
+ break;
case CallVarargs:
case CallForwardVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructVarargs:
case ConstructForwardVarargs:
compileCallOrConstructVarargs();
setJSValue(call);
}
+
+ void compileTailCall()
+ {
+ int numArgs = m_node->numChildren() - 1;
+ ExitArgumentList exitArguments;
+ exitArguments.reserveCapacity(numArgs + 6);
+
+ unsigned stackmapID = m_stackmapIDs++;
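+        // The callee and the TagTypeNumber register are the first two recorded
+        // patchpoint arguments; JSTailCall::emit() reads them back from the
+        // stackmap record's locations[0] and locations[1].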
+ exitArguments.append(lowJSValue(m_graph.varArgChild(m_node, 0)));
+ exitArguments.append(m_tagTypeNumber);
+
+ Vector<ExitValue> callArguments(numArgs);
+
+ bool needsTagTypeNumber { false };
+ for (int i = 0; i < numArgs; ++i) {
+ callArguments[i] =
+ exitValueForTailCall(exitArguments, m_graph.varArgChild(m_node, 1 + i).node());
+ if (callArguments[i].dataFormat() == DataFormatInt32)
+ needsTagTypeNumber = true;
+ }
+
+ JSTailCall tailCall(stackmapID, m_node, WTF::move(callArguments));
+
+ exitArguments.insert(0, m_out.constInt32(needsTagTypeNumber ? 2 : 1));
+ exitArguments.insert(0, constNull(m_out.ref8));
+ exitArguments.insert(0, m_out.constInt32(tailCall.estimatedSize()));
+ exitArguments.insert(0, m_out.constInt64(stackmapID));
+
+ LValue call =
+ m_out.call(m_out.patchpointVoidIntrinsic(), exitArguments);
+ setInstructionCallingConvention(call, LLVMAnyRegCallConv);
+ m_out.unreachable();
+
+ m_ftlState.jsTailCalls.append(tailCall);
+ }
void compileCallOrConstructVarargs()
{
switch (m_node->op()) {
case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
jsArguments = lowJSValue(m_node->child2());
break;
case CallForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructForwardVarargs:
break;
default:
setInstructionCallingConvention(call, LLVMCCallConv);
m_ftlState.jsCallVarargses.append(JSCallVarargs(stackmapID, m_node));
-
- setJSValue(call);
+
+ switch (m_node->op()) {
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ m_out.unreachable();
+ break;
+
+ default:
+ setJSValue(call);
+ }
}
void compileLoadVarargs()
}
void callPreflight()
{
- callPreflight(m_node->origin.semantic);
+ CodeOrigin codeOrigin = m_node->origin.semantic;
+
+ if (m_node->op() == TailCallInlinedCaller
+ || m_node->op() == TailCallVarargsInlinedCaller
+ || m_node->op() == TailCallForwardVarargsInlinedCaller)
+ codeOrigin = *codeOrigin.inlineCallFrame->getCallerSkippingDeadFrames();
+
+ callPreflight(codeOrigin);
}
void callCheck()
DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
return ExitValue::dead();
}
-
+
ExitValue exitArgument(ExitArgumentList& arguments, DataFormat format, LValue value)
{
ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
arguments.append(value);
return result;
}
+
+ ExitValue exitValueForTailCall(ExitArgumentList& arguments, Node* node)
+ {
+ ASSERT(node->shouldGenerate());
+ ASSERT(node->hasResult());
+
+ switch (node->op()) {
+ case JSConstant:
+ case Int52Constant:
+ case DoubleConstant:
+ return ExitValue::constant(node->asJSValue());
+
+ default:
+ break;
+ }
+
+ LoweredNodeValue value = m_jsValueValues.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatJS, value.value());
+
+ value = m_int32Values.get(node);
+ if (isValid(value))
+ return exitArgument(arguments, DataFormatInt32, value.value());
+
+ value = m_booleanValues.get(node);
+ if (isValid(value)) {
+ LValue valueToPass = m_out.zeroExt(value.value(), m_out.int32);
+ return exitArgument(arguments, DataFormatBoolean, valueToPass);
+ }
+
+ // Doubles and Int52 have been converted by ValueRep()
+ DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
+ }
bool doesKill(Edge edge)
{
#include "FTLJITFinalizer.h"
#include "FTLJSCall.h"
#include "FTLJSCallVarargs.h"
+#include "FTLJSTailCall.h"
#include "FTLStackMaps.h"
#include "FTLState.h"
#include <wtf/Noncopyable.h>
SegmentedVector<CheckInDescriptor> checkIns;
Vector<JSCall> jsCalls;
Vector<JSCallVarargs> jsCallVarargses;
+ Vector<JSTailCall> jsTailCalls;
Vector<CString> codeSectionNames;
Vector<CString> dataSectionNames;
void* unwindDataSection;
if (width == NormalJumpWidth)
return result;
-
+
PatchableJump realJump = patchableJump();
result.link(this);
Vector<ValueRecovery> args;
#if USE(JSVALUE64)
RegisterMap<ValueRecovery> registers;
+ GPRReg tagTypeNumber { InvalidGPRReg };
void setupCalleeSaveRegisters(CodeBlock*);
#endif
else
addNew(reg.fpr(), data.registers[reg]);
}
+
+ m_tagTypeNumber = data.tagTypeNumber;
+ if (m_tagTypeNumber != InvalidGPRReg)
+ lockGPR(m_tagTypeNumber);
#endif
}
static const char* dangerDelimiter = " X-------------------------------X ";
static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
static const char* emptySpace = " ";
- ASSERT(m_alignedNewFrameSize <= numLocals());
out.print(" ");
out.print(" Old frame ");
out.print(" New frame ");
out.print("\n");
- for (int i = 0; i < m_alignedOldFrameSize + numLocals() + 3; ++i) {
+ int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
+ for (int i = 0; i < totalSize; ++i) {
VirtualRegister old { m_alignedOldFrameSize - i - 1 };
VirtualRegister newReg { old + m_frameDelta };
out.print(" Old frame offset is ", m_oldFrameOffset, "\n");
if (m_newFrameOffset)
out.print(" New frame offset is ", m_newFrameOffset, "\n");
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n");
+#endif
}
CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
ASSERT(cachedRecovery.recovery().isInRegisters());
VirtualRegister spillSlot { 0 };
- for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot -= 1) {
- ASSERT(slot < newAsOld(firstNew()));
+ for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
+ if (slot >= newAsOld(firstNew()))
+ break;
+
if (getOld(slot))
continue;
spillSlot = slot;
break;
}
- // We must have enough slots to be able to fit the whole
- // callee's frame for the slow path.
- RELEASE_ASSERT(spillSlot.isLocal());
+ // We must have enough slots to be able to fit the whole callee's
+ // frame for the slow path - unless we are in the FTL. In that
+ // case, we are allowed to extend the frame *once*, since we are
+ // guaranteed to have enough available space for that.
+ if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
+ RELEASE_ASSERT(!m_didExtendFrame);
+ extendFrameIfNeeded();
+ spill(cachedRecovery);
+ return;
+ }
if (verbose)
dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
dataLog(" Skipping the fp-sp delta check since there is too much pressure");
}
+void CallFrameShuffler::extendFrameIfNeeded()
+{
+ ASSERT(!m_didExtendFrame);
+ ASSERT(!isUndecided());
+
+ VirtualRegister firstRead { firstOld() };
+ for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
+ if (getOld(firstRead))
+ break;
+ }
+ size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
+ size_t wantedSize = m_newFrame.size() + m_newFrameOffset;
+
+ if (availableSize < wantedSize) {
+ size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
+ m_oldFrame.grow(m_oldFrame.size() + delta);
+ for (size_t i = 0; i < delta; ++i)
+ m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
+ m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);
+
+ if (isSlowPath())
+ m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
+ else
+ m_oldFrameOffset = numLocals();
+
+ if (verbose)
+ dataLogF(" Not enough space - extending the old frame %zu slot\n", delta);
+ }
+
+ m_didExtendFrame = true;
+}
+
void CallFrameShuffler::prepareForSlowPath()
{
ASSERT(isUndecided());
m_newFrameOffset = -JSStack::CallerFrameAndPCSize;
if (verbose)
- dataLog("\n\nPreparing frame for slow path call:\n", *this);
+ dataLog("\n\nPreparing frame for slow path call:\n");
+
+ // When coming from the FTL, we need to extend the frame. In other
+ // cases, we may end up extending the frame if we previously
+ // spilled things (e.g. in polymorphic cache).
+ extendFrameIfNeeded();
+
+ if (verbose)
+ dataLog(*this);
prepareAny();
ASSERT_UNUSED(writesOK, writesOK);
}
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber])
+ releaseGPR(m_tagTypeNumber);
+#endif
+
// Handle 2) by loading all registers. We don't have to do any
// writes, since they have been taken care of above.
if (verbose)
ASSERT(cachedRecovery->targets().isEmpty());
}
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ releaseGPR(m_tagTypeNumber);
+#endif
+
// At this point, we have read everything we cared about from the
// stack, and written everything we had to to the stack.
if (verbose)
m_lockedRegisters.clear(gpr);
}
+ void restoreGPR(GPRReg gpr)
+ {
+ if (!m_newRegisters[gpr])
+ return;
+
+ ensureGPR();
+#if USE(JSVALUE32_64)
+ GPRReg tempGPR { getFreeGPR() };
+ lockGPR(tempGPR);
+ ensureGPR();
+ releaseGPR(tempGPR);
+#endif
+ emitDisplace(*m_newRegisters[gpr]);
+ }
+
// You can only take a snapshot if the recovery has not started
// yet. The only operations that are valid before taking a
// snapshot are lockGPR(), acquireGPR() and releaseGPR().
return reg >= firstOld() && reg <= lastOld();
}
+ bool m_didExtendFrame { false };
+
+ void extendFrameIfNeeded();
+
// This stores, for each slot in the new frame, information about
// the recovery for the value that should eventually go into that
// slot.
// We also use this to lock registers temporarily, for instance to
// ensure that we have at least 2 available registers for loading
// a pair on 32bits.
- RegisterSet m_lockedRegisters;
+ mutable RegisterSet m_lockedRegisters;
// This stores the current recoveries present in registers. A null
// CachedRecovery means we can trash the current value as we don't
// care about it.
RegisterMap<CachedRecovery*> m_registers;
+#if USE(JSVALUE64)
+ mutable GPRReg m_tagTypeNumber;
+
+ bool tryAcquireTagTypeNumber();
+#endif
+
// This stores, for each register, information about the recovery
// for the value that should eventually go into that register. The
// only registers that have a target recovery will be callee-save
nonTemp = reg;
}
}
+
+#if USE(JSVALUE64)
+ if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
+ ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
+ m_lockedRegisters.clear(m_tagTypeNumber);
+ nonTemp = Reg { m_tagTypeNumber };
+ m_tagTypeNumber = InvalidGPRReg;
+ }
+#endif
return nonTemp;
}
+ GPRReg getFreeTempGPR() const
+ {
+ Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
+ if (!freeTempGPR)
+ return InvalidGPRReg;
+ return freeTempGPR.gpr();
+ }
+
GPRReg getFreeGPR() const
{
Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
});
}
+ void ensureTempGPR()
+ {
+ if (getFreeTempGPR() != InvalidGPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding a temp GPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR()) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
+ && !m_newRegisters[cachedRecovery.recovery().gpr()];
+ }
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
+ && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
+ && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
+ }
+#endif
+ return false;
+ });
+ }
+
void ensureGPR()
{
if (getFreeGPR() != InvalidGPRReg)
{
ASSERT(jsValueRegs && !getNew(jsValueRegs));
CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
- ASSERT(!cachedRecovery->wantedJSValueRegs());
- cachedRecovery->setWantedJSValueRegs(jsValueRegs);
#if USE(JSVALUE64)
+ if (cachedRecovery->wantedJSValueRegs())
+ m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
#else
+ if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
+ if (oldRegs.payloadGPR())
+ m_newRegisters[oldRegs.payloadGPR()] = nullptr;
+ if (oldRegs.tagGPR())
+ m_newRegisters[oldRegs.tagGPR()] = nullptr;
+ }
if (jsValueRegs.payloadGPR() != InvalidGPRReg)
m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
if (jsValueRegs.tagGPR() != InvalidGPRReg)
m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
#endif
+ ASSERT(!cachedRecovery->wantedJSValueRegs());
+ cachedRecovery->setWantedJSValueRegs(jsValueRegs);
}
void addNew(FPRReg fpr, ValueRecovery recovery)
m_jit.zeroExtend32ToPtr(
cachedRecovery.recovery().gpr(),
cachedRecovery.recovery().gpr());
- // We have to do this the hard way.
- m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
- cachedRecovery.recovery().gpr());
+ m_lockedRegisters.set(cachedRecovery.recovery().gpr());
+ if (tryAcquireTagTypeNumber())
+ m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr());
+ else {
+ // We have to do this the hard way
+ m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
+ cachedRecovery.recovery().gpr());
+ }
+ m_lockedRegisters.clear(cachedRecovery.recovery().gpr());
cachedRecovery.setRecovery(
ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
if (verbose)
ASSERT(resultGPR != InvalidGPRReg);
m_jit.purifyNaN(cachedRecovery.recovery().fpr());
m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR);
- m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
+ m_lockedRegisters.set(resultGPR);
+ if (tryAcquireTagTypeNumber())
+ m_jit.sub64(m_tagTypeNumber, resultGPR);
+ else
+ m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
+ m_lockedRegisters.clear(resultGPR);
updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS));
if (verbose)
dataLog(" into ", cachedRecovery.recovery(), "\n");
ASSERT(m_registers[wantedReg] == &cachedRecovery);
}
+
+bool CallFrameShuffler::tryAcquireTagTypeNumber()
+{
+ if (m_tagTypeNumber != InvalidGPRReg)
+ return true;
+
+ m_tagTypeNumber = getFreeGPR();
+
+ if (m_tagTypeNumber == InvalidGPRReg)
+ return false;
+
+ m_lockedRegisters.set(m_tagTypeNumber);
+ m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber);
+ return true;
+}
} // namespace JSC
if (opcodeID == op_tail_call) {
CallFrameShuffleData shuffleData;
+ shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
shuffleData.numLocals =
instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
shuffleData.args.resize(instruction[3].u.operand);
: m_index(invalid())
{
}
+
+ Reg(WTF::HashTableDeletedValueType)
+ : m_index(deleted())
+ {
+ }
Reg(MacroAssembler::RegisterID reg)
: m_index(MacroAssembler::registerIndex(reg))
bool isSet() const { return m_index != invalid(); }
bool operator!() const { return !isSet(); }
explicit operator bool() const { return isSet(); }
+
+ bool isHashTableDeletedValue() const { return m_index == deleted(); }
bool isGPR() const
{
private:
static uint8_t invalid() { return 0xff; }
+
+ static uint8_t deleted() { return 0xfe; }
uint8_t m_index;
};
+struct RegHash {
+ static unsigned hash(const Reg& key) { return key.hash(); }
+ static bool equal(const Reg& a, const Reg& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
} // namespace JSC
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::Reg> {
+ typedef JSC::RegHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::Reg> : SimpleClassHashTraits<JSC::Reg> {
+ static const bool emptyValueIsZero = false;
+ };
+
+} // namespace WTF
+
#endif // ENABLE(JIT)
#endif // Reg_h
v(bool, forceProfilerBytecodeGeneration, false, nullptr) \
\
v(bool, enableFunctionDotArguments, true, nullptr) \
- v(bool, enableTailCalls, false, nullptr) \
+ v(bool, enableTailCalls, true, nullptr) \
\
/* showDisassembly implies showDFGDisassembly. */ \
v(bool, showDisassembly, false, "dumps disassembly of all JIT compiled code upon compilation") \
- path: es6/Promise_Promise[Symbol.species].js
cmd: runES6 :fail
- path: es6/proper_tail_calls_tail_call_optimisation_direct_recursion.js
- cmd: runES6 :fail
+ cmd: runES6 :normal
- path: es6/proper_tail_calls_tail_call_optimisation_mutual_recursion.js
- cmd: runES6 :fail
+ cmd: runES6 :normal
- path: es6/prototype_of_bound_functions_arrow_functions.js
cmd: runES6 :fail
- path: es6/prototype_of_bound_functions_basic_functions.js
--- /dev/null
+(function nonInlinedTailCall() {
+ function callee() { if (callee.caller != nonInlinedTailCall) throw new Error(); }
+ noInline(callee);
+
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ caller();
+
+ function loop(n) { "use strict"; if (n > 0) return loop(n - 1); }
+ noInline(loop);
+
+ loop(1000000);
+})();
+
+(function inlinedTailCall() {
+ function callee() { if (callee.caller != inlinedTailCall) throw new Error(); }
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ caller();
+
+ function loop(n) { "use strict"; if (n > 0) return loop(n - 1); }
+
+ loop(1000000);
+})();
+
+(function nonInlinedEmulatedTailCall() {
+ function emulator() { caller(); }
+ function callee() { if (callee.caller != emulator) throw new Error(); }
+ noInline(callee);
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ emulator();
+
+ function pad(n) { "use strict"; return loop(n); }
+ function loop(n) { "use strict"; if (n > 0) return pad(n - 1); }
+ noInline(loop);
+
+ loop(1000000);
+})();
+
+(function inlinedEmulatedTailCall() {
+ function emulator() { caller(); }
+ function callee() { if (callee.caller != emulator) throw new Error(); }
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ emulator();
+
+ function pad(n) { "use strict"; return loop(n); }
+ function loop(n) { "use strict"; if (n > 0) return pad(n - 1); }
+
+ loop(1000000);
+})();
--- /dev/null
+function shouldThrow(func, errorMessage) {
+ var errorThrown = false;
+ var error = null;
+ try {
+ func();
+ } catch (e) {
+ errorThrown = true;
+ error = e;
+ }
+ if (!errorThrown)
+ throw new Error('not thrown');
+ if (String(error) !== errorMessage)
+ throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyCountdown(n) {
+ function even(n) {
+ if (n == 0)
+ return n;
+ return odd(n - 1);
+ }
+
+ function odd(n) {
+ if (n == 1)
+ return n;
+ return even(n - 1);
+ }
+
+ if (n % 2 === 0)
+ return even(n);
+ else
+ return odd(n);
+}
+
+function strictCountdown(n) {
+ "use strict";
+
+ function even(n) {
+ if (n == 0)
+ return n;
+ return odd(n - 1);
+ }
+
+ function odd(n) {
+ if (n == 1)
+ return n;
+ return even(n - 1);
+ }
+
+ if (n % 2 === 0)
+ return even(n);
+ else
+ return odd(n);
+}
+
+shouldThrow(function () { sloppyCountdown(100000); }, "RangeError: Maximum call stack size exceeded.");
+strictCountdown(100000);
+
+// Parity alternating
+function odd(n) {
+ "use strict";
+ if (n > 0)
+ return even(n, 0);
+}
+
+function even(n) {
+ "use strict";
+ return odd(n - 1);
+}
+
+odd(100000);
--- /dev/null
+function shouldThrow(func, errorMessage) {
+ var errorThrown = false;
+ var error = null;
+ try {
+ func();
+ } catch (e) {
+ errorThrown = true;
+ error = e;
+ }
+ if (!errorThrown)
+ throw new Error('not thrown');
+ if (String(error) !== errorMessage)
+ throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyLoop(n) {
+ if (n > 0)
+ return sloppyLoop(n - 1);
+}
+
+function strictLoop(n) {
+ "use strict";
+ if (n > 0)
+ return strictLoop(n - 1);
+}
+
+// We have two of these so that we can test different stack alignments
+function strictLoopArityFixup1(n, dummy) {
+ "use strict";
+ if (n > 0)
+ return strictLoopArityFixup1(n - 1);
+}
+
+function strictLoopArityFixup2(n, dummy1, dummy2) {
+ "use strict";
+ if (n > 0)
+ return strictLoopArityFixup2(n - 1);
+}
+
+shouldThrow(function () { sloppyLoop(100000); }, 'RangeError: Maximum call stack size exceeded.');
+
+// These should not throw
+strictLoop(100000);
+strictLoopArityFixup1(1000000);
+strictLoopArityFixup2(1000000);
--- /dev/null
+function callerMustBeRun() {
+ if (!Object.is(callerMustBeRun.caller, runTests))
+ throw Error("Wrong caller, expected run but got ", callerMustBeRun.caller);
+}
+
+function callerMustBeStrict() {
+ var errorThrown = false;
+ try {
+ callerMustBeStrict.caller;
+ } catch (e) {
+ errorThrown = true;
+ }
+ if (!errorThrown)
+ throw Error("Wrong caller, expected strict caller but got ", callerMustBeStrict.caller);
+}
+
+function runTests() {
+ // Statement tests
+ (function simpleTailCall() {
+ "use strict";
+ return callerMustBeRun();
+ })();
+
+ (function noTailCallInTry() {
+ "use strict";
+ try {
+ return callerMustBeStrict();
+ } catch (e) {
+ throw e;
+ }
+ })();
+
+ (function tailCallInCatch() {
+ "use strict";
+ try { } catch (e) { return callerMustBeRun(); }
+ })();
+
+ (function tailCallInFinally() {
+ "use strict";
+ try { } finally { return callerMustBeRun(); }
+ })();
+
+ (function tailCallInFinallyWithCatch() {
+ "use strict";
+ try { } catch (e) { } finally { return callerMustBeRun(); }
+ })();
+
+ (function tailCallInFinallyWithCatchTaken() {
+ "use strict";
+ try { throw null; } catch (e) { } finally { return callerMustBeRun(); }
+ })();
+
+ (function noTailCallInCatchIfFinally() {
+ "use strict";
+ try { throw null; } catch (e) { return callerMustBeStrict(); } finally { }
+ })();
+
+ (function tailCallInFor() {
+ "use strict";
+ for (var i = 0; i < 10; ++i)
+ return callerMustBeRun();
+ })();
+
+ (function tailCallInWhile() {
+ "use strict";
+ while (true)
+ return callerMustBeRun();
+ })();
+
+ (function tailCallInDoWhile() {
+ "use strict";
+ do
+ return callerMustBeRun();
+ while (true);
+ })();
+
+ (function noTailCallInForIn() {
+ "use strict";
+ for (var x in [1, 2])
+ return callerMustBeStrict();
+ })();
+
+ (function noTailCallInForOf() {
+ "use strict";
+ for (var x of [1, 2])
+ return callerMustBeStrict();
+ })();
+
+ (function tailCallInIf() {
+ "use strict";
+ if (true)
+ return callerMustBeRun();
+ })();
+
+ (function tailCallInElse() {
+ "use strict";
+ if (false) throw new Error("WTF");
+ else return callerMustBeRun();
+ })();
+
+ (function tailCallInSwitchCase() {
+ "use strict";
+ switch (0) {
+ case 0: return callerMustBeRun();
+ }
+ })();
+
+ (function tailCallInSwitchDefault() {
+ "use strict";
+ switch (0) {
+ default: return callerMustBeRun();
+ }
+ })();
+
+ (function tailCallWithLabel() {
+ "use strict";
+ dummy: return callerMustBeRun();
+ })();
+
+ // Expression tests, we don't enumerate all the cases where there
+ // *shouldn't* be a tail call
+
+ (function tailCallComma() {
+ "use strict";
+ return callerMustBeStrict(), callerMustBeRun();
+ })();
+
+ (function tailCallTernaryLeft() {
+ "use strict";
+ return true ? callerMustBeRun() : unreachable();
+ })();
+
+ (function tailCallTernaryRight() {
+ "use strict";
+ return false ? unreachable() : callerMustBeRun();
+ })();
+
+ (function tailCallLogicalAnd() {
+ "use strict";
+ return true && callerMustBeRun();
+ })();
+
+ (function tailCallLogicalOr() {
+ "use strict";
+ return false || callerMustBeRun();
+ })();
+
+ (function memberTailCall() {
+ "use strict";
+ return { f: callerMustBeRun }.f();
+ })();
+
+ (function bindTailCall() {
+ "use strict";
+ return callerMustBeRun.bind()();
+ })();
+
+ // Function.prototype tests
+
+ (function applyTailCall() {
+ "use strict";
+ return callerMustBeRun.apply();
+ })();
+
+ (function callTailCall() {
+ "use strict";
+ return callerMustBeRun.call();
+ })();
+
+ // No tail call for constructors
+ (function noTailConstruct() {
+ "use strict";
+ return new callerMustBeStrict();
+ })();
+}
+
+for (var i = 0; i < 10000; ++i)
+ runTests();
--- /dev/null
+function shouldThrow(func, errorMessage) {
+ var errorThrown = false;
+ var error = null;
+ try {
+ func();
+ } catch (e) {
+ errorThrown = true;
+ error = e;
+ }
+ if (!errorThrown)
+ throw new Error('not thrown');
+ if (String(error) !== errorMessage)
+ throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyLoop(n) {
+ if (n > 0)
+ return sloppyLoop(...[n - 1]);
+}
+
+function strictLoop(n) {
+ "use strict";
+ if (n > 0)
+ return strictLoop(...[n - 1]);
+}
+
+shouldThrow(function () { sloppyLoop(100000); }, 'RangeError: Maximum call stack size exceeded.');
+strictLoop(100000);
--- /dev/null
+"use strict";
+
+function tail(a, b) { }
+noInline(tail);
+
+var obj = {
+ method: function (x) {
+ return tail(x, x);
+ },
+
+ get fromNative() { return tail(0, 0); }
+};
+noInline(obj.method);
+
+function getThis(x) { return this; }
+noInline(getThis);
+
+for (var i = 0; i < 10000; ++i) {
+ var that = getThis(obj.method(42));
+
+ if (!Object.is(that, undefined))
+ throw new Error("Wrong 'this' value in call, expected undefined but got " + that);
+
+ that = getThis(obj.method(...[42]));
+ if (!Object.is(that, undefined))
+ throw new Error("Wrong 'this' value in varargs call, expected undefined but got " + that);
+
+ if (!Object.is(obj.fromNative, undefined))
+ throw new Error("Wrong 'fromNative' value, expected undefined but got " + obj.fromNative);
+}