[ES6] Implement tail calls in the LLInt and Baseline JIT
authormsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 15 Sep 2015 03:00:34 +0000 (03:00 +0000)
committermsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 15 Sep 2015 03:00:34 +0000 (03:00 +0000)
https://bugs.webkit.org/show_bug.cgi?id=148661

Patch by Basile Clement <basile_clement@apple.com> on 2015-09-14
Reviewed by Filip Pizlo.

This patch introduces two new opcodes, op_tail_call and
op_tail_call_varargs, to perform tail calls, and implements them in the
LLInt and Baseline JIT. Using these opcodes prevents DFG and FTL
compilation for now. They are currently implemented by sliding the call
frame and masquerading as our own caller right before performing the
actual call.

This required changing the operationLink family of operations to return
a SlowPathReturnType instead of a char* in order to distinguish
exception cases from actual call cases. We introduce a new FrameAction
enum that indicates whether to reuse the current call frame
(non-exceptional tail call) or keep it (non-tail call and exceptional
cases).

This is also a semantics change, since the Function.caller property now
leaks tail calls. Because tail calls are only used in strict mode, which
poisons this property, the only way to observe this change is when a
sloppy function calls a strict function that then tail-calls a sloppy
function. Previously, the second sloppy function's caller would have
been the strict function (i.e. accessing the .caller attribute would
raise a TypeError), whereas it is now the first sloppy function. Tests
have been updated to reflect that.
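
A minimal sketch of the observable difference (function names are
illustrative, not taken from the tests):

    function sloppyOuter() { return strictMiddle(); }
    function strictMiddle() {
        "use strict";
        return sloppyInner(); // a tail call once tail calls are enabled
    }
    function sloppyInner() {
        // Before this patch: strictMiddle is the caller, so accessing
        // sloppyInner.caller throws a TypeError.
        // With tail calls: strictMiddle's frame has been reused, so
        // sloppyInner.caller is sloppyOuter.
        return sloppyInner.caller;
    }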

This also changes the assumptions we make about call frames. To be
reasonably efficient, we want to be able to compute the frame size based
only on the argument count, which was not possible previously. To enable
this, we now enforce at the bytecode generator, DFG, and FTL levels that
any space reserved for a call frame is stack-aligned, which lets us
easily compute its size when performing a tail call. In all the "special
call cases" (calls from native code, inline cache calls, etc.), we start
the frame at the current stack pointer and thus always have a
stack-aligned frame size.
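
As a worked sketch of that computation (the 5-register header and
2-register alignment are assumed values typical of 64-bit builds, not
taken from this patch):

    // frameSize = CallFrameHeaderSize + argumentCount, rounded up to a
    // multiple of stackAlignmentRegisters().
    function alignedFrameSize(argCount) {
        var headerSize = 5; // assumed CallFrameHeaderSize, in registers
        var alignment = 2;  // assumed stackAlignmentRegisters()
        var frameSize = headerSize + argCount;
        return Math.ceil(frameSize / alignment) * alignment;
    }
    alignedFrameSize(2); // 8: one padding register keeps the frame aligned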

Finally, this patch adds a couple of tests checking that tail calls run
in constant stack space, as well as tests checking that tail calls are
recognized correctly. Those tests exploit the aforementioned leaking of
tail calls through Function.caller to detect tail calls.
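
For instance, a loop in the spirit of tail-call-no-stack-overflow.js (a
sketch, not the actual test source) must run in constant stack space
once tail calls are enabled:

    "use strict";
    function strictLoop(n) {
        if (n > 0)
            return strictLoop(n - 1); // tail call: the frame is reused
        return 0;
    }
    strictLoop(1000000); // would overflow the stack without tail calls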

Given that this patch only implements tail calls for the LLInt and
Baseline JIT, tail calls are disabled by default. Until changes have
landed for all tiers, testing and using tail calls requires passing the
--enableTailCalls=true option or its equivalent.

* CMakeLists.txt:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
* JavaScriptCore.xcodeproj/project.pbxproj:
* assembler/AbortReason.h:
* assembler/AbstractMacroAssembler.h:
(JSC::AbstractMacroAssembler::Call::Call):
(JSC::AbstractMacroAssembler::repatchNearCall):
(JSC::AbstractMacroAssembler::repatchCompact):
* assembler/CodeLocation.h:
(JSC::CodeLocationNearCall::CodeLocationNearCall):
(JSC::CodeLocationNearCall::callMode):
(JSC::CodeLocationCommon::callAtOffset):
(JSC::CodeLocationCommon::nearCallAtOffset):
(JSC::CodeLocationCommon::dataLabelPtrAtOffset):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::locationOfNearCall):
(JSC::LinkBuffer::locationOf):
* assembler/MacroAssemblerARM.h:
(JSC::MacroAssemblerARM::nearCall):
(JSC::MacroAssemblerARM::nearTailCall):
(JSC::MacroAssemblerARM::call):
(JSC::MacroAssemblerARM::linkCall):
* assembler/MacroAssemblerARM64.h:
(JSC::MacroAssemblerARM64::nearCall):
(JSC::MacroAssemblerARM64::nearTailCall):
(JSC::MacroAssemblerARM64::ret):
(JSC::MacroAssemblerARM64::linkCall):
* assembler/MacroAssemblerARMv7.h:
(JSC::MacroAssemblerARMv7::nearCall):
(JSC::MacroAssemblerARMv7::nearTailCall):
(JSC::MacroAssemblerARMv7::call):
(JSC::MacroAssemblerARMv7::linkCall):
* assembler/MacroAssemblerMIPS.h:
(JSC::MacroAssemblerMIPS::nearCall):
(JSC::MacroAssemblerMIPS::nearTailCall):
(JSC::MacroAssemblerMIPS::call):
(JSC::MacroAssemblerMIPS::linkCall):
(JSC::MacroAssemblerMIPS::repatchCall):
* assembler/MacroAssemblerSH4.h:
(JSC::MacroAssemblerSH4::call):
(JSC::MacroAssemblerSH4::nearTailCall):
(JSC::MacroAssemblerSH4::nearCall):
(JSC::MacroAssemblerSH4::linkCall):
(JSC::MacroAssemblerSH4::repatchCall):
* assembler/MacroAssemblerX86.h:
(JSC::MacroAssemblerX86::linkCall):
* assembler/MacroAssemblerX86Common.h:
(JSC::MacroAssemblerX86Common::breakpoint):
(JSC::MacroAssemblerX86Common::nearTailCall):
(JSC::MacroAssemblerX86Common::nearCall):
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::linkCall):
* bytecode/BytecodeList.json:
* bytecode/BytecodeUseDef.h:
(JSC::computeUsesForBytecodeOffset):
(JSC::computeDefsForBytecodeOffset):
* bytecode/CallLinkInfo.h:
(JSC::CallLinkInfo::callTypeFor):
(JSC::CallLinkInfo::isVarargsCallType):
(JSC::CallLinkInfo::CallLinkInfo):
(JSC::CallLinkInfo::specializationKind):
(JSC::CallLinkInfo::callModeFor):
(JSC::CallLinkInfo::callMode):
(JSC::CallLinkInfo::isTailCall):
(JSC::CallLinkInfo::isVarargs):
(JSC::CallLinkInfo::registerPreservationMode):
* bytecode/CallLinkStatus.cpp:
(JSC::CallLinkStatus::computeFromLLInt):
* bytecode/CallMode.cpp: Added.
(WTF::printInternal):
* bytecode/CallMode.h: Added.
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::CodeBlock):
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::BytecodeGenerator):
(JSC::BytecodeGenerator::emitCallInTailPosition):
(JSC::BytecodeGenerator::emitCallEval):
(JSC::BytecodeGenerator::emitCall):
(JSC::BytecodeGenerator::emitCallVarargsInTailPosition):
(JSC::BytecodeGenerator::emitConstructVarargs):
* bytecompiler/NodesCodegen.cpp:
(JSC::CallArguments::CallArguments):
(JSC::LabelNode::emitBytecode):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::addCallWithoutSettingResult):
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::DFG::LowerDFGToLLVM::compileCallOrConstruct):
* interpreter/Interpreter.h:
(JSC::Interpreter::isCallBytecode):
* jit/CCallHelpers.h:
(JSC::CCallHelpers::jumpToExceptionHandler):
(JSC::CCallHelpers::prepareForTailCallSlow):
* jit/JIT.cpp:
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
* jit/JIT.h:
* jit/JITCall.cpp:
(JSC::JIT::compileOpCall):
(JSC::JIT::compileOpCallSlowCase):
(JSC::JIT::emit_op_call):
(JSC::JIT::emit_op_tail_call):
(JSC::JIT::emit_op_call_eval):
(JSC::JIT::emit_op_call_varargs):
(JSC::JIT::emit_op_tail_call_varargs):
(JSC::JIT::emit_op_construct_varargs):
(JSC::JIT::emitSlow_op_call):
(JSC::JIT::emitSlow_op_tail_call):
(JSC::JIT::emitSlow_op_call_eval):
(JSC::JIT::emitSlow_op_call_varargs):
(JSC::JIT::emitSlow_op_tail_call_varargs):
(JSC::JIT::emitSlow_op_construct_varargs):
* jit/JITCall32_64.cpp:
(JSC::JIT::emitSlow_op_call):
(JSC::JIT::emitSlow_op_tail_call):
(JSC::JIT::emitSlow_op_call_eval):
(JSC::JIT::emitSlow_op_call_varargs):
(JSC::JIT::emitSlow_op_tail_call_varargs):
(JSC::JIT::emitSlow_op_construct_varargs):
(JSC::JIT::emit_op_call):
(JSC::JIT::emit_op_tail_call):
(JSC::JIT::emit_op_call_eval):
(JSC::JIT::emit_op_call_varargs):
(JSC::JIT::emit_op_tail_call_varargs):
(JSC::JIT::emit_op_construct_varargs):
(JSC::JIT::compileOpCall):
(JSC::JIT::compileOpCallSlowCase):
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::emitNakedTailCall):
(JSC::JIT::updateTopCallFrame):
* jit/JITOperations.cpp:
* jit/JITOperations.h:
* jit/Repatch.cpp:
(JSC::linkVirtualFor):
(JSC::linkPolymorphicCall):
* jit/ThunkGenerators.cpp:
(JSC::throwExceptionFromCallSlowPathGenerator):
(JSC::slowPathFor):
(JSC::linkCallThunkGenerator):
(JSC::virtualThunkFor):
(JSC::arityFixupGenerator):
(JSC::unreachableGenerator):
(JSC::baselineGetterReturnThunkGenerator):
* jit/ThunkGenerators.h:
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:
* runtime/CommonSlowPaths.h:
(JSC::CommonSlowPaths::arityCheckFor):
(JSC::CommonSlowPaths::opIn):
* runtime/Options.h:
* tests/stress/mutual-tail-call-no-stack-overflow.js: Added.
(shouldThrow):
(sloppyCountdown.even):
(sloppyCountdown.odd):
(strictCountdown.even):
(strictCountdown.odd):
(strictCountdown):
(odd):
(even):
* tests/stress/tail-call-no-stack-overflow.js: Added.
(shouldThrow):
(strictLoop):
(strictLoopArityFixup1):
(strictLoopArityFixup2):
* tests/stress/tail-call-recognize.js: Added.
(callerMustBeRun):
(callerMustBeStrict):
(runTests):
* tests/stress/tail-call-varargs-no-stack-overflow.js: Added.
(shouldThrow):
(strictLoop):
* tests/stress/tail-calls-dont-overwrite-live-stack.js: Added.
(tail):
(obj.method):
(obj.get fromNative):
(getThis):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@189774 268f45cc-cd09-0410-ab3c-d52691b4dbfc

50 files changed:
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj
Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/assembler/AbortReason.h
Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
Source/JavaScriptCore/assembler/CodeLocation.h
Source/JavaScriptCore/assembler/LinkBuffer.h
Source/JavaScriptCore/assembler/MacroAssemblerARM.h
Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
Source/JavaScriptCore/assembler/MacroAssemblerX86.h
Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
Source/JavaScriptCore/bytecode/BytecodeList.json
Source/JavaScriptCore/bytecode/BytecodeUseDef.h
Source/JavaScriptCore/bytecode/CallLinkInfo.h
Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
Source/JavaScriptCore/bytecode/CallMode.cpp [new file with mode: 0644]
Source/JavaScriptCore/bytecode/CallMode.h [new file with mode: 0644]
Source/JavaScriptCore/bytecode/CodeBlock.cpp
Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
Source/JavaScriptCore/interpreter/Interpreter.h
Source/JavaScriptCore/jit/CCallHelpers.h
Source/JavaScriptCore/jit/JIT.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITCall.cpp
Source/JavaScriptCore/jit/JITCall32_64.cpp
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITOperations.h
Source/JavaScriptCore/jit/Repatch.cpp
Source/JavaScriptCore/jit/ThunkGenerators.cpp
Source/JavaScriptCore/jit/ThunkGenerators.h
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
Source/JavaScriptCore/runtime/CommonSlowPaths.h
Source/JavaScriptCore/runtime/Options.h
Source/JavaScriptCore/tests/stress/mutual-tail-call-no-stack-overflow.js [new file with mode: 0644]
Source/JavaScriptCore/tests/stress/tail-call-no-stack-overflow.js [new file with mode: 0644]
Source/JavaScriptCore/tests/stress/tail-call-recognize.js [new file with mode: 0644]
Source/JavaScriptCore/tests/stress/tail-call-varargs-no-stack-overflow.js [new file with mode: 0644]
Source/JavaScriptCore/tests/stress/tail-calls-dont-overwrite-live-stack.js [new file with mode: 0644]

index a6ce71c..adb3f3c 100644 (file)
@@ -80,6 +80,7 @@ set(JavaScriptCore_SOURCES
     bytecode/CallEdge.cpp
     bytecode/CallLinkInfo.cpp
     bytecode/CallLinkStatus.cpp
+    bytecode/CallMode.cpp
     bytecode/CallVariant.cpp
     bytecode/CodeBlock.cpp
     bytecode/CodeBlockHash.cpp
index bc7d07e..7ebdfc7 100644 (file)
     <ClCompile Include="..\bytecode\CallEdge.cpp" />
     <ClCompile Include="..\bytecode\CallLinkInfo.cpp" />
     <ClCompile Include="..\bytecode\CallLinkStatus.cpp" />
+    <ClCompile Include="..\bytecode\CallMode.cpp" />
     <ClCompile Include="..\bytecode\CallVariant.cpp" />
     <ClCompile Include="..\bytecode\CodeBlock.cpp" />
     <ClCompile Include="..\bytecode\CodeBlockHash.cpp" />
     <ClInclude Include="..\bytecode\CallEdge.h" />
     <ClInclude Include="..\bytecode\CallLinkInfo.h" />
     <ClInclude Include="..\bytecode\CallLinkStatus.h" />
+    <ClInclude Include="..\bytecode\CallMode.h" />
     <ClInclude Include="..\bytecode\CallReturnOffsetToBytecodeOffset.h" />
     <ClInclude Include="..\bytecode\CallVariant.h" />
     <ClInclude Include="..\bytecode\CodeBlock.h" />
index 56816ed..d5c6e28 100644 (file)
     <ClCompile Include="..\bytecode\CallLinkStatus.cpp">
       <Filter>bytecode</Filter>
     </ClCompile>
+    <ClCompile Include="..\bytecode\CallMode.cpp">
+      <Filter>bytecode</Filter>
+    </ClCompile>
     <ClCompile Include="..\bytecode\CodeBlock.cpp">
       <Filter>bytecode</Filter>
     </ClCompile>
     <ClInclude Include="..\bytecode\CallLinkStatus.h">
       <Filter>bytecode</Filter>
     </ClInclude>
+    <ClInclude Include="..\bytecode\CallMode.h">
+      <Filter>bytecode</Filter>
+    </ClInclude>
     <ClInclude Include="..\bytecode\CallReturnOffsetToBytecodeOffset.h">
       <Filter>bytecode</Filter>
     </ClInclude>
index c7a254c..ac2ea4b 100644 (file)
                5DBB1525131D0BD70056AD36 /* minidom.js in Copy Support Script */ = {isa = PBXBuildFile; fileRef = 1412110D0A48788700480255 /* minidom.js */; };
                5DE6E5B30E1728EC00180407 /* create_hash_table in Headers */ = {isa = PBXBuildFile; fileRef = F692A8540255597D01FF60F7 /* create_hash_table */; settings = {ATTRIBUTES = (); }; };
                623A37EC1B87A7C000754209 /* RegisterMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 623A37EB1B87A7BD00754209 /* RegisterMap.h */; };
+               627673231B680C1E00FD9F2E /* CallMode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 627673211B680C1E00FD9F2E /* CallMode.cpp */; };
+               627673241B680C1E00FD9F2E /* CallMode.h in Headers */ = {isa = PBXBuildFile; fileRef = 627673221B680C1E00FD9F2E /* CallMode.h */; settings = {ATTRIBUTES = (Private, ); }; };
                62D2D38F1ADF103F000206C1 /* FunctionRareData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */; };
                62D2D3901ADF103F000206C1 /* FunctionRareData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */; settings = {ATTRIBUTES = (Private, ); }; };
                62E3D5F01B8D0B7300B868BB /* DataFormat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62E3D5EF1B8D0B7300B868BB /* DataFormat.cpp */; };
                5DDDF44614FEE72200B4FB4D /* LLIntDesiredOffsets.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntDesiredOffsets.h; path = LLIntOffsets/LLIntDesiredOffsets.h; sourceTree = BUILT_PRODUCTS_DIR; };
                5DE3D0F40DD8DDFB00468714 /* WebKitAvailability.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WebKitAvailability.h; sourceTree = "<group>"; };
                623A37EB1B87A7BD00754209 /* RegisterMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RegisterMap.h; sourceTree = "<group>"; };
+               627673211B680C1E00FD9F2E /* CallMode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallMode.cpp; sourceTree = "<group>"; };
+               627673221B680C1E00FD9F2E /* CallMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallMode.h; sourceTree = "<group>"; };
                62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGLazyNode.cpp; path = dfg/DFGLazyNode.cpp; sourceTree = "<group>"; };
                62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGLazyNode.h; path = dfg/DFGLazyNode.h; sourceTree = "<group>"; };
                62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionRareData.cpp; sourceTree = "<group>"; };
                                0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */,
                                0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */,
                                0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */,
+                               627673211B680C1E00FD9F2E /* CallMode.cpp */,
+                               627673221B680C1E00FD9F2E /* CallMode.h */,
                                0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */,
                                0F3B7E2419A11B8000D9BC56 /* CallVariant.cpp */,
                                0F3B7E2519A11B8000D9BC56 /* CallVariant.h */,
                                E33B3E261B7ABD750048DB2E /* InspectorInstrumentationObject.lut.h in Headers */,
                                A7D89CFE17A0B8CC00773AD8 /* DFGOSRAvailabilityAnalysisPhase.h in Headers */,
                                0FD82E57141DAF1000179C94 /* DFGOSREntry.h in Headers */,
+                               627673241B680C1E00FD9F2E /* CallMode.h in Headers */,
                                0FD8A32617D51F5700CA2C40 /* DFGOSREntrypointCreationPhase.h in Headers */,
                                62F2AA381B0BEDE300610C7A /* DFGLazyNode.h in Headers */,
                                0FC0976A1468A6F700CF2442 /* DFGOSRExit.h in Headers */,
                                0F2D4DEF19832DD3007D4B19 /* TypeSet.cpp in Sources */,
                                9335F24D12E6765B002B5553 /* StringRecursionChecker.cpp in Sources */,
                                BCDE3B430E6C832D001453A7 /* Structure.cpp in Sources */,
+                               627673231B680C1E00FD9F2E /* CallMode.cpp in Sources */,
                                7E4EE70F0EBB7A5B005934AA /* StructureChain.cpp in Sources */,
                                C2F0F2D116BAEEE900187C19 /* StructureRareData.cpp in Sources */,
                                0FD3E40B1B618B6600C80E1E /* ObjectPropertyConditionSet.cpp in Sources */,
index cba1f54..692ad6e 100644 (file)
@@ -58,6 +58,7 @@ enum AbortReason {
     DFGUnreachableBasicBlock                          = 220,
     DFGUnreasonableOSREntryJumpDestination            = 230,
     DFGVarargsThrowingPathDidNotThrow                 = 235,
+    JITDidReturnFromTailCall                          = 237,
     JITDivOperandsAreNotNumbers                       = 240,
     JITGetByValResultIsNotEmpty                       = 250,
     JITNotSupported                                   = 260,
index 703ca03..01974ef 100644 (file)
@@ -507,7 +507,9 @@ public:
             None = 0x0,
             Linkable = 0x1,
             Near = 0x2,
+            Tail = 0x4,
             LinkableNear = 0x3,
+            LinkableNearTail = 0x7,
         };
 
         Call()
@@ -962,7 +964,15 @@ public:
 
     static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
     {
-        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+        switch (nearCall.callMode()) {
+        case NearCallMode::Tail:
+            AssemblerType::relinkJump(nearCall.dataLocation(), destination.executableAddress());
+            return;
+        case NearCallMode::Regular:
+            AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+            return;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
     }
 
     static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
index 86d1f2b..3116e06 100644 (file)
@@ -32,6 +32,8 @@
 
 namespace JSC {
 
+enum NearCallMode { Regular, Tail };
+
 class CodeLocationInstruction;
 class CodeLocationLabel;
 class CodeLocationJump;
@@ -59,7 +61,7 @@ public:
     CodeLocationLabel labelAtOffset(int offset);
     CodeLocationJump jumpAtOffset(int offset);
     CodeLocationCall callAtOffset(int offset);
-    CodeLocationNearCall nearCallAtOffset(int offset);
+    CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode);
     CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
     CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
     CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
@@ -115,10 +117,13 @@ public:
 class CodeLocationNearCall : public CodeLocationCommon {
 public:
     CodeLocationNearCall() {}
-    explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
-        : CodeLocationCommon(location) {}
-    explicit CodeLocationNearCall(void* location)
-        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+    explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode)
+        : CodeLocationCommon(location), m_callMode(callMode) { }
+    explicit CodeLocationNearCall(void* location, NearCallMode callMode)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { }
+    NearCallMode callMode() { return m_callMode; }
+private:
+    NearCallMode m_callMode = NearCallMode::Regular;
 };
 
 class CodeLocationDataLabel32 : public CodeLocationCommon {
@@ -181,10 +186,10 @@ inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
     return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
 }
 
-inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode)
 {
     ASSERT_VALID_CODE_OFFSET(offset);
-    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset, callMode);
 }
 
 inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
index 9b0d4c4..b34a6f1 100644 (file)
@@ -180,7 +180,8 @@ public:
     {
         ASSERT(call.isFlagSet(Call::Linkable));
         ASSERT(call.isFlagSet(Call::Near));
-        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)),
+            call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular);
     }
 
     CodeLocationLabel locationOf(PatchableJump jump)
index 91fef4d..d2b411f 100644 (file)
@@ -904,6 +904,11 @@ public:
         return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
     }
 
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
     Call call(RegisterID target)
     {
         return Call(m_assembler.blx(target), Call::None);
@@ -1488,7 +1493,10 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARMAssembler::linkCall(code, call.m_label, function.value());
+        if (call.isFlagSet(Call::Tail))
+            ARMAssembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMAssembler::linkCall(code, call.m_label, function.value());
     }
 
 
index 2cdcd17..7b17207 100644 (file)
@@ -2204,6 +2204,13 @@ public:
         return Call(m_assembler.label(), Call::LinkableNear);
     }
 
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.b();
+        return Call(label, Call::LinkableNearTail);
+    }
+
     ALWAYS_INLINE void ret()
     {
         m_assembler.ret();
@@ -2882,10 +2889,12 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        if (call.isFlagSet(Call::Near))
-            ARM64Assembler::linkCall(code, call.m_label, function.value());
-        else
+        if (!call.isFlagSet(Call::Near))
             ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            ARM64Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARM64Assembler::linkCall(code, call.m_label, function.value());
     }
 
     CachedTempRegister m_dataMemoryTempRegister;
index dc1c8c4..e807f79 100644 (file)
@@ -1677,6 +1677,12 @@ public:
         return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
     }
 
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
+    }
+
     ALWAYS_INLINE Call call()
     {
         moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
@@ -2012,7 +2018,10 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARMv7Assembler::linkCall(code, call.m_label, function.value());
+        if (call.isFlagSet(Call::Tail))
+            ARMv7Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMv7Assembler::linkCall(code, call.m_label, function.value());
     }
 
 #if ENABLE(MASM_PROBE)
index 1a93128..48e3401 100644 (file)
@@ -1975,6 +1975,16 @@ public:
         return Call(m_assembler.label(), Call::LinkableNear);
     }
 
+    Call nearTailCall()
+    {
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0);
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Call(m_assembler.label(), Call::LinkableNearTail);
+    }
+
     Call call()
     {
         m_assembler.lui(MIPSRegisters::t9, 0);
@@ -2800,7 +2810,10 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        MIPSAssembler::linkCall(code, call.m_label, function.value());
+        if (call.isFlagSet(Call::Tail))
+            MIPSAssembler::linkJump(code, call.m_label, function.value());
+        else
+            MIPSAssembler::linkCall(code, call.m_label, function.value());
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
index 6857c60..5cfb05b 100644 (file)
@@ -2403,6 +2403,11 @@ public:
         return Call(m_assembler.call(), Call::Linkable);
     }
 
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jump(), Call::LinkableNearTail);
+    }
+
     Call nearCall()
     {
         return Call(m_assembler.call(), Call::LinkableNear);
@@ -2608,7 +2613,10 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        SH4Assembler::linkCall(code, call.m_label, function.value());
+        if (call.isFlagSet(Call::Tail))
+            SH4Assembler::linkJump(code, call.m_label, function.value());
+        else
+            SH4Assembler::linkCall(code, call.m_label, function.value());
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
index 108f511..7ea3b5d 100644 (file)
@@ -361,7 +361,10 @@ private:
 
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        X86Assembler::linkCall(code, call.m_label, function.value());
+        if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
+        else
+            X86Assembler::linkCall(code, call.m_label, function.value());
     }
 };
 
index 84a4f52..deba962 100644 (file)
@@ -1403,6 +1403,11 @@ public:
         m_assembler.int3();
     }
 
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
     Call nearCall()
     {
         return Call(m_assembler.call(), Call::LinkableNear);
index ac1912b..8d071f6 100644 (file)
@@ -872,6 +872,8 @@ private:
     {
         if (!call.isFlagSet(Call::Near))
             X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
         else
             X86Assembler::linkCall(code, call.m_label, function.value());
     }
index 6c07463..e5f3ad3 100644 (file)
             { "name" : "op_new_func_exp", "length" : 4 },
             { "name" : "op_new_arrow_func_exp", "length" : 5 },
             { "name" : "op_call", "length" : 9 },
+            { "name" : "op_tail_call", "length" : 9 },
             { "name" : "op_call_eval", "length" : 9 },
             { "name" : "op_call_varargs", "length" : 9 },
+            { "name" : "op_tail_call_varargs", "length" : 9 },
             { "name" : "op_ret", "length" : 2 },
             { "name" : "op_construct", "length" : 9 },
             { "name" : "op_construct_varargs", "length" : 9 },
             { "name" : "llint_cloop_did_return_from_js_5" },
             { "name" : "llint_cloop_did_return_from_js_6" },
             { "name" : "llint_cloop_did_return_from_js_7" },
-            { "name" : "llint_cloop_did_return_from_js_8" }
+            { "name" : "llint_cloop_did_return_from_js_8" },
+            { "name" : "llint_cloop_did_return_from_js_9" }
         ]
     },
     {
index d839867..f77a872 100644 (file)
@@ -191,7 +191,8 @@ void computeUsesForBytecodeOffset(
     }
     case op_has_structure_property:
     case op_construct_varargs:
-    case op_call_varargs: {
+    case op_call_varargs:
+    case op_tail_call_varargs: {
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
         functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
@@ -220,7 +221,8 @@ void computeUsesForBytecodeOffset(
     }
     case op_construct:
     case op_call_eval:
-    case op_call: {
+    case op_call:
+    case op_tail_call: {
         functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
         int argCount = instruction[3].u.operand;
         int registerOffset = -instruction[4].u.operand;
@@ -311,9 +313,11 @@ void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset,
     case op_new_func_exp:
     case op_new_arrow_func_exp:
     case op_call_varargs:
+    case op_tail_call_varargs:
     case op_construct_varargs:
     case op_get_from_scope:
     case op_call:
+    case op_tail_call:
     case op_call_eval:
     case op_construct:
     case op_get_by_id:
index 5f5c6b4..7326d13 100644 (file)
@@ -26,6 +26,7 @@
 #ifndef CallLinkInfo_h
 #define CallLinkInfo_h
 
+#include "CallMode.h"
 #include "CodeLocation.h"
 #include "CodeSpecializationKind.h"
 #include "JITWriteBarrier.h"
@@ -41,19 +42,36 @@ namespace JSC {
 
 class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
 public:
-    enum CallType { None, Call, CallVarargs, Construct, ConstructVarargs };
+    enum CallType { None, Call, CallVarargs, Construct, ConstructVarargs, TailCall, TailCallVarargs };
     static CallType callTypeFor(OpcodeID opcodeID)
     {
         if (opcodeID == op_call || opcodeID == op_call_eval)
             return Call;
+        if (opcodeID == op_call_varargs)
+            return CallVarargs;
         if (opcodeID == op_construct)
             return Construct;
         if (opcodeID == op_construct_varargs)
             return ConstructVarargs;
-        ASSERT(opcodeID == op_call_varargs);
-        return CallVarargs;
+        if (opcodeID == op_tail_call)
+            return TailCall;
+        ASSERT(opcodeID == op_tail_call_varargs);
+        return TailCallVarargs;
     }
-    
+
+    static bool isVarargsCallType(CallType callType)
+    {
+        switch (callType) {
+        case CallVarargs:
+        case ConstructVarargs:
+        case TailCallVarargs:
+            return true;
+
+        default:
+            return false;
+        }
+    }
+
     CallLinkInfo()
         : m_registerPreservationMode(static_cast<unsigned>(RegisterPreservationNotRequired))
         , m_hasSeenShouldRepatch(false)
@@ -83,6 +101,40 @@ public:
         return specializationKindFor(static_cast<CallType>(m_callType));
     }
 
+    static CallMode callModeFor(CallType callType)
+    {
+        switch (callType) {
+        case Call:
+        case CallVarargs:
+            return CallMode::Regular;
+        case TailCall:
+        case TailCallVarargs:
+            return CallMode::Tail;
+        case Construct:
+        case ConstructVarargs:
+            return CallMode::Construct;
+        case None:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    CallMode callMode() const
+    {
+        return callModeFor(static_cast<CallType>(m_callType));
+    }
+
+    bool isTailCall() const
+    {
+        return callMode() == CallMode::Tail;
+    }
+
+    bool isVarargs() const
+    {
+        return isVarargsCallType(static_cast<CallType>(m_callType));
+    }
+
     RegisterPreservationMode registerPreservationMode() const
     {
         return static_cast<RegisterPreservationMode>(m_registerPreservationMode);
index 68eaff9..687a24c 100644 (file)
@@ -69,7 +69,7 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locke
     
     Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
     OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
-    if (op != op_call && op != op_construct)
+    if (op != op_call && op != op_construct && op != op_tail_call)
         return CallLinkStatus();
     
     LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
diff --git a/Source/JavaScriptCore/bytecode/CallMode.cpp b/Source/JavaScriptCore/bytecode/CallMode.cpp
new file mode 100644 (file)
index 0000000..5757b18
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CallMode callMode)
+{
+    switch (callMode) {
+    case JSC::CallMode::Tail:
+        out.print("TailCall");
+        return;
+    case JSC::CallMode::Regular:
+        out.print("Call");
+        return;
+    case JSC::CallMode::Construct:
+        out.print("Construct");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/bytecode/CallMode.h b/Source/JavaScriptCore/bytecode/CallMode.h
new file mode 100644 (file)
index 0000000..bcbb613
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef CallMode_h
+#define CallMode_h
+
+namespace JSC {
+
+enum class CallMode { Regular, Tail, Construct };
+
+enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CallMode);
+
+} // namespace WTF
+
+#endif // CallMode_h
+
index cb48868..788c799 100644 (file)
@@ -1248,13 +1248,18 @@ void CodeBlock::dumpBytecode(
             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
             break;
         }
+        case op_tail_call: {
+            printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
         case op_call_eval: {
             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
             break;
         }
             
         case op_construct_varargs:
-        case op_call_varargs: {
+        case op_call_varargs:
+        case op_tail_call_varargs: {
             int result = (++it)->u.operand;
             int callee = (++it)->u.operand;
             int thisValue = (++it)->u.operand;
@@ -1262,7 +1267,7 @@ void CodeBlock::dumpBytecode(
             int firstFreeRegister = (++it)->u.operand;
             int varArgOffset = (++it)->u.operand;
             ++it;
-            printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
+            printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : opcode == op_construct_varargs ? "construct_varargs" : "tail_call_varargs");
             out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
             dumpValueProfiling(out, it, hasPrintedProfiling);
             break;
@@ -1829,6 +1834,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
             break;
         }
         case op_call_varargs:
+        case op_tail_call_varargs:
         case op_construct_varargs:
         case op_get_by_val: {
             int arrayProfileIndex = pc[opLength - 2].u.operand;
@@ -1878,6 +1884,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
         }
 
         case op_call:
+        case op_tail_call:
         case op_call_eval: {
             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
             ASSERT(profile->m_bytecodeOffset == -1);
index f54d8db..ab45d52 100644 (file)
@@ -193,7 +193,12 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke
     , m_vm(&vm)
     , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
     , m_usesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode())
-    , m_inTailPosition(Options::enableTailCalls() && constructorKind() == ConstructorKind::None && isStrictMode())
+    // FIXME: We should be able to have tail call elimination with the profiler
+    // enabled. This is currently not possible because the profiler expects
+    // op_will_call / op_did_call pairs before and after a call, which are not
+    // compatible with tail calls (we have no way of emitting op_did_call).
+    // https://bugs.webkit.org/show_bug.cgi?id=148819
+    , m_inTailPosition(Options::enableTailCalls() && constructorKind() == ConstructorKind::None && isStrictMode() && !m_shouldEmitProfileHooks)
 {
     for (auto& constantRegister : m_linkTimeConstantRegisters)
         constantRegister = nullptr;
@@ -2513,10 +2518,7 @@ RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, Expec
 
 RegisterID* BytecodeGenerator::emitCallInTailPosition(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    // FIXME: We should be emitting a new op_tail_call here when
-    // m_inTailPosition is false
-    // https://bugs.webkit.org/show_bug.cgi?id=148661
-    return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
+    return emitCall(m_inTailPosition ? op_tail_call : op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd);
 }
 
 RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
@@ -2601,7 +2603,7 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst,
 
 RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
+    ASSERT(opcodeID == op_call || opcodeID == op_call_eval || opcodeID == op_tail_call);
     ASSERT(func->refCount());
 
     if (m_shouldEmitProfileHooks)
@@ -2617,7 +2619,7 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi
             RefPtr<RegisterID> argumentRegister;
             argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0));
             RefPtr<RegisterID> thisRegister = emitMove(newTemporary(), callArguments.thisRegister());
-            return emitCallVarargs(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
+            return emitCallVarargs(opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, callArguments.profileHookRegister(), divot, divotStart, divotEnd);
         }
         for (; n; n = n->m_next)
             emitNode(callArguments.argumentRegister(argument++), n);
@@ -2670,10 +2672,7 @@ RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func
 
 RegisterID* BytecodeGenerator::emitCallVarargsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
 {
-    // FIXME: We should be emitting a new op_tail_call here when
-    // m_inTailPosition is false
-    // https://bugs.webkit.org/show_bug.cgi?id=148661
-    return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
+    return emitCallVarargs(m_inTailPosition ? op_tail_call_varargs : op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, profileHookRegister, divot, divotStart, divotEnd);
 }
 
 RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, RegisterID* profileHookRegister, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd)
index 0529e32..48647a3 100644 (file)
@@ -678,6 +678,12 @@ CallArguments::CallArguments(BytecodeGenerator& generator, ArgumentsNode* argume
         m_argv[i] = generator.newTemporary();
         ASSERT(static_cast<size_t>(i) == m_argv.size() - 1 || m_argv[i]->index() == m_argv[i + 1]->index() - 1);
     }
+
+    // We need to ensure that the frame size is stack-aligned
+    while ((JSStack::CallFrameHeaderSize + m_argv.size()) % stackAlignmentRegisters()) {
+        m_argv.insert(0, generator.newTemporary());
+        m_padding++;
+    }
     
     while (stackOffset() % stackAlignmentRegisters()) {
         m_argv.insert(0, generator.newTemporary());
@@ -2786,7 +2792,7 @@ void LabelNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
     ASSERT(!generator.breakTarget(m_name));
 
     LabelScopePtr scope = generator.newLabelScope(LabelScope::NamedLabel, &m_name);
-    generator.emitNode(dst, m_statement);
+    generator.emitNodeInTailPosition(dst, m_statement);
 
     generator.emitLabel(scope->breakTarget());
 }
index 7bd3b37..9fbd80c 100644 (file)
@@ -741,7 +741,10 @@ private:
         SpeculatedType prediction)
     {
         addVarArgChild(callee);
-        size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
+        size_t frameSize = JSStack::CallFrameHeaderSize + argCount;
+        size_t alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize);
+        size_t parameterSlots = alignedFrameSize - JSStack::CallerFrameAndPCSize;
+
         if (parameterSlots > m_parameterSlots)
             m_parameterSlots = parameterSlots;
 
index e74139c..568bdf4 100644 (file)
@@ -4329,24 +4329,29 @@ private:
 
     void compileCallOrConstruct()
     {
-        int numPassedArgs = m_node->numChildren() - 1;
-        int numArgs = numPassedArgs;
+        int numArgs = m_node->numChildren() - 1;
 
         LValue jsCallee = lowJSValue(m_graph.varArgChild(m_node, 0));
 
         unsigned stackmapID = m_stackmapIDs++;
-        
+
+        unsigned frameSize = JSStack::CallFrameHeaderSize + numArgs;
+        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize);
+        unsigned padding = alignedFrameSize - frameSize;
+
         Vector<LValue> arguments;
         arguments.append(m_out.constInt64(stackmapID));
         arguments.append(m_out.constInt32(sizeOfCall()));
         arguments.append(constNull(m_out.ref8));
-        arguments.append(m_out.constInt32(1 + JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + numArgs));
+        arguments.append(m_out.constInt32(1 + alignedFrameSize - JSStack::CallerFrameAndPCSize));
         arguments.append(jsCallee); // callee -> %rax
         arguments.append(getUndef(m_out.int64)); // code block
         arguments.append(jsCallee); // callee -> stack
         arguments.append(m_out.constInt64(numArgs)); // argument count and zeros for the tag
-        for (int i = 0; i < numPassedArgs; ++i)
+        for (int i = 0; i < numArgs; ++i)
             arguments.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
+        for (unsigned i = 0; i < padding; ++i)
+            arguments.append(getUndef(m_out.int64));
         
         callPreflight();
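
The DFG and FTL changes above share one piece of arithmetic: round the raw frame size up to the stack alignment, then reserve alignedFrameSize - CallerFrameAndPCSize parameter slots (DFG) and append alignedFrameSize - frameSize undefined values as padding (FTL). A minimal sketch of that arithmetic, with assumed slot counts in place of the JSStack constants:

    #include <cassert>

    static unsigned roundUpToMultipleOf(unsigned divisor, unsigned x)
    {
        return (x + divisor - 1) / divisor * divisor; // same contract as WTF's helper
    }

    int main()
    {
        const unsigned callFrameHeaderSize = 5;    // assumed slot counts, not the
        const unsigned callerFrameAndPCSize = 2;   // real JSStack definitions
        const unsigned stackAlignmentRegisters = 2;

        unsigned numArgs = 3; // hypothetical call site
        unsigned frameSize = callFrameHeaderSize + numArgs;                          // 8
        unsigned alignedFrameSize = roundUpToMultipleOf(stackAlignmentRegisters, frameSize);
        assert(alignedFrameSize - callerFrameAndPCSize == 6); // DFG parameterSlots
        assert(alignedFrameSize - frameSize == 0);            // FTL padding

        numArgs = 4; // now the raw size is odd...
        frameSize = callFrameHeaderSize + numArgs;                                   // 9
        alignedFrameSize = roundUpToMultipleOf(stackAlignmentRegisters, frameSize);
        assert(alignedFrameSize - frameSize == 1); // ...so one undef pads the frame
        return 0;
    }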
         
index a96efb7..cf72c33 100644 (file)
@@ -253,7 +253,7 @@ namespace JSC {
 
         void dumpRegisters(CallFrame*);
         
-        bool isCallBytecode(Opcode opcode) { return opcode == getOpcode(op_call) || opcode == getOpcode(op_construct) || opcode == getOpcode(op_call_eval); }
+        bool isCallBytecode(Opcode opcode) { return opcode == getOpcode(op_call) || opcode == getOpcode(op_construct) || opcode == getOpcode(op_call_eval) || opcode == getOpcode(op_tail_call); }
 
         void enableSampler();
         int m_sampleEntryDepth;
index b790e12..d4f1257 100644 (file)
@@ -30,6 +30,7 @@
 
 #include "AssemblyHelpers.h"
 #include "GPRInfo.h"
+#include "StackAlignment.h"
 
 namespace JSC {
 
@@ -2082,6 +2083,95 @@ public:
         loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1);
         jump(GPRInfo::regT1);
     }
+
+    void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg)
+    {
+        GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0;
+        GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1;
+        GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? GPRInfo::regT3 : GPRInfo::regT2;
+
+        GPRReg newFramePointer = temp1;
+        GPRReg newFrameSizeGPR = temp2;
+        {
+            // The old frame size is its argument count (or its parameter
+            // count, if arity fixup ran), plus the frame header size,
+            // rounded up to stack alignment
+            GPRReg oldFrameSizeGPR = temp2;
+            {
+                GPRReg argCountGPR = oldFrameSizeGPR;
+                load32(Address(framePointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), argCountGPR);
+
+                {
+                    GPRReg numParametersGPR = temp1;
+                    {
+                        GPRReg codeBlockGPR = numParametersGPR;
+                        loadPtr(Address(framePointerRegister, JSStack::CodeBlock * static_cast<int>(sizeof(Register))), codeBlockGPR);
+                        load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
+                    }
+
+                    ASSERT(numParametersGPR != argCountGPR);
+                    Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR);
+                    move(numParametersGPR, argCountGPR);
+                    argumentCountWasNotFixedUp.link(this);
+                }
+
+                add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), argCountGPR, oldFrameSizeGPR);
+                and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR);
+                // We assume < 2^28 arguments
+                mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR);
+            }
+
+            // The new frame pointer is at framePointer + oldFrameSize - newFrameSize
+            ASSERT(newFramePointer != oldFrameSizeGPR);
+            move(framePointerRegister, newFramePointer);
+            addPtr(oldFrameSizeGPR, newFramePointer);
+
+            // The new frame size is just the number of arguments plus the
+            // frame header size, rounded up to stack alignment
+            ASSERT(newFrameSizeGPR != newFramePointer);
+            load32(Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)),
+                newFrameSizeGPR);
+            add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), newFrameSizeGPR);
+            and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR);
+            // We assume < 2^28 arguments
+            mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR);
+        }
+
+        GPRReg tempGPR = temp3;
+        ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR);
+
+        // We don't need the current frame beyond this point. Masquerade as our
+        // caller.
+#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
+        loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister);
+        subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(MIPS)
+        loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister);
+        subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(X86) || CPU(X86_64)
+        loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR);
+        push(tempGPR);
+        subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+#else
+        UNREACHABLE_FOR_PLATFORM();
+#endif
+        subPtr(newFrameSizeGPR, newFramePointer);
+        loadPtr(Address(framePointerRegister), framePointerRegister);
+
+
+        // Copy the new frame, i.e. the newFrameSizeGPR bytes above the stack
+        // pointer, to its final location starting at newFramePointer, in pointer-sized chunks.
+        MacroAssembler::Label copyLoop(label());
+
+        subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+        loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR);
+        storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne));
+
+        branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this);
+
+        // Ready for a jump!
+        move(newFramePointer, stackPointerRegister);
+    }
 };
 
 } // namespace JSC
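
The heart of prepareForTailCallSlow is the frame slide: the new frame, already materialized at the stack pointer, is copied so that it ends exactly where the old frame ends, once the return address and caller frame pointer have been rescued. A standalone model of just the copy (all addresses and sizes are made up; the real code works in machine registers):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        // Byte-array model of the slide; higher indices play the role of higher
        // addresses, and all sizes are made up but stack-aligned.
        unsigned char memory[64] = {};
        const size_t framePointer = 16; // base of the old frame
        const size_t oldFrameSize = 32; // bytes
        const size_t sp = 8;            // the new frame was built at the stack pointer
        const size_t newFrameSize = 24; // bytes

        for (size_t i = 0; i < newFrameSize; ++i)
            memory[sp + i] = static_cast<unsigned char>(0xA0 + i);

        // newFramePointer = framePointer + oldFrameSize - newFrameSize: the new
        // frame must end exactly where the old frame ends.
        const size_t newFramePointer = framePointer + oldFrameSize - newFrameSize;

        // Mirror the copyLoop, walking from the top of the frame down (the real
        // loop steps by sizeof(void*); bytes keep the model simple).
        for (size_t offset = newFrameSize; offset > 0;) {
            --offset;
            memory[newFramePointer + offset] = memory[sp + offset];
        }

        assert(memory[newFramePointer] == 0xA0);
        assert(memory[newFramePointer + newFrameSize - 1] == 0xA0 + newFrameSize - 1);
        return 0;
    }

Copying from the top of the frame downwards is what makes the overlapping case safe: the destination lies above the source, so each store only lands on source bytes that have already been moved.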
index 6a9827c..ad8c04d 100644 (file)
@@ -197,8 +197,10 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_bitor)
         DEFINE_OP(op_bitxor)
         DEFINE_OP(op_call)
+        DEFINE_OP(op_tail_call)
         DEFINE_OP(op_call_eval)
         DEFINE_OP(op_call_varargs)
+        DEFINE_OP(op_tail_call_varargs)
         DEFINE_OP(op_construct_varargs)
         DEFINE_OP(op_catch)
         DEFINE_OP(op_construct)
@@ -371,8 +373,10 @@ void JIT::privateCompileSlowCases()
         DEFINE_SLOWCASE_OP(op_bitor)
         DEFINE_SLOWCASE_OP(op_bitxor)
         DEFINE_SLOWCASE_OP(op_call)
+        DEFINE_SLOWCASE_OP(op_tail_call)
         DEFINE_SLOWCASE_OP(op_call_eval)
         DEFINE_SLOWCASE_OP(op_call_varargs)
+        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
         DEFINE_SLOWCASE_OP(op_construct_varargs)
         DEFINE_SLOWCASE_OP(op_construct)
         DEFINE_SLOWCASE_OP(op_to_this)
index 35902b1..7fab237 100644 (file)
@@ -487,8 +487,10 @@ namespace JSC {
         void emit_op_bitor(Instruction*);
         void emit_op_bitxor(Instruction*);
         void emit_op_call(Instruction*);
+        void emit_op_tail_call(Instruction*);
         void emit_op_call_eval(Instruction*);
         void emit_op_call_varargs(Instruction*);
+        void emit_op_tail_call_varargs(Instruction*);
         void emit_op_construct_varargs(Instruction*);
         void emit_op_catch(Instruction*);
         void emit_op_construct(Instruction*);
@@ -600,8 +602,10 @@ namespace JSC {
         void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -824,6 +828,7 @@ namespace JSC {
         void updateTopCallFrame();
 
         Call emitNakedCall(CodePtr function = CodePtr());
+        Call emitNakedTailCall(CodePtr function = CodePtr());
 
         // Loads the character value of a single character string into dst.
         void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
index 1af51ad..5b5099a 100644 (file)
@@ -145,10 +145,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
     COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
     COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
+    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
+    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
     CallLinkInfo* info;
     if (opcodeID != op_call_eval)
         info = m_codeBlock->addCallLinkInfo();
-    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
+    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
         compileSetupVarargsFrame(instruction, info);
     else {
         int argCount = instruction[3].u.operand;
@@ -172,12 +174,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
 
     store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
-    
+
     if (opcodeID == op_call_eval) {
         compileCallEval(instruction);
         return;
     }
 
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+        emitRestoreCalleeSaves();
+
     DataLabelPtr addressOfLinkedFunctionCheck;
     Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
     addSlowCase(slowCase);
@@ -188,6 +193,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
     m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
 
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+        prepareForTailCallSlow();
+        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+        return;
+    }
+
     m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
 
     addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
@@ -208,8 +219,14 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
     linkSlowCase(iter);
 
     move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
+
     m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
 
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+        abortWithReason(JITDidReturnFromTailCall);
+        return;
+    }
+
     addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
     checkStackPointerAlignment();
 
@@ -223,6 +240,11 @@ void JIT::emit_op_call(Instruction* currentInstruction)
     compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
 }
 
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
+{
+    compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
 void JIT::emit_op_call_eval(Instruction* currentInstruction)
 {
     compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
@@ -232,7 +254,12 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
 {
     compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
 }
-    
+
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+    compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
 void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
 {
     compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
@@ -248,6 +275,11 @@ void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry
     compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
 void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
@@ -257,6 +289,11 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
 {
     compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
 }
+
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
     
 void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
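
Note that the tail-call hot path ends in emitNakedTailCall() with no result handling afterwards: once the frame has been slid, the callee is entered with a jump, so its return goes straight to our caller and the stack never grows. The effect is the familiar one of rewriting a self tail call as a loop; an illustrative analogue (not JSC code):

    #include <cassert>

    // 'return countdown(n - 2);' hand-rewritten as a loop, the way a tail
    // call reuses the current frame instead of pushing a new one.
    static long countdown(long n)
    {
        while (n > 1)
            n -= 2;
        return n;
    }

    int main()
    {
        assert(countdown(100000) == 0); // constant stack space for any n
        assert(countdown(100001) == 1);
        return 0;
    }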
index 89c3bac..4d163c2 100644 (file)
@@ -69,6 +69,11 @@ void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry
     compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
 }
 
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
 void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
@@ -78,6 +83,11 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
 {
     compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
 }
+
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
     
 void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
@@ -94,6 +104,11 @@ void JIT::emit_op_call(Instruction* currentInstruction)
     compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
 }
 
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
+{
+    compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
 void JIT::emit_op_call_eval(Instruction* currentInstruction)
 {
     compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
@@ -103,6 +118,11 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
 {
     compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
 }
+
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+    compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
     
 void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
 {
@@ -210,7 +230,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     CallLinkInfo* info;
     if (opcodeID != op_call_eval)
         info = m_codeBlock->addCallLinkInfo();
-    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
+    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
         compileSetupVarargsFrame(instruction, info);
     else {
         int argCount = instruction[3].u.operand;
@@ -241,6 +261,9 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
         return;
     }
 
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+        emitRestoreCalleeSaves();
+
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
 
     DataLabelPtr addressOfLinkedFunctionCheck;
@@ -255,6 +278,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
 
     checkStackPointerAlignment();
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+        prepareForTailCallSlow();
+        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+        return;
+    }
+
     m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
 
     addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
@@ -275,8 +304,17 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
     linkSlowCase(iter);
 
     move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
+
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+        emitRestoreCalleeSaves();
+
     m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
 
+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+        abortWithReason(JITDidReturnFromTailCall);
+        return;
+    }
+
     addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
     checkStackPointerAlignment();
 
index d327673..386f0fa 100644 (file)
@@ -125,6 +125,14 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
     return nakedCall;
 }
 
+ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr function)
+{
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    Call nakedCall = nearTailCall();
+    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+    return nakedCall;
+}
+
 ALWAYS_INLINE void JIT::updateTopCallFrame()
 {
     ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
index 73f4a6c..2b7e825 100644 (file)
@@ -682,14 +682,14 @@ EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execC
     return JSValue::encode(result);
 }
 
-static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
+static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CallLinkInfo* callLinkInfo)
 {
     ExecState* exec = execCallee->callerFrame();
     VM* vm = &exec->vm();
 
     execCallee->setCodeBlock(0);
 
-    if (kind == CodeForCall) {
+    if (callLinkInfo->specializationKind() == CodeForCall) {
         CallData callData;
         CallType callType = getCallData(callee, callData);
     
@@ -699,18 +699,25 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ
             NativeCallFrameTracer tracer(vm, execCallee);
             execCallee->setCallee(asObject(callee));
             vm->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
-            if (vm->exception())
-                return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+            if (vm->exception()) {
+                return encodeResult(
+                    vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                    reinterpret_cast<void*>(KeepTheFrame));
+            }
 
-            return reinterpret_cast<void*>(getHostCallReturnValue);
+            return encodeResult(
+                bitwise_cast<void*>(getHostCallReturnValue),
+                reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
         }
     
         ASSERT(callType == CallTypeNone);
         exec->vm().throwException(exec, createNotAFunctionError(exec, callee));
-        return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+        return encodeResult(
+            vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+            reinterpret_cast<void*>(KeepTheFrame));
     }
 
-    ASSERT(kind == CodeForConstruct);
+    ASSERT(callLinkInfo->specializationKind() == CodeForConstruct);
     
     ConstructData constructData;
     ConstructType constructType = getConstructData(callee, constructData);
@@ -721,18 +728,23 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ
         NativeCallFrameTracer tracer(vm, execCallee);
         execCallee->setCallee(asObject(callee));
         vm->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
-        if (vm->exception())
-            return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+        if (vm->exception()) {
+            return encodeResult(
+                vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                reinterpret_cast<void*>(KeepTheFrame));
+        }
 
-        return reinterpret_cast<void*>(getHostCallReturnValue);
+        return encodeResult(bitwise_cast<void*>(getHostCallReturnValue), reinterpret_cast<void*>(KeepTheFrame));
     }
     
     ASSERT(constructType == ConstructTypeNone);
     exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
-    return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+    return encodeResult(
+        vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+        reinterpret_cast<void*>(KeepTheFrame));
 }
 
-char* JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
 {
     ExecState* exec = execCallee->callerFrame();
     VM* vm = &exec->vm();
@@ -745,7 +757,7 @@ char* JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callL
         // FIXME: We should cache these kinds of calls. They can be common and currently they are
         // expensive.
         // https://bugs.webkit.org/show_bug.cgi?id=144458
-        return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
+        return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
     }
 
     JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
@@ -774,17 +786,21 @@ char* JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callL
 
         if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
             exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
-            return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+            return encodeResult(
+                vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                reinterpret_cast<void*>(KeepTheFrame));
         }
 
         JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind);
         if (error) {
             exec->vm().throwException(exec, error);
-            return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+            return encodeResult(
+                vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                reinterpret_cast<void*>(KeepTheFrame));
         }
         codeBlock = functionExecutable->codeBlockFor(kind);
         ArityCheckMode arity;
-        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->callType() == CallLinkInfo::CallVarargs || callLinkInfo->callType() == CallLinkInfo::ConstructVarargs)
+        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->isVarargs())
             arity = MustCheckArity;
         else
             arity = ArityCheckNotRequired;
@@ -795,10 +811,10 @@ char* JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callL
     else
         linkFor(execCallee, *callLinkInfo, codeBlock, callee, codePtr);
     
-    return reinterpret_cast<char*>(codePtr.executableAddress());
+    return encodeResult(codePtr.executableAddress(), reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
 }
 
-inline char* virtualForWithFunction(
+inline SlowPathReturnType virtualForWithFunction(
     ExecState* execCallee, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell)
 {
     ExecState* exec = execCallee->callerFrame();
@@ -809,7 +825,7 @@ inline char* virtualForWithFunction(
     JSValue calleeAsValue = execCallee->calleeAsValue();
     calleeAsFunctionCell = getJSFunction(calleeAsValue);
     if (UNLIKELY(!calleeAsFunctionCell))
-        return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
+        return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
     
     JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
     JSScope* scope = function->scopeUnchecked();
@@ -824,13 +840,17 @@ inline char* virtualForWithFunction(
 
             if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
                 exec->vm().throwException(exec, createNotAConstructorError(exec, function));
-                return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+                return encodeResult(
+                    vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                    reinterpret_cast<void*>(KeepTheFrame));
             }
 
             JSObject* error = functionExecutable->prepareForExecution(execCallee, function, scope, kind);
             if (error) {
                 exec->vm().throwException(exec, error);
-                return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+                return encodeResult(
+                    vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+                    reinterpret_cast<void*>(KeepTheFrame));
             }
         } else {
 #if ENABLE(WEBASSEMBLY)
@@ -844,22 +864,23 @@ inline char* virtualForWithFunction(
 #endif
         }
     }
-    return reinterpret_cast<char*>(executable->entrypointFor(
-        *vm, kind, MustCheckArity, callLinkInfo->registerPreservationMode()).executableAddress());
+    return encodeResult(executable->entrypointFor(
+        *vm, kind, MustCheckArity, callLinkInfo->registerPreservationMode()).executableAddress(),
+        reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
 }
 
-char* JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
 {
     ASSERT(callLinkInfo->specializationKind() == CodeForCall);
     JSCell* calleeAsFunctionCell;
-    char* result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell);
+    SlowPathReturnType result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell);
 
     linkPolymorphicCall(execCallee, *callLinkInfo, CallVariant(calleeAsFunctionCell));
     
     return result;
 }
 
-char* JIT_OPERATION operationVirtualCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
 {
     JSCell* calleeAsFunctionCellIgnored;
     return virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCellIgnored);
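
A rough sketch of the new return contract (assumptions only; the real SlowPathReturnType and encodeResult live in JITOperations.h and are ABI-sensitive): two machine words come back, the first the code pointer to jump to, the second the FrameAction, with KeepTheFrame required to encode as zero so the thunks can test it with a single branch.

    #include <cassert>
    #include <cstdint>

    enum FrameAction : uintptr_t { KeepTheFrame = 0, ReuseTheFrame = 1 };

    struct SlowPathReturnType { // assumed shape: two pointer-sized fields,
        void* codeToJumpTo;     // returned in two registers on most targets
        void* frameAction;      // non-zero means "slide the frame, tail call"
    };

    static SlowPathReturnType encodeResult(void* code, void* action)
    {
        SlowPathReturnType result = { code, action };
        return result;
    }

    int main()
    {
        int dummy;
        SlowPathReturnType r = encodeResult(&dummy, reinterpret_cast<void*>(ReuseTheFrame));
        assert(reinterpret_cast<uintptr_t>(r.frameAction) == ReuseTheFrame);
        // The single-branch test in the thunks relies on this:
        assert(reinterpret_cast<void*>(KeepTheFrame) == nullptr);
        return 0;
    }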
index d708bba..82c95f6 100644 (file)
@@ -237,6 +237,7 @@ typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void
 typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
+typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_ECli)(ExecState*, CallLinkInfo*);
 typedef StringImpl* JIT_OPERATION (*T_JITOperation_EJss)(ExecState*, JSString*);
 typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
 
@@ -278,9 +279,9 @@ void JIT_OPERATION operationDirectPutByValOptimize(ExecState*, EncodedJSValue, E
 void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
 void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationCallEval(ExecState*, ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-char* JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
 
 size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
index 6bf4a07..90efb10 100644 (file)
@@ -609,7 +609,7 @@ void linkVirtualFor(
 {
     CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
     VM* vm = callerCodeBlock->vm();
-    
+
     if (shouldShowDisassemblyFor(callerCodeBlock))
         dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
     
@@ -680,7 +680,7 @@ void linkPolymorphicCall(
                 codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
             // If we cannot handle a callee, assume that it's better for this whole thing to be a
             // virtual call.
-            if (exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType() == CallLinkInfo::CallVarargs || callLinkInfo.callType() == CallLinkInfo::ConstructVarargs) {
+            if (exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
                 linkVirtualFor(exec, callLinkInfo);
                 return;
             }
@@ -803,7 +803,11 @@ void linkPolymorphicCall(
                 CCallHelpers::TrustedImm32(1),
                 CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
         }
-        calls[caseIndex].call = stubJit.nearCall();
+        if (callLinkInfo.isTailCall()) {
+            stubJit.prepareForTailCallSlow();
+            calls[caseIndex].call = stubJit.nearTailCall();
+        } else
+            calls[caseIndex].call = stubJit.nearCall();
         calls[caseIndex].codePtr = codePtr;
         done.append(stubJit.jump());
     }
index cb648bb..11289a5 100644 (file)
@@ -79,10 +79,26 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
 }
 
 static void slowPathFor(
-    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
+    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
 {
     jit.emitFunctionPrologue();
     jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
+#if OS(WINDOWS) && CPU(X86_64)
+    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
+    // Other argument values are shifted by one. Use space on the stack for our two return values.
+    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
+    // and space for the 16 byte return area.
+    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
+    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
+    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
+    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+    jit.call(GPRInfo::nonArgGPR0);
+    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
+    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
+    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#else
     if (maxFrameExtentForSlowPathCall)
         jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
     jit.setupArgumentsWithExecState(GPRInfo::regT2);
@@ -91,13 +107,24 @@ static void slowPathFor(
     jit.call(GPRInfo::nonArgGPR0);
     if (maxFrameExtentForSlowPathCall)
         jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-    
+#endif
+
     // This slow call will return the address of one of the following:
     // 1) Exception throwing thunk.
     // 2) Host call return value returner thingy.
     // 3) The function to call.
+    // The second return value GPR will hold a non-zero value for tail calls.
+
     emitPointerValidation(jit, GPRInfo::returnValueGPR);
     jit.emitFunctionEpilogue();
+
+    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
+    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
+
+    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
+    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
+
+    doNotTrash.link(&jit);
     jit.jump(GPRInfo::returnValueGPR);
 }
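
The Windows-specific path exists because the Win64 calling convention returns aggregates wider than 64 bits through a caller-provided buffer whose address is passed as a hidden first parameter, shifting every real argument over by one register. A plain C++ illustration of that convention (the register comments are ABI assumptions, not JIT code):

    #include <cassert>

    struct TwoWords { void* a; void* b; }; // 128 bits: returned via memory on Win64

    // Win64 (assumed): the caller passes a pointer to result space in the first
    // argument register, so 'state' arrives in the second one -- the same shift
    // the thunk performs by hand before calling the slow path.
    static TwoWords wideReturn(int* state)
    {
        TwoWords result = { state, nullptr };
        return result;
    }

    int main()
    {
        int state = 0;
        TwoWords r = wideReturn(&state);
        assert(r.a == &state && !r.b);
        return 0;
    }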
 
@@ -108,7 +135,6 @@ MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
     // to perform linking and lazy compilation if necessary. We expect the callee
     // to be in regT0/regT1 (payload/tag), the CallFrame to have already
     // been adjusted, and all other registers to be available for use.
-    
     CCallHelpers jit(vm);
     
     slowPathFor(jit, vm, operationLinkCall);
@@ -185,6 +211,10 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
     
     // Make a tail call. This will return back to JIT code.
     emitPointerValidation(jit, GPRInfo::regT4);
+    if (callLinkInfo.isTailCall()) {
+        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
+        jit.prepareForTailCallSlow(GPRInfo::regT4);
+    }
     jit.jump(GPRInfo::regT4);
 
     slowCase.link(&jit);
@@ -196,9 +226,8 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
     LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
     return FINALIZE_CODE(
         patchBuffer,
-        ("Virtual %s%s slow path thunk",
-        callLinkInfo.specializationKind() == CodeForCall ? "call" : "construct",
-        callLinkInfo.registerPreservationMode() == MustPreserveRegisters ? " that preserves registers" : ""));
+        ("Virtual %s slow path thunk",
+        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
 }
 
 enum ThunkEntryType { EnterViaCall, EnterViaJump };
@@ -367,7 +396,7 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
 {
     JSInterfaceJIT jit(vm);
 
-    // We enter with fixup count, in aligned stack units, in argumentGPR0 and the return thunk in argumentGPR1
+    // We enter with fixup count in argumentGPR0
     // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
 #if USE(JSVALUE64)
 #if OS(WINDOWS)
@@ -378,12 +407,25 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
 #  if CPU(X86_64)
     jit.pop(JSInterfaceJIT::regT4);
 #  endif
-    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
-    jit.neg64(JSInterfaceJIT::argumentGPR0);
     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
     jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
 
+    // Check to see if we have extra slots we can use
+    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+    JSInterfaceJIT::Label fillExtraSlots(jit.label());
+    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
+    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+    noExtraSlot.link(&jit);
+
+    jit.neg64(JSInterfaceJIT::argumentGPR0);
+
     // Move current frame down argumentGPR0 number of slots
     JSInterfaceJIT::Label copyLoop(jit.label());
     jit.load64(JSInterfaceJIT::regT3, extraTemp);
@@ -391,10 +433,9 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
 
-    // Fill in argumentGPR0 - 1 missing arg slots with undefined
+    // Fill in argumentGPR0 missing arg slots with undefined
     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
-    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
@@ -406,6 +447,8 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
     jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
     jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
 
+    done.link(&jit);
+
 #  if CPU(X86_64)
     jit.push(JSInterfaceJIT::regT4);
 #  endif
@@ -414,29 +457,43 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
 #  if CPU(X86)
     jit.pop(JSInterfaceJIT::regT4);
 #  endif
-    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
-    jit.neg32(JSInterfaceJIT::argumentGPR0);
     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
     jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
 
+    // Check to see if we have extra slots we can use
+    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+    JSInterfaceJIT::Label fillExtraSlots(jit.label());
+    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
+    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
+    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+    noExtraSlot.link(&jit);
+
+    jit.neg32(JSInterfaceJIT::argumentGPR0);
+
     // Move current frame down argumentGPR0 number of slots
     JSInterfaceJIT::Label copyLoop(jit.label());
-    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT5);
-    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
-    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT5);
-    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, 4));
+    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
+    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
 
-    // Fill in argumentGPR0 - 1 missing arg slots with undefined
+    // Fill in argumentGPR0 missing arg slots with undefined
     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
-    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
-    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
-    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, 4));
+    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
 
     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
@@ -447,6 +504,8 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
 
+    done.link(&jit);
+
 #  if CPU(X86)
     jit.push(JSInterfaceJIT::regT4);
 #  endif
@@ -457,6 +516,16 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
     return FINALIZE_CODE(patchBuffer, ("fixup arity"));
 }
 
+MacroAssemblerCodeRef unreachableGenerator(VM* vm)
+{
+    JSInterfaceJIT jit(vm);
+
+    jit.breakpoint();
+
+    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+    return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
+}
+
 MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
 {
     JSInterfaceJIT jit(vm);
index 01c2df6..52f79c6 100644 (file)
 namespace JSC {
 
 class CallLinkInfo;
+class CCallHelpers;
 
 MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM*);
 
+MacroAssemblerCodeRef linkCallThunk(VM*, CallLinkInfo&, CodeSpecializationKind, RegisterPreservationMode);
 MacroAssemblerCodeRef linkCallThunkGenerator(VM*);
 MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM*);
 
@@ -46,6 +48,7 @@ MacroAssemblerCodeRef nativeCallGenerator(VM*);
 MacroAssemblerCodeRef nativeConstructGenerator(VM*);
 MacroAssemblerCodeRef nativeTailCallGenerator(VM*);
 MacroAssemblerCodeRef arityFixupGenerator(VM*);
+MacroAssemblerCodeRef unreachableGenerator(VM*);
 
 MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm);
 MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm);
index 5b669e3..dc01126 100644 (file)
@@ -167,6 +167,7 @@ const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1
 const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
 
 const StackAlignment = 16
+const StackAlignmentSlots = 2
 const StackAlignmentMask = StackAlignment - 1
 
 const CallerFrameAndPCSize = 2 * PtrSize
@@ -697,36 +698,79 @@ macro traceExecution()
     end
 end
 
-macro callTargetFunction(callLinkInfo, calleeFramePtr)
-    move calleeFramePtr, sp
+macro callTargetFunction(callee)
     if C_LOOP
-        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+        cloopCallJSFunction callee
     else
-        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+        call callee
     end
     restoreStackPointerAfterCall()
     dispatchAfterCall()
 end
 
-macro slowPathForCall(slowPath)
+macro prepareForRegularCall(callee, temp1, temp2, temp3)
+    addp CallerFrameAndPCSize, sp
+end
+
+# sp points to the new frame
+macro prepareForTailCall(callee, temp1, temp2, temp3)
+    restoreCalleeSavesUsedByLLInt()
+
+    loadi PayloadOffset + ArgumentCount[cfr], temp2
+    loadp CodeBlock[cfr], temp1
+    loadp CodeBlock::m_numParameters[temp1], temp1
+    bilteq temp1, temp2, .noArityFixup
+    move temp1, temp2
+
+.noArityFixup:
+    # We assume < 2^28 arguments
+    muli SlotSize, temp2
+    addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+    andi ~StackAlignmentMask, temp2
+
+    move cfr, temp1
+    addp temp2, temp1
+
+    loadi PayloadOffset + ArgumentCount[sp], temp2
+    # We assume < 2^28 arguments
+    muli SlotSize, temp2
+    addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+    andi ~StackAlignmentMask, temp2
+
+    if ARM or SH4 or ARM64 or C_LOOP or MIPS
+        addp 2 * PtrSize, sp
+        subi 2 * PtrSize, temp2
+        loadp PtrSize[cfr], lr
+    else
+        addp PtrSize, sp
+        subi PtrSize, temp2
+        loadp PtrSize[cfr], temp3
+        storep temp3, [sp]
+    end
+
+    subp temp2, temp1
+    loadp [cfr], cfr
+
+.copyLoop:
+    subi PtrSize, temp2
+    loadp [sp, temp2, 1], temp3
+    storep temp3, [temp1, temp2, 1]
+    btinz temp2, .copyLoop
+
+    move temp1, sp
+    jmp callee
+end
+
+macro slowPathForCall(slowPath, prepareCall)
     callCallSlowPath(
         slowPath,
-        macro (callee, calleeFrame)
-            btpz calleeFrame, .dontUpdateSP
-            if ARMv7
-                addp CallerFrameAndPCSize, calleeFrame, calleeFrame
-                move calleeFrame, sp
-            else
-                addp CallerFrameAndPCSize, calleeFrame, sp
-            end
+        # The callee and calleeFramePtr arguments arrive in r0 and r1
+        macro (callee, calleeFramePtr)
+            btpz calleeFramePtr, .dontUpdateSP
+            move calleeFramePtr, sp
+            prepareCall(callee, t2, t3, t4)
         .dontUpdateSP:
-            if C_LOOP
-                cloopCallJSFunction callee
-            else
-                call callee
-            end
-            restoreStackPointerAfterCall()
-            dispatchAfterCall()
+            callTargetFunction(callee)
         end)
 end
 
@@ -1415,16 +1459,19 @@ _llint_op_new_arrow_func_exp:
 _llint_op_call:
     traceExecution()
     arrayProfileForCall()
-    doCall(_llint_slow_path_call)
+    doCall(_llint_slow_path_call, prepareForRegularCall)
 
+_llint_op_tail_call:
+    traceExecution()
+    arrayProfileForCall()
+    checkSwitchToJITForEpilogue()
+    doCall(_llint_slow_path_call, prepareForTailCall)
 
 _llint_op_construct:
     traceExecution()
-    doCall(_llint_slow_path_construct)
-
+    doCall(_llint_slow_path_construct, prepareForRegularCall)
 
-_llint_op_call_varargs:
-    traceExecution()
+macro doCallVarargs(slowPath, prepareCall)
     callSlowPath(_llint_slow_path_size_frame_for_varargs)
     branchIfException(_llint_throw_from_slow_path_trampoline)
     # calleeFrame in r1
@@ -1439,25 +1486,23 @@ _llint_op_call_varargs:
             subp r1, CallerFrameAndPCSize, sp
         end
     end
-    slowPathForCall(_llint_slow_path_call_varargs)
+    slowPathForCall(slowPath, prepareCall)
+end
+
+_llint_op_call_varargs:
+    traceExecution()
+    doCallVarargs(_llint_slow_path_call_varargs, prepareForRegularCall)
+
+_llint_op_tail_call_varargs:
+    traceExecution()
+    checkSwitchToJITForEpilogue()
+    # We "lie" and perform the tail call here instead of merely preparing it,
+    # since we cannot prepare the frame for a call opcode
+    doCallVarargs(_llint_slow_path_call_varargs, prepareForTailCall)
 
 _llint_op_construct_varargs:
     traceExecution()
-    callSlowPath(_llint_slow_path_size_frame_for_varargs)
-    branchIfException(_llint_throw_from_slow_path_trampoline)
-    # calleeFrame in r1
-    if JSVALUE64
-        move r1, sp
-    else
-        # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
-        if ARMv7
-            subp r1, CallerFrameAndPCSize, t2
-            move t2, sp
-        else
-            subp r1, CallerFrameAndPCSize, sp
-        end
-    end
-    slowPathForCall(_llint_slow_path_construct_varargs)
+    doCallVarargs(_llint_slow_path_construct_varargs, prepareForRegularCall)
 
 
 _llint_op_call_eval:
@@ -1496,7 +1541,7 @@ _llint_op_call_eval:
     # and a PC to call, and that PC may be a dummy thunk that just
     # returns the JS value that the eval returned.
     
-    slowPathForCall(_llint_slow_path_call_eval)
+    slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall)
 
 
 _llint_generic_return_point:
index 821470c..37c911f 100644 (file)
@@ -602,13 +602,27 @@ macro functionArityCheck(doneLabel, slowPath)
 .proceedInline:
     loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
     btiz t1, .continue
+    loadi PayloadOffset + ArgumentCount[cfr], t2
+    addi CallFrameHeaderSlots, t2
 
-    // Move frame up "t1 * 2" slots
-    lshiftp 1, t1
+    // Check if there are some unaligned slots we can use
+    move t1, t3
+    andi StackAlignmentSlots - 1, t3
+    btiz t3, .noExtraSlot
+.fillExtraSlots:
+    move 0, t0
+    storei t0, PayloadOffset[cfr, t2, 8]
+    move UndefinedTag, t0
+    storei t0, TagOffset[cfr, t2, 8]
+    addi 1, t2
+    bsubinz 1, t3, .fillExtraSlots
+    andi ~(StackAlignmentSlots - 1), t1
+    btiz t1, .continue
+
+.noExtraSlot:
+    // Move frame up t1 slots
     negi t1
     move cfr, t3
-    loadi PayloadOffset + ArgumentCount[cfr], t2
-    addi CallFrameHeaderSlots, t2
 .copyLoop:
     loadi PayloadOffset[t3], t0
     storei t0, PayloadOffset[t3, t1, 8]
@@ -1764,7 +1778,7 @@ macro arrayProfileForCall()
 .done:
 end
 
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
     loadi 8[PC], t0
     loadi 20[PC], t1
     loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1779,14 +1793,14 @@ macro doCall(slowPath)
     storei PC, ArgumentCount + TagOffset[cfr]
     storei t2, ArgumentCount + PayloadOffset[t3]
     storei CellTag, Callee + TagOffset[t3]
-    addp CallerFrameAndPCSize, t3
-    callTargetFunction(t1, t3)
+    move t3, sp
+    prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+    callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
 
 .opCallSlow:
-    slowPathForCall(slowPath)
+    slowPathForCall(slowPath, prepareCall)
 end
 
-
 _llint_op_ret:
     traceExecution()
     checkSwitchToJITForEpilogue()
index 966a7e4..339448a 100644 (file)
@@ -510,14 +510,27 @@ macro functionArityCheck(doneLabel, slowPath)
 .noError:
     loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
     btiz t1, .continue
+    loadi PayloadOffset + ArgumentCount[cfr], t2
+    addi CallFrameHeaderSlots, t2
+
+    // Check if there are some unaligned slots we can use
+    move t1, t3
+    andi StackAlignmentSlots - 1, t3
+    btiz t3, .noExtraSlot
+    move ValueUndefined, t0
+.fillExtraSlots:
+    storeq t0, [cfr, t2, 8]
+    addi 1, t2
+    bsubinz 1, t3, .fillExtraSlots
+    andi ~(StackAlignmentSlots - 1), t1
+    btiz t1, .continue
 
-    // Move frame up "t1 * 2" slots
-    lshiftp 1, t1
+.noExtraSlot:
+    // Move frame up t1 slots
     negq t1
     move cfr, t3
     subp CalleeSaveSpaceAsVirtualRegisters * 8, t3
-    loadi PayloadOffset + ArgumentCount[cfr], t2
-    addi CallFrameHeaderSlots + CalleeSaveSpaceAsVirtualRegisters, t2
+    addi CalleeSaveSpaceAsVirtualRegisters, t2
 .copyLoop:
     loadq [t3], t0
     storeq t0, [t3, t1, 8]
@@ -1653,7 +1666,7 @@ macro arrayProfileForCall()
 .done:
 end
 
-macro doCall(slowPath)
+macro doCall(slowPath, prepareCall)
     loadisFromInstruction(2, t0)
     loadpFromInstruction(5, t1)
     loadp LLIntCallLinkInfo::callee[t1], t2
@@ -1667,14 +1680,14 @@ macro doCall(slowPath)
     loadisFromInstruction(3, t2)
     storei PC, ArgumentCount + TagOffset[cfr]
     storei t2, ArgumentCount + PayloadOffset[t3]
-    addp CallerFrameAndPCSize, t3
-    callTargetFunction(t1, t3)
+    move t3, sp
+    prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+    callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
 
 .opCallSlow:
-    slowPathForCall(slowPath)
+    slowPathForCall(slowPath, prepareCall)
 end
 
-
 _llint_op_ret:
     traceExecution()
     checkSwitchToJITForEpilogue()
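
In both interpreters, doCall now parks the new frame at the stack pointer
(move t3, sp) and threads a prepareCall macro through to the jump, with
slowPathForCall taking the same extra parameter. The hunks here only show the
regular-call flavor (prepareForRegularCall, as used by op_call_eval above). A
rough host-side model of how a regular and a tail-call flavor differ is
sketched below; it is purely illustrative, and the names prepareRegularCall /
prepareTailCall as well as the flat-array frame layout are assumptions of
this sketch, not JSC's offlineasm.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Frames as runs of 64-bit slots; base is the lowest address.
    struct Frame {
        uint64_t* base;
        size_t slots;
    };

    // Regular call: the frame already assembled at the stack pointer
    // simply becomes the callee's frame, below the still-live caller.
    Frame prepareRegularCall(Frame newFrame)
    {
        return newFrame;
    }

    // Tail call: the assembled frame replaces the caller's frame, so the
    // stack does not grow however long the chain of tail calls is. Both
    // frames' tops coincide, which is why a stack-aligned frame size that
    // is computable from the argument count alone matters here.
    Frame prepareTailCall(Frame callerFrame, Frame newFrame)
    {
        uint64_t* dest = callerFrame.base + callerFrame.slots - newFrame.slots;
        std::memmove(dest, newFrame.base, newFrame.slots * sizeof(uint64_t)); // overlap-safe slide
        return Frame { dest, newFrame.slots };
    }

    int main()
    {
        uint64_t stack[16] = {};
        Frame caller { stack + 8, 8 }; // caller occupies the top 8 slots
        Frame fresh  { stack + 2, 6 }; // new frame assembled below it
        (void)prepareRegularCall(fresh); // regular call: frame stays put
        Frame callee = prepareTailCall(caller, fresh);
        return callee.slots == 6 && callee.base == stack + 10 ? 0 : 1;
    }
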
index 92af43b..05762e5 100644
@@ -59,13 +59,14 @@ ALWAYS_INLINE int arityCheckFor(ExecState* exec, JSStack* stack, CodeSpecializat
     int argumentCountIncludingThis = exec->argumentCountIncludingThis();
     
     ASSERT(argumentCountIncludingThis < newCodeBlock->numParameters());
-    int missingArgumentCount = newCodeBlock->numParameters() - argumentCountIncludingThis;
-    int neededStackSpace = missingArgumentCount + 1; // Allow space to save the original return PC.
-    int paddedStackSpace = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), neededStackSpace);
+    int frameSize = argumentCountIncludingThis + JSStack::CallFrameHeaderSize;
+    int alignedFrameSizeForParameters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(),
+        newCodeBlock->numParameters() + JSStack::CallFrameHeaderSize);
+    int paddedStackSpace = alignedFrameSizeForParameters - frameSize;
 
-    if (!stack->ensureCapacityFor(exec->registers() - paddedStackSpace))
+    if (!stack->ensureCapacityFor(exec->registers() - paddedStackSpace % stackAlignmentRegisters()))
         return -1;
-    return paddedStackSpace / stackAlignmentRegisters();
+    return paddedStackSpace;
 }
 
 inline bool opIn(ExecState* exec, JSValue propName, JSValue baseVal)
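
With this change arityCheckFor() returns the padding directly in register
slots rather than in stack-alignment units, computed as the gap between the
frame the caller built and the aligned frame the callee's parameter list
requires. A worked example under assumed constants follows; the
CallFrameHeaderSize and stackAlignmentRegisters() values below are plausible
64-bit figures chosen for illustration, not quotations from JSStack.

    #include <cstdio>

    constexpr int CallFrameHeaderSize = 5;     // assumed slot count
    constexpr int StackAlignmentRegisters = 2; // assumed: 16-byte alignment / 8-byte slots

    int roundUpToMultipleOf(int divisor, int x) // mirrors WTF::roundUpToMultipleOf
    {
        return (x + divisor - 1) / divisor * divisor;
    }

    int main()
    {
        int numParameters = 4;              // callee declares 3 parameters plus |this|
        int argumentCountIncludingThis = 2; // caller passed one argument plus |this|

        int frameSize = argumentCountIncludingThis + CallFrameHeaderSize;  // 7
        int alignedFrameSizeForParameters =
            roundUpToMultipleOf(StackAlignmentRegisters,
                numParameters + CallFrameHeaderSize);                      // 10
        int paddedStackSpace = alignedFrameSizeForParameters - frameSize;  // 3

        // The LLInt fixup then fills paddedStackSpace % StackAlignmentRegisters
        // slots in place and slides the frame by the aligned remainder.
        printf("padded slots: %d\n", paddedStackSpace);
        return 0;
    }
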
index 3225454..303eb9b 100644
@@ -128,7 +128,7 @@ typedef const char* optionString;
     v(bool, forceProfilerBytecodeGeneration, false, nullptr) \
     \
     v(bool, enableFunctionDotArguments, true, nullptr) \
-    v(bool, enableTailCalls, true, nullptr) \
+    v(bool, enableTailCalls, false, nullptr) \
     \
     /* showDisassembly implies showDFGDisassembly. */ \
     v(bool, showDisassembly, false, "dumps disassembly of all JIT compiled code upon compilation") \
diff --git a/Source/JavaScriptCore/tests/stress/mutual-tail-call-no-stack-overflow.js b/Source/JavaScriptCore/tests/stress/mutual-tail-call-no-stack-overflow.js
new file mode 100644
index 0000000..7a74821
--- /dev/null
@@ -0,0 +1,75 @@
+//@ run("default", "--enableTailCalls=true", "--useDFGJIT=false")
+//@ run("no-llint", "--enableTailCalls=true", "--useLLInt=false")
+//@ run("no-cjit", "--enableTailCalls=true", "--useDFGJIT=false", *NO_CJIT_OPTIONS)
+
+function shouldThrow(func, errorMessage) {
+    var errorThrown = false;
+    var error = null;
+    try {
+        func();
+    } catch (e) {
+        errorThrown = true;
+        error = e;
+    }
+    if (!errorThrown)
+        throw new Error('not thrown');
+    if (String(error) !== errorMessage)
+        throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyCountdown(n) {
+    function even(n) {
+        if (n == 0)
+            return n;
+        return odd(n - 1);
+    }
+
+    function odd(n) {
+        if (n == 1)
+            return n;
+        return even(n - 1);
+    }
+
+    if (n % 2 === 0)
+        return even(n);
+    else
+        return odd(n);
+}
+
+function strictCountdown(n) {
+    "use strict";
+
+    function even(n) {
+        if (n == 0)
+            return n;
+        return odd(n - 1);
+    }
+
+    function odd(n) {
+        if (n == 1)
+            return n;
+        return even(n - 1);
+    }
+
+    if (n % 2 === 0)
+        return even(n);
+    else
+        return odd(n);
+}
+
+shouldThrow(function () { sloppyCountdown(100000); }, "RangeError: Maximum call stack size exceeded.");
+strictCountdown(100000);
+
+// Parity alternating
+function odd(n) {
+    "use strict";
+    if (n > 0)
+        return even(n, 0);
+}
+
+function even(n) {
+    "use strict";
+    return odd(n - 1);
+}
+
+odd(100000);
diff --git a/Source/JavaScriptCore/tests/stress/tail-call-no-stack-overflow.js b/Source/JavaScriptCore/tests/stress/tail-call-no-stack-overflow.js
new file mode 100644
index 0000000..7d8e1a8
--- /dev/null
@@ -0,0 +1,48 @@
+//@ run("default", "--enableTailCalls=true", "--useDFGJIT=false")
+//@ run("no-llint", "--enableTailCalls=true", "--useLLInt=false")
+
+function shouldThrow(func, errorMessage) {
+    var errorThrown = false;
+    var error = null;
+    try {
+        func();
+    } catch (e) {
+        errorThrown = true;
+        error = e;
+    }
+    if (!errorThrown)
+        throw new Error('not thrown');
+    if (String(error) !== errorMessage)
+        throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyLoop(n) {
+    if (n > 0)
+        return sloppyLoop(n - 1);
+}
+
+function strictLoop(n) {
+    "use strict";
+    if (n > 0)
+        return strictLoop(n - 1);
+}
+
+// We have two of these so that we can test different stack alignments
+function strictLoopArityFixup1(n, dummy) {
+    "use strict";
+    if (n > 0)
+        return strictLoopArityFixup1(n - 1);
+}
+
+function strictLoopArityFixup2(n, dummy1, dummy2) {
+    "use strict";
+    if (n > 0)
+        return strictLoopArityFixup2(n - 1);
+}
+
+shouldThrow(function () { sloppyLoop(100000); }, 'RangeError: Maximum call stack size exceeded.');
+
+// These should not throw
+strictLoop(100000);
+strictLoopArityFixup1(1000000);
+strictLoopArityFixup2(1000000);
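
The two strictLoopArityFixup variants exist so that the arity fixup is
exercised on both sides of the alignment boundary. Assuming numParameters
counts the implicit |this| and reusing the illustrative constants from the
arityCheckFor() sketch above (header of 5 slots, alignment of 2), the
single-dummy version needs roundUp(2, 3 + 5) - (2 + 5) = 1 padding slot
(fill only, no copy), while the two-dummy version needs
roundUp(2, 4 + 5) - (2 + 5) = 3 (fill one slot, slide the frame by two).
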
diff --git a/Source/JavaScriptCore/tests/stress/tail-call-recognize.js b/Source/JavaScriptCore/tests/stress/tail-call-recognize.js
new file mode 100644
index 0000000..0220959
--- /dev/null
@@ -0,0 +1,183 @@
+//@ run("default", "--enableTailCalls=true", "--useDFGJIT=false")
+//@ run("no-llint", "--enableTailCalls=true", "--useLLInt=false")
+//@ run("no-cjit", "--enableTailCalls=true", "--useDFGJIT=false", *NO_CJIT_OPTIONS)
+// run("-no-inline", "--maximumInliningDepth=1")
+
+function callerMustBeRun() {
+    if (!Object.is(callerMustBeRun.caller, runTests))
+        throw Error("Wrong caller, expected run but got ", callerMustBeRun.caller);
+}
+
+function callerMustBeStrict() {
+    var errorThrown = false;
+    try {
+        callerMustBeStrict.caller;
+    } catch (e) {
+        errorThrown = true;
+    }
+    if (!errorThrown)
+        throw Error("Wrong caller, expected strict caller but got ", callerMustBeStrict.caller);
+}
+
+function runTests() {
+    // Statement tests
+    (function simpleTailCall() {
+        "use strict";
+        return callerMustBeRun();
+    })();
+
+    (function noTailCallInTry() {
+        "use strict";
+        try {
+            return callerMustBeStrict();
+        } catch (e) {
+            throw e;
+        }
+    })();
+
+    (function tailCallInCatch() {
+        "use strict";
+        try { } catch (e) { return callerMustBeRun(); }
+    })();
+
+    (function tailCallInFinally() {
+        "use strict";
+        try { } finally { return callerMustBeRun(); }
+    })();
+
+    (function tailCallInFinallyWithCatch() {
+        "use strict";
+        try { } catch (e) { } finally { return callerMustBeRun(); }
+    })();
+
+    (function tailCallInFinallyWithCatchTaken() {
+        "use strict";
+        try { throw null; } catch (e) { } finally { return callerMustBeRun(); }
+    })();
+
+    (function noTailCallInCatchIfFinally() {
+        "use strict";
+        try { throw null; } catch (e) { return callerMustBeStrict(); } finally { }
+    })();
+
+    (function tailCallInFor() {
+        "use strict";
+        for (var i = 0; i < 10; ++i)
+            return callerMustBeRun();
+    })();
+
+    (function tailCallInWhile() {
+        "use strict";
+        while (true)
+            return callerMustBeRun();
+    })();
+
+    (function tailCallInDoWhile() {
+        "use strict";
+        do
+            return callerMustBeRun();
+        while (true);
+    })();
+
+    (function noTailCallInForIn() {
+        "use strict";
+        for (var x in [1, 2])
+            return callerMustBeStrict();
+    })();
+
+    (function noTailCallInForOf() {
+        "use strict";
+        for (var x of [1, 2])
+            return callerMustBeStrict();
+    })();
+
+    (function tailCallInIf() {
+        "use strict";
+        if (true)
+            return callerMustBeRun();
+    })();
+
+    (function tailCallInElse() {
+        "use strict";
+        if (false) throw new Error("WTF");
+        else return callerMustBeRun();
+    })();
+
+    (function tailCallInSwitchCase() {
+        "use strict";
+        switch (0) {
+        case 0: return callerMustBeRun();
+        }
+    })();
+
+    (function tailCallInSwitchDefault() {
+        "use strict";
+        switch (0) {
+        default: return callerMustBeRun();
+        }
+    })();
+
+    (function tailCallWithLabel() {
+        "use strict";
+        dummy: return callerMustBeRun();
+    })();
+
+    // Expression tests; we don't enumerate all the cases where there
+    // *shouldn't* be a tail call
+
+    (function tailCallComma() {
+        "use strict";
+        return callerMustBeStrict(), callerMustBeRun();
+    })();
+
+    (function tailCallTernaryLeft() {
+        "use strict";
+        return true ? callerMustBeRun() : unreachable();
+    })();
+
+    (function tailCallTernaryRight() {
+        "use strict";
+        return false ? unreachable() : callerMustBeRun();
+    })();
+
+    (function tailCallLogicalAnd() {
+        "use strict";
+        return true && callerMustBeRun();
+    })();
+
+    (function tailCallLogicalOr() {
+        "use strict";
+        return false || callerMustBeRun();
+    })();
+
+    (function memberTailCall() {
+        "use strict";
+        return { f: callerMustBeRun }.f();
+    })();
+
+    (function bindTailCall() {
+        "use strict";
+        return callerMustBeRun.bind()();
+    })();
+
+    // Function.prototype tests
+
+    (function applyTailCall() {
+        "use strict";
+        return callerMustBeRun.apply();
+    })();
+
+    (function callTailCall() {
+        "use strict";
+        return callerMustBeRun.call();
+    })();
+
+    // No tail call for constructors
+    (function noTailConstruct() {
+        "use strict";
+        return new callerMustBeStrict();
+    })();
+}
+
+for (var i = 0; i < 10000; ++i)
+    runTests();
diff --git a/Source/JavaScriptCore/tests/stress/tail-call-varargs-no-stack-overflow.js b/Source/JavaScriptCore/tests/stress/tail-call-varargs-no-stack-overflow.js
new file mode 100644
index 0000000..e6f894d
--- /dev/null
@@ -0,0 +1,32 @@
+//@ run("default", "--enableTailCalls=true", "--useDFGJIT=false")
+//@ run("no-llint", "--enableTailCalls=true", "--useLLInt=false")
+//@ run("no-cjit", "--enableTailCalls=true", "--useDFGJIT=false", *NO_CJIT_OPTIONS)
+
+function shouldThrow(func, errorMessage) {
+    var errorThrown = false;
+    var error = null;
+    try {
+        func();
+    } catch (e) {
+        errorThrown = true;
+        error = e;
+    }
+    if (!errorThrown)
+        throw new Error('not thrown');
+    if (String(error) !== errorMessage)
+        throw new Error(`bad error: ${String(error)}`);
+}
+
+function sloppyLoop(n) {
+    if (n > 0)
+        return sloppyLoop(...[n - 1]);
+}
+
+function strictLoop(n) {
+    "use strict";
+    if (n > 0)
+        return strictLoop(...[n - 1]);
+}
+
+shouldThrow(function () { sloppyLoop(100000); }, 'RangeError: Maximum call stack size exceeded.');
+strictLoop(100000);
diff --git a/Source/JavaScriptCore/tests/stress/tail-calls-dont-overwrite-live-stack.js b/Source/JavaScriptCore/tests/stress/tail-calls-dont-overwrite-live-stack.js
new file mode 100644
index 0000000..d9563d1
--- /dev/null
@@ -0,0 +1,30 @@
+"use strict";
+
+function tail(a, b) { }
+noInline(tail);
+
+var obj = {
+    method: function (x) {
+        return tail(x, x);
+    },
+
+    get fromNative() { return tail(0, 0); }
+};
+noInline(obj.method);
+
+function getThis(x) { return this; }
+noInline(getThis);
+
+for (var i = 0; i < 10000; ++i) {
+    var that = getThis(obj.method(42));
+
+    if (!Object.is(that, undefined))
+        throw new Error("Wrong 'this' value in call, expected undefined but got " + that);
+
+    that = getThis(obj.method(...[42]));
+    if (!Object.is(that, undefined))
+        throw new Error("Wrong 'this' value in varargs call, expected undefined but got " + that);
+
+    if (!Object.is(obj.fromNative, undefined))
+        throw new Error("Wrong 'fromNative' value, expected undefined but got " + obj.fromNative);
+}
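
This last test exercises the flip side of frame reuse: a tail call must not
stomp stack that is still live. Here obj.method(42) tail-calls tail() while
the surrounding expression is still assembling the call to getThis(), and the
fromNative getter tail-calls out of a frame entered from native code; if the
frame slide overreached, the |this| seen by getThis() or the getter's result
would presumably come back as garbage rather than undefined.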