Unreviewed, rolling out r250750.
author jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 7 Oct 2019 16:47:30 +0000 (16:47 +0000)
committer jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 7 Oct 2019 16:47:30 +0000 (16:47 +0000)
Reverting change as this broke internal tests over the weekend.

Reverted changeset:

"Allow OSR exit to the LLInt"
https://bugs.webkit.org/show_bug.cgi?id=197993
https://trac.webkit.org/changeset/250750

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@250775 268f45cc-cd09-0410-ab3c-d52691b4dbfc

24 files changed:
JSTests/ChangeLog
JSTests/stress/exit-from-getter-by-val.js [deleted file]
JSTests/stress/exit-from-setter-by-val.js [deleted file]
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/Sources.txt
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/InlineCallFrame.h
Source/JavaScriptCore/dfg/DFGOSRExit.cpp
Source/JavaScriptCore/dfg/DFGOSRExit.h
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp [new file with mode: 0644]
Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h [new file with mode: 0644]
Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
Source/JavaScriptCore/llint/LLIntData.h
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
Source/JavaScriptCore/offlineasm/asm.rb
Source/JavaScriptCore/offlineasm/transform.rb
Source/JavaScriptCore/runtime/OptionsList.h
Tools/ChangeLog
Tools/Scripts/run-jsc-stress-tests

index d71220d..27deafb 100644 (file)
@@ -1,3 +1,15 @@
+2019-10-07  Matt Lewis  <jlewis3@apple.com>
+
+        Unreviewed, rolling out r250750.
+
+        Reverting change as this broke interal test over the weekend.
+
+        Reverted changeset:
+
+        "Allow OSR exit to the LLInt"
+        https://bugs.webkit.org/show_bug.cgi?id=197993
+        https://trac.webkit.org/changeset/250750
+
 2019-10-04  Saam Barati  <sbarati@apple.com>
 
         Allow OSR exit to the LLInt
diff --git a/JSTests/stress/exit-from-getter-by-val.js b/JSTests/stress/exit-from-getter-by-val.js
deleted file mode 100644 (file)
index 3a40cd0..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-function field() { return "f"; }
-noInline(field);
-
-(function() {
-    var o = {_f:42};
-    o.__defineGetter__("f", function() { return this._f * 100; });
-    var result = 0;
-    var n = 50000;
-    function foo(o) {
-        return o[field()] + 11;
-    }
-    noInline(foo);
-    for (var i = 0; i < n; ++i) {
-        result += foo(o);
-    }
-    if (result != n * (42 * 100 + 11))
-        throw "Error: bad result: " + result;
-    o._f = 1000000000;
-    result = 0;
-    for (var i = 0; i < n; ++i) {
-        result += foo(o);
-    }
-    if (result != n * (1000000000 * 100 + 11))
-        throw "Error: bad result (2): " + result;
-})();
diff --git a/JSTests/stress/exit-from-setter-by-val.js b/JSTests/stress/exit-from-setter-by-val.js
deleted file mode 100644 (file)
index f271e54..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-function field() { return "f"; }
-noInline(field);
-
-(function() {
-    var o = {_f:42};
-    o.__defineSetter__("f", function(value) { this._f = value * 100; });
-    var n = 50000;
-    function foo(o_, v_) {
-        let f = field();
-        var o = o_[f];
-        var v = v_[f];
-        o[f] = v;
-        o[f] = v + 1;
-    }
-    noInline(foo);
-    for (var i = 0; i < n; ++i) {
-        foo({f:o}, {f:11});
-    }
-    if (o._f != (11 + 1) * 100)
-        throw "Error: bad o._f: " + o._f;
-    for (var i = 0; i < n; ++i) {
-        foo({f:o}, {f:1000000000});
-    }
-    if (o._f != 100 * (1000000000 + 1))
-        throw "Error: bad o._f (2): " + o._f;
-})();
-
index 9670144..b6da7ec 100644 (file)
@@ -1,3 +1,15 @@
+2019-10-07  Matt Lewis  <jlewis3@apple.com>
+
+        Unreviewed, rolling out r250750.
+
+        Reverting change as this broke interal test over the weekend.
+
+        Reverted changeset:
+
+        "Allow OSR exit to the LLInt"
+        https://bugs.webkit.org/show_bug.cgi?id=197993
+        https://trac.webkit.org/changeset/250750
+
 2019-10-04  Ross Kirsling  <ross.kirsling@sony.com>
 
         Socket-based RWI should base64-encode backend commands on client, not server
index 5678547..3e9d4c9 100644 (file)
                0F235BE017178E1C00690C7F /* FTLOSRExitCompiler.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F235BCA17178E1C00690C7F /* FTLOSRExitCompiler.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F235BE217178E1C00690C7F /* FTLThunks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F235BCC17178E1C00690C7F /* FTLThunks.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F235BEC17178E7300690C7F /* DFGOSRExitBase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F235BE817178E7300690C7F /* DFGOSRExitBase.h */; };
+               0F235BEE17178E7300690C7F /* DFGOSRExitPreparation.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F235BEA17178E7300690C7F /* DFGOSRExitPreparation.h */; };
                0F24E54117EA9F5900ABB217 /* AssemblyHelpers.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F24E53C17EA9F5900ABB217 /* AssemblyHelpers.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F24E54217EA9F5900ABB217 /* CCallHelpers.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F24E53D17EA9F5900ABB217 /* CCallHelpers.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F24E54317EA9F5900ABB217 /* FPRInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F24E53E17EA9F5900ABB217 /* FPRInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F235BCC17178E1C00690C7F /* FTLThunks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLThunks.h; path = ftl/FTLThunks.h; sourceTree = "<group>"; };
                0F235BE717178E7300690C7F /* DFGOSRExitBase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExitBase.cpp; path = dfg/DFGOSRExitBase.cpp; sourceTree = "<group>"; };
                0F235BE817178E7300690C7F /* DFGOSRExitBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOSRExitBase.h; path = dfg/DFGOSRExitBase.h; sourceTree = "<group>"; };
+               0F235BE917178E7300690C7F /* DFGOSRExitPreparation.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOSRExitPreparation.cpp; path = dfg/DFGOSRExitPreparation.cpp; sourceTree = "<group>"; };
+               0F235BEA17178E7300690C7F /* DFGOSRExitPreparation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOSRExitPreparation.h; path = dfg/DFGOSRExitPreparation.h; sourceTree = "<group>"; };
                0F24E53B17EA9F5900ABB217 /* AssemblyHelpers.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AssemblyHelpers.cpp; sourceTree = "<group>"; };
                0F24E53C17EA9F5900ABB217 /* AssemblyHelpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AssemblyHelpers.h; sourceTree = "<group>"; };
                0F24E53D17EA9F5900ABB217 /* CCallHelpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CCallHelpers.h; sourceTree = "<group>"; };
                                0F392C881B46188400844728 /* DFGOSRExitFuzz.h */,
                                0FEFC9A71681A3B000567F53 /* DFGOSRExitJumpPlaceholder.cpp */,
                                0FEFC9A81681A3B000567F53 /* DFGOSRExitJumpPlaceholder.h */,
+                               0F235BE917178E7300690C7F /* DFGOSRExitPreparation.cpp */,
+                               0F235BEA17178E7300690C7F /* DFGOSRExitPreparation.h */,
                                0F6237951AE45CA700D402EA /* DFGPhantomInsertionPhase.cpp */,
                                0F6237961AE45CA700D402EA /* DFGPhantomInsertionPhase.h */,
                                0FFFC94F14EF909500C72532 /* DFGPhase.cpp */,
                                0F7025AA1714B0FC00382C0E /* DFGOSRExitCompilerCommon.h in Headers */,
                                0F392C8A1B46188400844728 /* DFGOSRExitFuzz.h in Headers */,
                                0FEFC9AB1681A3B600567F53 /* DFGOSRExitJumpPlaceholder.h in Headers */,
+                               0F235BEE17178E7300690C7F /* DFGOSRExitPreparation.h in Headers */,
                                0F6237981AE45CA700D402EA /* DFGPhantomInsertionPhase.h in Headers */,
                                0FFFC95C14EF90AF00C72532 /* DFGPhase.h in Headers */,
                                0F2B9CEB19D0BA7D00B1D1B5 /* DFGPhiChildren.h in Headers */,
index 82db982..a2e88b1 100644 (file)
@@ -382,6 +382,7 @@ dfg/DFGOSRExitBase.cpp
 dfg/DFGOSRExitCompilerCommon.cpp
 dfg/DFGOSRExitFuzz.cpp
 dfg/DFGOSRExitJumpPlaceholder.cpp
+dfg/DFGOSRExitPreparation.cpp
 dfg/DFGObjectAllocationSinkingPhase.cpp
 dfg/DFGObjectMaterializationData.cpp
 dfg/DFGOperations.cpp
index 4233ea2..88cfae4 100644 (file)
@@ -892,9 +892,6 @@ public:
         return m_unlinkedCode->metadataSizeInBytes();
     }
 
-    MetadataTable* metadataTable() { return m_metadata.get(); }
-    const void* instructionsRawPointer() { return m_instructionsRawPointer; }
-
 protected:
     void finalizeLLIntInlineCaches();
 #if ENABLE(JIT)
index 763ebfe..8defc26 100644 (file)
@@ -240,7 +240,7 @@ inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCal
 
 inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
 {
-    ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));
+    ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);
     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
     if (inlineCallFrame)
         return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
index 7495ab6..5587fc6 100644 (file)
@@ -34,6 +34,7 @@
 #include "DFGGraph.h"
 #include "DFGMayExit.h"
 #include "DFGOSRExitCompilerCommon.h"
+#include "DFGOSRExitPreparation.h"
 #include "DFGOperations.h"
 #include "DFGSpeculativeJIT.h"
 #include "DirectArguments.h"
@@ -371,8 +372,11 @@ void OSRExit::executeOSRExit(Context& context)
         // results will be cached in the OSRExitState record for use of the rest of the
         // exit ramp code.
 
+        // Ensure we have baseline codeBlocks to OSR exit to.
+        prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
         CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
-        ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));
+        ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);
 
         SpeculationRecovery* recovery = nullptr;
         if (exit.m_recoveryIndex != UINT_MAX) {
@@ -402,19 +406,11 @@ void OSRExit::executeOSRExit(Context& context)
         adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);
 
         CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
-        bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
-        void* jumpTarget;
-        if (exitToLLInt) {
-            unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
-            const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
-            MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
-            jumpTarget = destination.executableAddress();    
-        } else {
-            const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap();
-            CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex());
-            ASSERT(codeLocation);
-            jumpTarget = codeLocation.executableAddress();
-        }
+        const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap();
+        CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex());
+        ASSERT(codeLocation);
+
+        void* jumpTarget = codeLocation.executableAddress();
 
         // Compute the value recoveries.
         Operands<ValueRecovery> operands;
@@ -422,7 +418,7 @@ void OSRExit::executeOSRExit(Context& context)
         dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands, &undefinedOperandSpans);
         ptrdiff_t stackPointerOffset = -static_cast<ptrdiff_t>(codeBlock->jitCode()->dfgCommon()->requiredRegisterCountForExit) * sizeof(Register);
 
-        exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile, exitToLLInt));
+        exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile));
 
         if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
             Profiler::Database& database = *vm.m_perBytecodeProfiler;
@@ -450,7 +446,7 @@ void OSRExit::executeOSRExit(Context& context)
 
     OSRExitState& exitState = *exit.exitState.get();
     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
-    ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));
+    ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);
 
     Operands<ValueRecovery>& operands = exitState.operands;
     Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;
@@ -761,7 +757,7 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
     // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
     // in presence of inlined tail calls.
     // https://bugs.webkit.org/show_bug.cgi?id=147511
-    ASSERT(JITCode::isBaselineCode(outermostBaselineCodeBlock->jitType()));
+    ASSERT(outermostBaselineCodeBlock->jitType() == JITType::BaselineJIT);
     frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);
 
     const CodeOrigin* codeOrigin;
@@ -772,8 +768,6 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
         CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
         void* callerFrame = cpu.fp();
 
-        bool callerIsLLInt = false;
-
         if (!trueCaller) {
             ASSERT(inlineCallFrame->isTail());
             void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
@@ -787,16 +781,46 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
         } else {
             CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
             unsigned callBytecodeIndex = trueCaller->bytecodeIndex();
-            void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);
+            MacroAssemblerCodePtr<JSInternalPtrTag> jumpTarget;
+
+            switch (trueCallerCallKind) {
+            case InlineCallFrame::Call:
+            case InlineCallFrame::Construct:
+            case InlineCallFrame::CallVarargs:
+            case InlineCallFrame::ConstructVarargs:
+            case InlineCallFrame::TailCall:
+            case InlineCallFrame::TailCallVarargs: {
+                CallLinkInfo* callLinkInfo =
+                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
+                RELEASE_ASSERT(callLinkInfo);
+
+                jumpTarget = callLinkInfo->callReturnLocation();
+                break;
+            }
+
+            case InlineCallFrame::GetterCall:
+            case InlineCallFrame::SetterCall: {
+                StructureStubInfo* stubInfo =
+                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
+                RELEASE_ASSERT(stubInfo);
+
+                jumpTarget = stubInfo->doneLocation();
+                break;
+            }
+
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
 
             if (trueCaller->inlineCallFrame())
                 callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame()->stackOffset * sizeof(EncodedJSValue);
 
+            void* targetAddress = jumpTarget.executableAddress();
 #if CPU(ARM64E)
             void* newEntrySP = cpu.fp<uint8_t*>() + inlineCallFrame->returnPCOffset() + sizeof(void*);
-            jumpTarget = tagCodePtr(jumpTarget, bitwise_cast<PtrTag>(newEntrySP));
+            targetAddress = retagCodePtr(targetAddress, JSInternalPtrTag, bitwise_cast<PtrTag>(newEntrySP));
 #endif
-            frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
+            frame.set<void*>(inlineCallFrame->returnPCOffset(), targetAddress);
         }
 
         frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);
@@ -806,14 +830,6 @@ static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselin
         // copy the prior contents of the tag registers already saved for the outer frame to this frame.
         saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);
 
-        if (callerIsLLInt) {
-            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
-            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR).offset, baselineCodeBlockForCaller->metadataTable());
-#if USE(JSVALUE64)
-            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR).offset, baselineCodeBlockForCaller->instructionsRawPointer());
-#endif
-        }
-
         if (!inlineCallFrame->isVarargs())
             frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
         ASSERT(callerFrame);
@@ -878,24 +894,6 @@ static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock
     }
 
     vm.topCallFrame = context.fp<ExecState*>();
-
-    if (exitState->isJumpToLLInt) {
-        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
-        unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
-        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
-
-        context.gpr(LLInt::Registers::metadataTableGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->metadataTable());
-#if USE(JSVALUE64)
-        context.gpr(LLInt::Registers::pbGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->instructionsRawPointer());
-        context.gpr(LLInt::Registers::pcGPR) = static_cast<uintptr_t>(exit.m_codeOrigin.bytecodeIndex());
-#else
-        context.gpr(LLInt::Registers::pcGPR) = bitwise_cast<uintptr_t>(&currentInstruction);
-#endif
-
-        if (exit.isExceptionHandler())
-            vm.targetInterpreterPCForThrow = &currentInstruction;
-    }
-
     context.pc() = untagCodePtr<JSEntryPtrTag>(jumpTarget);
 }
 
@@ -1054,6 +1052,8 @@ void JIT_OPERATION OSRExit::compileOSRExit(ExecState* exec)
     ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
     EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
     
+    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
     // Compute the value recoveries.
     Operands<ValueRecovery> operands;
     codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
index cbc7131..817c3cd 100644 (file)
@@ -106,7 +106,7 @@ private:
 enum class ExtraInitializationLevel;
 
 struct OSRExitState : RefCounted<OSRExitState> {
-    OSRExitState(OSRExitBase& exit, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, Operands<ValueRecovery>& operands, Vector<UndefinedOperandSpan>&& undefinedOperandSpans, SpeculationRecovery* recovery, ptrdiff_t stackPointerOffset, int32_t activeThreshold, double memoryUsageAdjustedThreshold, void* jumpTarget, ArrayProfile* arrayProfile, bool isJumpToLLInt)
+    OSRExitState(OSRExitBase& exit, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, Operands<ValueRecovery>& operands, Vector<UndefinedOperandSpan>&& undefinedOperandSpans, SpeculationRecovery* recovery, ptrdiff_t stackPointerOffset, int32_t activeThreshold, double memoryUsageAdjustedThreshold, void* jumpTarget, ArrayProfile* arrayProfile)
         : exit(exit)
         , codeBlock(codeBlock)
         , baselineCodeBlock(baselineCodeBlock)
@@ -118,7 +118,6 @@ struct OSRExitState : RefCounted<OSRExitState> {
         , memoryUsageAdjustedThreshold(memoryUsageAdjustedThreshold)
         , jumpTarget(jumpTarget)
         , arrayProfile(arrayProfile)
-        , isJumpToLLInt(isJumpToLLInt)
     { }
 
     OSRExitBase& exit;
@@ -132,7 +131,6 @@ struct OSRExitState : RefCounted<OSRExitState> {
     double memoryUsageAdjustedThreshold;
     void* jumpTarget;
     ArrayProfile* arrayProfile;
-    bool isJumpToLLInt;
 
     ExtraInitializationLevel extraInitializationLevel;
     Profiler::OSRExit* profilerExit { nullptr };
index 07de718..b601569 100644 (file)
 #include "JIT.h"
 #include "JSCJSValueInlines.h"
 #include "JSCInlines.h"
-#include "LLIntData.h"
 #include "StructureStubInfo.h"
 
 namespace JSC { namespace DFG {
 
-// These are the LLInt OSR exit return points.
-extern "C" void op_call_return_location_narrow();
-extern "C" void op_call_return_location_wide_16();
-extern "C" void op_call_return_location_wide_32();
-
-extern "C" void op_construct_return_location_narrow();
-extern "C" void op_construct_return_location_wide_16();
-extern "C" void op_construct_return_location_wide_32();
-
-extern "C" void op_call_varargs_slow_return_location_narrow();
-extern "C" void op_call_varargs_slow_return_location_wide_16();
-extern "C" void op_call_varargs_slow_return_location_wide_32();
-
-extern "C" void op_construct_varargs_slow_return_location_narrow();
-extern "C" void op_construct_varargs_slow_return_location_wide_16();
-extern "C" void op_construct_varargs_slow_return_location_wide_32();
-
-extern "C" void op_get_by_id_return_location_narrow();
-extern "C" void op_get_by_id_return_location_wide_16();
-extern "C" void op_get_by_id_return_location_wide_32();
-
-extern "C" void op_get_by_val_return_location_narrow();
-extern "C" void op_get_by_val_return_location_wide_16();
-extern "C" void op_get_by_val_return_location_wide_32();
-
-extern "C" void op_put_by_id_return_location_narrow();
-extern "C" void op_put_by_id_return_location_wide_16();
-extern "C" void op_put_by_id_return_location_wide_32();
-
-extern "C" void op_put_by_val_return_location_narrow();
-extern "C" void op_put_by_val_return_location_wide_16();
-extern "C" void op_put_by_val_return_location_wide_32();
-
 void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
 {
     if (!exitKindMayJettison(exit.m_kind)) {
@@ -170,106 +136,12 @@ void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
     doneAdjusting.link(&jit);
 }
 
-void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, unsigned callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind, bool& callerIsLLInt)
-{
-    callerIsLLInt = Options::forceOSRExitToLLInt() || baselineCodeBlockForCaller->jitType() == JITType::InterpreterThunk;
-
-    void* jumpTarget;
-
-    if (callerIsLLInt) {
-        const Instruction& callInstruction = *baselineCodeBlockForCaller->instructions().at(callBytecodeIndex).ptr();
-
-#define LLINT_RETURN_LOCATION(name) FunctionPtr<NoPtrTag>(callInstruction.isWide16() ? name##_return_location_wide_16 : (callInstruction.isWide32() ? name##_return_location_wide_32 : name##_return_location_narrow)).executableAddress()
-
-        switch (trueCallerCallKind) {
-        case InlineCallFrame::Call:
-            jumpTarget = LLINT_RETURN_LOCATION(op_call);
-            break;
-        case InlineCallFrame::Construct:
-            jumpTarget = LLINT_RETURN_LOCATION(op_construct);
-            break;
-        case InlineCallFrame::CallVarargs:
-            jumpTarget = LLINT_RETURN_LOCATION(op_call_varargs_slow);
-            break;
-        case InlineCallFrame::ConstructVarargs:
-            jumpTarget = LLINT_RETURN_LOCATION(op_construct_varargs_slow);
-            break;
-        case InlineCallFrame::GetterCall: {
-            if (callInstruction.opcodeID() == op_get_by_id)
-                jumpTarget = LLINT_RETURN_LOCATION(op_get_by_id);
-            else if (callInstruction.opcodeID() == op_get_by_val)
-                jumpTarget = LLINT_RETURN_LOCATION(op_get_by_val);
-            else
-                RELEASE_ASSERT_NOT_REACHED();
-            break;
-        }
-        case InlineCallFrame::SetterCall: {
-            if (callInstruction.opcodeID() == op_put_by_id)
-                jumpTarget = LLINT_RETURN_LOCATION(op_put_by_id);
-            else if (callInstruction.opcodeID() == op_put_by_val)
-                jumpTarget = LLINT_RETURN_LOCATION(op_put_by_val);
-            else
-                RELEASE_ASSERT_NOT_REACHED();
-            break;
-        }
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-
-#undef LLINT_RETURN_LOCATION
-
-    } else {
-        switch (trueCallerCallKind) {
-        case InlineCallFrame::Call:
-        case InlineCallFrame::Construct:
-        case InlineCallFrame::CallVarargs:
-        case InlineCallFrame::ConstructVarargs: {
-            CallLinkInfo* callLinkInfo =
-                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
-            RELEASE_ASSERT(callLinkInfo);
-
-            jumpTarget = callLinkInfo->callReturnLocation().untaggedExecutableAddress();
-            break;
-        }
-
-        case InlineCallFrame::GetterCall:
-        case InlineCallFrame::SetterCall: {
-            StructureStubInfo* stubInfo =
-                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
-            RELEASE_ASSERT(stubInfo);
-
-            jumpTarget = stubInfo->doneLocation().untaggedExecutableAddress();
-            break;
-        }
-
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-    }
-
-    return jumpTarget;
-}
-
-CCallHelpers::Address calleeSaveSlot(InlineCallFrame* inlineCallFrame, CodeBlock* baselineCodeBlock, GPRReg calleeSave)
-{
-    const RegisterAtOffsetList* calleeSaves = baselineCodeBlock->calleeSaveRegisters();
-    for (unsigned i = 0; i < calleeSaves->size(); i++) {
-        RegisterAtOffset entry = calleeSaves->at(i);
-        if (entry.reg() != calleeSave)
-            continue;
-        return CCallHelpers::Address(CCallHelpers::framePointerRegister, static_cast<VirtualRegister>(inlineCallFrame->stackOffset).offsetInBytes() + entry.offset());
-    }
-
-    RELEASE_ASSERT_NOT_REACHED();
-    return CCallHelpers::Address(CCallHelpers::framePointerRegister);
-}
-
 void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
 {
     // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
     // in presence of inlined tail calls.
     // https://bugs.webkit.org/show_bug.cgi?id=147511
-    ASSERT(JITCode::isBaselineCode(jit.baselineCodeBlock()->jitType()));
+    ASSERT(jit.baselineCodeBlock()->jitType() == JITType::BaselineJIT);
     jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));
 
     const CodeOrigin* codeOrigin;
@@ -280,8 +152,6 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
         CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
         GPRReg callerFrameGPR = GPRInfo::callFrameRegister;
 
-        bool callerIsLLInt = false;
-
         if (!trueCaller) {
             ASSERT(inlineCallFrame->isTail());
             jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
@@ -297,7 +167,36 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
         } else {
             CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
             unsigned callBytecodeIndex = trueCaller->bytecodeIndex();
-            void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);
+            void* jumpTarget = nullptr;
+
+            switch (trueCallerCallKind) {
+            case InlineCallFrame::Call:
+            case InlineCallFrame::Construct:
+            case InlineCallFrame::CallVarargs:
+            case InlineCallFrame::ConstructVarargs:
+            case InlineCallFrame::TailCall:
+            case InlineCallFrame::TailCallVarargs: {
+                CallLinkInfo* callLinkInfo =
+                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
+                RELEASE_ASSERT(callLinkInfo);
+
+                jumpTarget = callLinkInfo->callReturnLocation().untaggedExecutableAddress();
+                break;
+            }
+
+            case InlineCallFrame::GetterCall:
+            case InlineCallFrame::SetterCall: {
+                StructureStubInfo* stubInfo =
+                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
+                RELEASE_ASSERT(stubInfo);
+
+                jumpTarget = stubInfo->doneLocation().untaggedExecutableAddress();
+                break;
+            }
+
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
 
             if (trueCaller->inlineCallFrame()) {
                 jit.addPtr(
@@ -328,14 +227,6 @@ void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
             trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
             GPRInfo::regT2);
 
-        if (callerIsLLInt) {
-            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
-            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->metadataTable()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR));
-#if USE(JSVALUE64)
-            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->instructionsRawPointer()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR));
-#endif
-        }
-
         if (!inlineCallFrame->isVarargs())
             jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
 #if USE(JSVALUE64)
@@ -410,35 +301,11 @@ void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
 
     CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
     ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
-    ASSERT(JITCode::isBaselineCode(codeBlockForExit->jitType()));
-
-    void* jumpTarget;
-    bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
-    if (exitToLLInt) {
-        unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
-        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
-        MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
-
-        if (exit.isExceptionHandler()) {
-            jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), GPRInfo::regT2);
-            jit.storePtr(GPRInfo::regT2, &vm.targetInterpreterPCForThrow);
-        }
-
-        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), LLInt::Registers::metadataTableGPR);
-#if USE(JSVALUE64)
-        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->instructionsRawPointer()), LLInt::Registers::pbGPR);
-        jit.move(CCallHelpers::TrustedImm32(bytecodeOffset), LLInt::Registers::pcGPR);
-#else
-        jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), LLInt::Registers::pcGPR);
-#endif
-        jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
-    } else {
-        CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex());
-        ASSERT(codeLocation);
-
-        jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
-    }
+    ASSERT(codeBlockForExit->jitType() == JITType::BaselineJIT);
+    CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex());
+    ASSERT(codeLocation);
 
+    void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
     jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
     if (exit.isExceptionHandler()) {
         // Since we're jumping to op_catch, we need to set callFrameForCatch.
index 0caeb00..a0bfd63 100644 (file)
@@ -39,8 +39,6 @@ namespace JSC { namespace DFG {
 void handleExitCounts(CCallHelpers&, const OSRExitBase&);
 void reifyInlinedCallFrames(CCallHelpers&, const OSRExitBase&);
 void adjustAndJumpToTarget(VM&, CCallHelpers&, const OSRExitBase&);
-void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, unsigned callBytecodeOffset, InlineCallFrame::Kind callerKind, bool& callerIsLLInt);
-CCallHelpers::Address calleeSaveSlot(InlineCallFrame*, CodeBlock* baselineCodeBlock, GPRReg calleeSave);
 
 template <typename JITCodeType>
 void adjustFrameAndStackInOSRExitCompilerThunk(MacroAssembler& jit, VM& vm, JITType jitType)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp
new file mode 100644 (file)
index 0000000..a0b9d8b
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DFGOSRExitPreparation.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "JIT.h"
+#include "JITCode.h"
+#include "JITWorklist.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace DFG {
+
+void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
+{
+    VM& vm = exec->vm();
+    DeferGC deferGC(vm.heap);
+    
+    for (; codeOrigin.inlineCallFrame(); codeOrigin = codeOrigin.inlineCallFrame()->directCaller) {
+        CodeBlock* codeBlock = codeOrigin.inlineCallFrame()->baselineCodeBlock.get();
+        JITWorklist::ensureGlobalWorklist().compileNow(codeBlock);
+    }
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h b/Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h
new file mode 100644 (file)
index 0000000..230386a
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(DFG_JIT)
+
+#include "CallFrame.h"
+#include "CodeOrigin.h"
+
+namespace JSC { namespace DFG {
+
+// Make sure all code on our inline stack is JIT compiled. This is necessary since
+// we may opt to inline a code block even before it had ever been compiled by the
+// JIT, but our OSR exit infrastructure currently only works if the target of the
+// OSR exit is JIT code. This could be changed since there is nothing particularly
+// hard about doing an OSR exit into the interpreter, but for now this seems to make
+// sense in that if we're OSR exiting from inlined code of a DFG code block, then
+// probably it's a good sign that the thing we're exiting into is hot. Even more
+// interestingly, since the code was inlined, it may never otherwise get JIT
+// compiled since the act of inlining it may ensure that it otherwise never runs.
+void prepareCodeOriginForOSRExit(ExecState*, CodeOrigin);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
index e6ae4c5..4543d90 100644 (file)
@@ -30,6 +30,7 @@
 
 #include "BytecodeStructs.h"
 #include "DFGOSRExitCompilerCommon.h"
+#include "DFGOSRExitPreparation.h"
 #include "FTLExitArgumentForOperand.h"
 #include "FTLJITCode.h"
 #include "FTLLocation.h"
@@ -543,6 +544,8 @@ extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID)
         }
     }
 
+    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
     compileStub(exitID, jitCode, exit, &vm, codeBlock);
 
     MacroAssembler::repatchJump(
index 26e32a9..de39056 100644 (file)
@@ -25,8 +25,6 @@
 
 #pragma once
 
-#include "GPRInfo.h"
-#include "Instruction.h"
 #include "JSCJSValue.h"
 #include "MacroAssemblerCodeRef.h"
 #include "Opcode.h"
@@ -34,6 +32,7 @@
 namespace JSC {
 
 class VM;
+struct Instruction;
 
 #if ENABLE(C_LOOP)
 typedef OpcodeID LLIntCode;
@@ -146,16 +145,6 @@ ALWAYS_INLINE MacroAssemblerCodePtr<tag> getWide32CodePtr(OpcodeID opcodeID)
 }
 
 template<PtrTag tag>
-ALWAYS_INLINE MacroAssemblerCodePtr<tag> getCodePtr(const Instruction& instruction)
-{
-    if (instruction.isWide16())
-        return getWide16CodePtr<tag>(instruction.opcodeID());
-    if (instruction.isWide32())
-        return getWide32CodePtr<tag>(instruction.opcodeID());
-    return getCodePtr<tag>(instruction.opcodeID());
-}
-
-template<PtrTag tag>
 ALWAYS_INLINE MacroAssemblerCodeRef<tag> getCodeRef(OpcodeID opcodeID)
 {
     return MacroAssemblerCodeRef<tag>::createSelfManagedCodeRef(getCodePtr<tag>(opcodeID));
@@ -195,23 +184,4 @@ ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper())
     return bitwise_cast<void*>(glueHelper);
 }
 
-#if ENABLE(JIT)
-struct Registers {
-    static const GPRReg pcGPR = GPRInfo::regT4;
-
-#if CPU(X86_64) && !OS(WINDOWS)
-    static const GPRReg metadataTableGPR = GPRInfo::regCS1;
-    static const GPRReg pbGPR = GPRInfo::regCS2;
-#elif CPU(X86_64) && OS(WINDOWS)
-    static const GPRReg metadataTableGPR = GPRInfo::regCS3;
-    static const GPRReg pbGPR = GPRInfo::regCS4;
-#elif CPU(ARM64)
-    static const GPRReg metadataTableGPR = GPRInfo::regCS6;
-    static const GPRReg pbGPR = GPRInfo::regCS7;
-#elif CPU(MIPS) || CPU(ARM_THUMB2)
-    static const GPRReg metadataTableGPR = GPRInfo::regCS0;
-#endif
-};
-#endif
-
 } } // namespace JSC::LLInt
index 5c6c6a2..2d9a172 100644 (file)
@@ -929,33 +929,12 @@ macro traceExecution()
     end
 end
 
-macro defineOSRExitReturnLabel(opcodeName, size)
-    macro defineNarrow()
-        global _%opcodeName%_return_location_narrow
-        _%opcodeName%_return_location_narrow:
-    end
-
-    macro defineWide16()
-        global _%opcodeName%_return_location_wide_16
-        _%opcodeName%_return_location_wide_16:
-    end
-
-    macro defineWide32()
-        global _%opcodeName%_return_location_wide_32
-        _%opcodeName%_return_location_wide_32:
-    end
-
-    size(defineNarrow, defineWide16, defineWide32, macro (f) f() end)
-end
-
-macro callTargetFunction(opcodeName, size, opcodeStruct, dispatch, callee, callPtrTag)
+macro callTargetFunction(size, opcodeStruct, dispatch, callee, callPtrTag)
     if C_LOOP or C_LOOP_WIN
         cloopCallJSFunction callee
     else
         call callee, callPtrTag
     end
-
-    defineOSRExitReturnLabel(opcodeName, size)
     restoreStackPointerAfterCall()
     dispatchAfterCall(size, opcodeStruct, dispatch)
 end
@@ -1025,7 +1004,7 @@ macro prepareForTailCall(callee, temp1, temp2, temp3, callPtrTag)
     jmp callee, callPtrTag
 end
 
-macro slowPathForCall(opcodeName, size, opcodeStruct, dispatch, slowPath, prepareCall)
+macro slowPathForCall(size, opcodeStruct, dispatch, slowPath, prepareCall)
     callCallSlowPath(
         slowPath,
         # Those are r0 and r1
@@ -1034,19 +1013,10 @@ macro slowPathForCall(opcodeName, size, opcodeStruct, dispatch, slowPath, prepar
             move calleeFramePtr, sp
             prepareCall(callee, t2, t3, t4, SlowPathPtrTag)
         .dontUpdateSP:
-            callTargetFunction(%opcodeName%_slow, size, opcodeStruct, dispatch, callee, SlowPathPtrTag)
+            callTargetFunction(size, opcodeStruct, dispatch, callee, SlowPathPtrTag)
         end)
 end
 
-macro getterSetterOSRExitReturnPoint(opName, size)
-    crash() # We don't reach this in straight line code. We only reach it via returning to the code below when reconstructing stack frames during OSR exit.
-
-    defineOSRExitReturnLabel(opName, size)
-
-    restoreStackPointerAfterCall()
-    loadi ArgumentCount + TagOffset[cfr], PC
-end
-
 macro arrayProfile(offset, cellAndIndexingType, metadata, scratch)
     const cell = cellAndIndexingType
     const indexingType = cellAndIndexingType 
@@ -1771,7 +1741,7 @@ end)
 callOp(construct, OpConstruct, prepareForRegularCall, macro (getu, metadata) end)
 
 
-macro doCallVarargs(opcodeName, size, opcodeStruct, dispatch, frameSlowPath, slowPath, prepareCall)
+macro doCallVarargs(size, opcodeStruct, dispatch, frameSlowPath, slowPath, prepareCall)
     callSlowPath(frameSlowPath)
     branchIfException(_llint_throw_from_slow_path_trampoline)
     # calleeFrame in r1
@@ -1786,19 +1756,19 @@ macro doCallVarargs(opcodeName, size, opcodeStruct, dispatch, frameSlowPath, slo
             subp r1, CallerFrameAndPCSize, sp
         end
     end
-    slowPathForCall(opcodeName, size, opcodeStruct, dispatch, slowPath, prepareCall)
+    slowPathForCall(size, opcodeStruct, dispatch, slowPath, prepareCall)
 end
 
 
 llintOp(op_call_varargs, OpCallVarargs, macro (size, get, dispatch)
-    doCallVarargs(op_call_varargs, size, OpCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall)
+    doCallVarargs(size, OpCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall)
 end)
 
 llintOp(op_tail_call_varargs, OpTailCallVarargs, macro (size, get, dispatch)
     checkSwitchToJITForEpilogue()
     # We lie and perform the tail call instead of preparing it since we can't
     # prepare the frame for a call opcode
-    doCallVarargs(op_tail_call_varargs, size, OpTailCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_tail_call_varargs, prepareForTailCall)
+    doCallVarargs(size, OpTailCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_tail_call_varargs, prepareForTailCall)
 end)
 
 
@@ -1806,12 +1776,12 @@ llintOp(op_tail_call_forward_arguments, OpTailCallForwardArguments, macro (size,
     checkSwitchToJITForEpilogue()
     # We lie and perform the tail call instead of preparing it since we can't
     # prepare the frame for a call opcode
-    doCallVarargs(op_tail_call_forward_arguments, size, OpTailCallForwardArguments, dispatch, _llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall)
+    doCallVarargs(size, OpTailCallForwardArguments, dispatch, _llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall)
 end)
 
 
 llintOp(op_construct_varargs, OpConstructVarargs, macro (size, get, dispatch)
-    doCallVarargs(op_construct_varargs, size, OpConstructVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall)
+    doCallVarargs(size, OpConstructVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall)
 end)
 
 
@@ -1850,7 +1820,6 @@ end)
 
 _llint_op_call_eval:
     slowPathForCall(
-        op_call_eval_narrow,
         narrow,
         OpCallEval,
         macro () dispatchOp(narrow, op_call_eval) end,
@@ -1859,7 +1828,6 @@ _llint_op_call_eval:
 
 _llint_op_call_eval_wide16:
     slowPathForCall(
-        op_call_eval_wide16,
         wide16,
         OpCallEval,
         macro () dispatchOp(wide16, op_call_eval) end,
@@ -1868,7 +1836,6 @@ _llint_op_call_eval_wide16:
 
 _llint_op_call_eval_wide32:
     slowPathForCall(
-        op_call_eval_wide32,
         wide32,
         OpCallEval,
         macro () dispatchOp(wide32, op_call_eval) end,
index 85c2d6a..65f924f 100644 (file)
@@ -1398,13 +1398,6 @@ llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadat
 .opGetByIdSlow:
     callSlowPath(_llint_slow_path_get_by_id)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_get_by_id, size)
-    metadata(t2, t3)
-    valueProfile(OpGetById, t2, r1, r0)
-    return(r1, r0)
-
 end)
 
 
@@ -1467,11 +1460,6 @@ llintOpWithMetadata(op_put_by_id, OpPutById, macro (size, get, dispatch, metadat
 .opPutByIdSlow:
     callSlowPath(_llint_slow_path_put_by_id)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_put_by_id, size)
-    dispatch()
-
 end)
 
 
@@ -1523,17 +1511,10 @@ llintOpWithMetadata(op_get_by_val, OpGetByVal, macro (size, get, dispatch, metad
 .opGetByValSlow:
     callSlowPath(_llint_slow_path_get_by_val)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_get_by_val, size)
-    metadata(t2, t3)
-    valueProfile(OpGetByVal, t2, r1, r0)
-    return(r1, r0)
-
 end)
 
 
-macro putByValOp(opcodeName, opcodeStruct, osrExitPoint)
+macro putByValOp(opcodeName, opcodeStruct)
     llintOpWithMetadata(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, metadata, return)
         macro contiguousPutByVal(storeCallback)
             biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
@@ -1621,20 +1602,13 @@ macro putByValOp(opcodeName, opcodeStruct, osrExitPoint)
     .opPutByValSlow:
         callSlowPath(_llint_slow_path_%opcodeName%)
         dispatch()
-
-    .osrExitPoint:
-        osrExitPoint(size, dispatch)
     end)
 end
 
 
-putByValOp(put_by_val, OpPutByVal, macro (size, dispatch)
-    # osr return point
-    getterSetterOSRExitReturnPoint(op_put_by_val, size)
-    dispatch()
-end)
+putByValOp(put_by_val, OpPutByVal)
 
-putByValOp(put_by_val_direct, OpPutByValDirect, macro (a, b) end)
+putByValOp(put_by_val_direct, OpPutByValDirect)
 
 
 macro llintJumpTrueOrFalseOp(opcodeName, opcodeStruct, conditionOp)
@@ -1901,10 +1875,10 @@ macro commonCallOp(opcodeName, slowPath, opcodeStruct, prepareCall, prologue)
         storei CellTag, Callee + TagOffset[t3]
         move t3, sp
         prepareCall(%opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], t2, t3, t4, JSEntryPtrTag)
-        callTargetFunction(opcodeName, size, opcodeStruct, dispatch, %opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], JSEntryPtrTag)
+        callTargetFunction(size, opcodeStruct, dispatch, %opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], JSEntryPtrTag)
 
     .opCallSlow:
-        slowPathForCall(opcodeName, size, opcodeStruct, dispatch, slowPath, prepareCall)
+        slowPathForCall(size, opcodeStruct, dispatch, slowPath, prepareCall)
     end)
 end
 
index 3aac856..7f4eb83 100644 (file)
@@ -1325,6 +1325,7 @@ llintOpWithMetadata(op_get_by_id_direct, OpGetByIdDirect, macro (size, get, disp
     dispatch()
 end)
 
+
 llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadata, return)
     metadata(t2, t1)
     loadb OpGetById::Metadata::m_modeMetadata.mode[t2], t1
@@ -1375,13 +1376,6 @@ llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadat
 .opGetByIdSlow:
     callSlowPath(_llint_slow_path_get_by_id)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_get_by_id, size)
-    metadata(t2, t3)
-    valueProfile(OpGetById, t2, r0)
-    return(r0)
-
 end)
 
 
@@ -1454,11 +1448,6 @@ llintOpWithMetadata(op_put_by_id, OpPutById, macro (size, get, dispatch, metadat
 .opPutByIdSlow:
     callSlowPath(_llint_slow_path_put_by_id)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_put_by_id, size)
-    dispatch()
-
 end)
 
 
@@ -1630,17 +1619,10 @@ llintOpWithMetadata(op_get_by_val, OpGetByVal, macro (size, get, dispatch, metad
 .opGetByValSlow:
     callSlowPath(_llint_slow_path_get_by_val)
     dispatch()
-
-# osr return point
-    getterSetterOSRExitReturnPoint(op_get_by_val, size)
-    metadata(t5, t2)
-    valueProfile(OpGetByVal, t5, r0)
-    return(r0)
-
 end)
 
 
-macro putByValOp(opcodeName, opcodeStruct, osrExitPoint)
+macro putByValOp(opcodeName, opcodeStruct)
     llintOpWithMetadata(op_%opcodeName%, opcodeStruct, macro (size, get, dispatch, metadata, return)
         macro contiguousPutByVal(storeCallback)
             biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
@@ -1728,19 +1710,12 @@ macro putByValOp(opcodeName, opcodeStruct, osrExitPoint)
     .opPutByValSlow:
         callSlowPath(_llint_slow_path_%opcodeName%)
         dispatch()
-
-        osrExitPoint(size, dispatch)
-        
     end)
 end
 
-putByValOp(put_by_val, OpPutByVal, macro (size, dispatch)
-    # osr return point
-    getterSetterOSRExitReturnPoint(op_put_by_val, size)
-    dispatch()
-end)
+putByValOp(put_by_val, OpPutByVal)
 
-putByValOp(put_by_val_direct, OpPutByValDirect, macro (a, b) end)
+putByValOp(put_by_val_direct, OpPutByValDirect)
 
 
 macro llintJumpTrueOrFalseOp(opcodeName, opcodeStruct, conditionOp)
@@ -2029,10 +2004,10 @@ macro commonCallOp(opcodeName, slowPath, opcodeStruct, prepareCall, prologue)
         storei t2, ArgumentCount + PayloadOffset[t3]
         move t3, sp
         prepareCall(%opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], t2, t3, t4, JSEntryPtrTag)
-        callTargetFunction(opcodeName, size, opcodeStruct, dispatch, %opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], JSEntryPtrTag)
+        callTargetFunction(size, opcodeStruct, dispatch, %opcodeStruct%::Metadata::m_callLinkInfo.m_machineCodeTarget[t5], JSEntryPtrTag)
 
     .opCallSlow:
-        slowPathForCall(opcodeName, size, opcodeStruct, dispatch, slowPath, prepareCall)
+        slowPathForCall(size, opcodeStruct, dispatch, slowPath, prepareCall)
     end)
 end
 
index e311384..f96defc 100644 (file)
@@ -214,12 +214,10 @@ class Assembler
 
     def putsLabel(labelName, isGlobal)
         raise unless @state == :asm
-        unless isGlobal
-            @deferredNextLabelActions.each {
-                | action |
-                action.call()
-            }
-        end
+        @deferredNextLabelActions.each {
+            | action |
+            action.call()
+        }
         @deferredNextLabelActions = []
         @numGlobalLabels += 1
         putsProcEndIfNeeded if $emitWinAsm and isGlobal
@@ -403,7 +401,7 @@ File.open(outputFlnm, "w") {
             lowLevelAST = lowLevelAST.resolve(buildOffsetsMap(lowLevelAST, offsetsList))
             lowLevelAST.validate
             emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) {
-                $currentSettings = concreteSettings
+                 $currentSettings = concreteSettings
                 $asm.inAsm {
                     lowLevelAST.lower(backend)
                 }
index 69583b6..6b8d9b4 100644 (file)
@@ -259,9 +259,7 @@ class Label
                     match
                 end
             }
-            result = Label.forName(codeOrigin, name, @definedInFile)
-            result.setGlobal() if @global
-            result
+            Label.forName(codeOrigin, name, @definedInFile)
         else
             self
         end
@@ -274,9 +272,7 @@ class Label
                 raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin}" unless mapping[var]
                 mapping[var].name
             }
-            result = Label.forName(codeOrigin, name, @definedInFile)
-            result.setGlobal() if @global
-            result
+            Label.forName(codeOrigin, name, @definedInFile)
         else
             self
         end
index 461f038..3586e64 100644 (file)
@@ -464,7 +464,6 @@ namespace JSC {
     v(OptionString, dumpJITMemoryPath, nullptr, Restricted, nullptr) \
     v(Double, dumpJITMemoryFlushInterval, 10, Restricted, "Maximum time in between flushes of the JIT memory dump in seconds.") \
     v(Bool, useUnlinkedCodeBlockJettisoning, false, Normal, "If true, UnlinkedCodeBlock can be jettisoned.") \
-    v(Bool, forceOSRExitToLLInt, false, Normal, "If true, we always exit to the LLInt. If false, we exit to whatever is most convenient.") \
 
 enum OptionEquivalence {
     SameOption,
index b96b23e..1dbb388 100644 (file)
@@ -1,3 +1,15 @@
+2019-10-07  Matt Lewis  <jlewis3@apple.com>
+
+        Unreviewed, rolling out r250750.
+
+        Reverting change as this broke interal test over the weekend.
+
+        Reverted changeset:
+
+        "Allow OSR exit to the LLInt"
+        https://bugs.webkit.org/show_bug.cgi?id=197993
+        https://trac.webkit.org/changeset/250750
+
 2019-10-07  youenn fablet  <youenn@apple.com>
 
         [iOS] Unmuting capture of a page is not working
index d1b4daf..24d395f 100755 (executable)
@@ -495,7 +495,6 @@ B3O1_OPTIONS = ["--defaultB3OptLevel=1"]
 B3O0_OPTIONS = ["--maxDFGNodesInBasicBlockForPreciseAnalysis=100", "--defaultB3OptLevel=0"]
 FTL_OPTIONS = ["--useFTLJIT=true"]
 PROBE_OSR_EXIT_OPTION = ["--useProbeOSRExit=true"]
-FORCE_LLINT_EXIT_OPTIONS = ["--forceOSRExitToLLInt=true"]
 
 require_relative "webkitruby/jsc-stress-test-writer-#{$testWriter}"
 
@@ -709,7 +708,7 @@ def runFTLNoCJIT(*optionalTestSpecificOptions)
 end
 
 def runFTLNoCJITB3O0(*optionalTestSpecificOptions)
-    run("ftl-no-cjit-b3o0", "--useArrayAllocationProfiling=false", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + B3O0_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+    run("ftl-no-cjit-b3o0", "--useArrayAllocationProfiling=false", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + B3O0_OPTIONS + optionalTestSpecificOptions))
 end
 
 def runFTLNoCJITValidate(*optionalTestSpecificOptions)
@@ -729,7 +728,7 @@ def runFTLNoCJITOSRValidation(*optionalTestSpecificOptions)
 end
 
 def runDFGEager(*optionalTestSpecificOptions)
-    run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + PROBE_OSR_EXIT_OPTION + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+    run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + PROBE_OSR_EXIT_OPTION + optionalTestSpecificOptions))
 end
 
 def runDFGEagerNoCJITValidate(*optionalTestSpecificOptions)
@@ -746,7 +745,7 @@ def runFTLEagerWatchdog(*optionalTestSpecificOptions)
 end
 
 def runFTLEagerNoCJITValidate(*optionalTestSpecificOptions)
-    run("ftl-eager-no-cjit", "--validateGraph=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+    run("ftl-eager-no-cjit", "--validateGraph=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
 end
 
 def runFTLEagerNoCJITB3O1(*optionalTestSpecificOptions)