Transition void cti_op_* methods to JIT operations.
author    msaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Tue, 15 Oct 2013 18:33:04 +0000 (18:33 +0000)
committer msaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Tue, 15 Oct 2013 18:33:04 +0000 (18:33 +0000)
https://bugs.webkit.org/show_bug.cgi?id=122617

Reviewed by Geoffrey Garen.

Converted the following stubs to JIT operations (a representative before/after conversion is sketched after this list):
    cti_handle_watchdog_timer
    cti_op_debug
    cti_op_pop_scope
    cti_op_profile_did_call
    cti_op_profile_will_call
    cti_op_put_by_index
    cti_op_put_getter_setter
    cti_op_tear_off_activation
    cti_op_tear_off_arguments
    cti_op_throw_static_error
    cti_optimize
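
A representative conversion from this patch (emit_op_tear_off_activation in
jit/JITOpcodes.cpp): argument marshalling moves from JITStubCall into an
explicit register load plus a typed callOperation() overload.

    // Before: indirect call through a CTI stub.
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(activation, regT2);
    stubCall.call();

    // After: load the operand and call the JIT operation directly.
    emitGetVirtualRegister(activation, regT0);
    callOperation(operationTearOffActivation, regT0);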

* dfg/DFGOperations.cpp:
* dfg/DFGOperations.h:
* jit/CCallHelpers.h:
(JSC::CCallHelpers::setupArgumentsWithExecState):
(JSC::CCallHelpers::setupThreeStubArgsGPR):
(JSC::CCallHelpers::setupStubArguments):
(JSC::CCallHelpers::setupStubArguments134):
* jit/JIT.cpp:
(JSC::JIT::emitEnterOptimizationCheck):
* jit/JIT.h:
* jit/JITInlines.h:
(JSC::JIT::callOperation):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_tear_off_activation):
(JSC::JIT::emit_op_tear_off_arguments):
(JSC::JIT::emit_op_push_with_scope):
(JSC::JIT::emit_op_pop_scope):
(JSC::JIT::emit_op_push_name_scope):
(JSC::JIT::emit_op_throw_static_error):
(JSC::JIT::emit_op_debug):
(JSC::JIT::emit_op_profile_will_call):
(JSC::JIT::emit_op_profile_did_call):
(JSC::JIT::emitSlow_op_loop_hint):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_push_with_scope):
(JSC::JIT::emit_op_pop_scope):
(JSC::JIT::emit_op_push_name_scope):
(JSC::JIT::emit_op_throw_static_error):
(JSC::JIT::emit_op_debug):
(JSC::JIT::emit_op_profile_will_call):
(JSC::JIT::emit_op_profile_did_call):
* jit/JITOperations.cpp:
* jit/JITOperations.h:
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_put_by_index):
(JSC::JIT::emit_op_put_getter_setter):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_put_by_index):
(JSC::JIT::emit_op_put_getter_setter):
* jit/JITStubs.cpp:
* jit/JITStubs.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@157457 268f45cc-cd09-0410-ab3c-d52691b4dbfc

15 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/dfg/DFGOperations.cpp
Source/JavaScriptCore/dfg/DFGOperations.h
Source/JavaScriptCore/jit/CCallHelpers.h
Source/JavaScriptCore/jit/JIT.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITOperations.h
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
Source/JavaScriptCore/jit/JITStubs.cpp
Source/JavaScriptCore/jit/JITStubs.h

Source/JavaScriptCore/ChangeLog
index 2c1e538..31b59d5 100644
@@ -1,3 +1,65 @@
+2013-10-14  Michael Saboff  <msaboff@apple.com>
+
+        Transition void cti_op_* methods to JIT operations.
+        https://bugs.webkit.org/show_bug.cgi?id=122617
+
+        Reviewed by Geoffrey Garen.
+
+        Converted the following stubs to JIT operations:
+            cti_handle_watchdog_timer
+            cti_op_debug
+            cti_op_pop_scope
+            cti_op_profile_did_call
+            cti_op_profile_will_call
+            cti_op_put_by_index
+            cti_op_put_getter_setter
+            cti_op_tear_off_activation
+            cti_op_tear_off_arguments
+            cti_op_throw_static_error
+            cti_optimize
+
+        * dfg/DFGOperations.cpp:
+        * dfg/DFGOperations.h:
+        * jit/CCallHelpers.h:
+        (JSC::CCallHelpers::setupArgumentsWithExecState):
+        (JSC::CCallHelpers::setupThreeStubArgsGPR):
+        (JSC::CCallHelpers::setupStubArguments):
+        (JSC::CCallHelpers::setupStubArguments134):
+        * jit/JIT.cpp:
+        (JSC::JIT::emitEnterOptimizationCheck):
+        * jit/JIT.h:
+        * jit/JITInlines.h:
+        (JSC::JIT::callOperation):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emit_op_tear_off_activation):
+        (JSC::JIT::emit_op_tear_off_arguments):
+        (JSC::JIT::emit_op_push_with_scope):
+        (JSC::JIT::emit_op_pop_scope):
+        (JSC::JIT::emit_op_push_name_scope):
+        (JSC::JIT::emit_op_throw_static_error):
+        (JSC::JIT::emit_op_debug):
+        (JSC::JIT::emit_op_profile_will_call):
+        (JSC::JIT::emit_op_profile_did_call):
+        (JSC::JIT::emitSlow_op_loop_hint):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_push_with_scope):
+        (JSC::JIT::emit_op_pop_scope):
+        (JSC::JIT::emit_op_push_name_scope):
+        (JSC::JIT::emit_op_throw_static_error):
+        (JSC::JIT::emit_op_debug):
+        (JSC::JIT::emit_op_profile_will_call):
+        (JSC::JIT::emit_op_profile_did_call):
+        * jit/JITOperations.cpp:
+        * jit/JITOperations.h:
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emit_op_put_by_index):
+        (JSC::JIT::emit_op_put_getter_setter):
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emit_op_put_by_index):
+        (JSC::JIT::emit_op_put_getter_setter):
+        * jit/JITStubs.cpp:
+        * jit/JITStubs.h:
+
 2013-10-15  Julien Brianceau  <jbriance@cisco.com>
 
         [sh4] Introduce const pools in LLINT.
Source/JavaScriptCore/dfg/DFGOperations.cpp
index 10f8051..f35b331 100644
@@ -692,16 +692,6 @@ JSCell* JIT_OPERATION operationCreateInlinedArguments(
     return result;
 }
 
-void JIT_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell)
-{
-    ASSERT(exec->codeBlock()->usesArguments());
-    if (activationCell) {
-        jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec, jsCast<JSActivation*>(activationCell));
-        return;
-    }
-    jsCast<Arguments*>(argumentsCell)->tearOff(exec);
-}
-
 void JIT_OPERATION operationTearOffInlinedArguments(
     ExecState* exec, JSCell* argumentsCell, JSCell* activationCell, InlineCallFrame* inlineCallFrame)
 {
Source/JavaScriptCore/dfg/DFGOperations.h
index 88e9aa5..6e73f13 100644
@@ -91,7 +91,6 @@ size_t JIT_OPERATION operationRegExpTest(ExecState*, JSCell*, JSCell*) WTF_INTER
 size_t JIT_OPERATION operationCompareStrictEqCell(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareStrictEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationCreateInlinedArguments(ExecState*, InlineCallFrame*) WTF_INTERNAL;
-void JIT_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
 void JIT_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, JSCell*, InlineCallFrame*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState*, int32_t) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationGetInlinedArgumentByVal(ExecState*, int32_t, InlineCallFrame*, int32_t) WTF_INTERNAL;
Source/JavaScriptCore/jit/CCallHelpers.h
index 405ac3b..6ff88cf 100644
@@ -210,6 +210,15 @@ public:
         addCallArgument(arg3);
     }
 
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
+    {
+        resetCallArguments();
+        addCallArgument(GPRInfo::callFrameRegister);
+        addCallArgument(arg1);
+        addCallArgument(arg2);
+        addCallArgument(arg3);
+    }
+
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
     {
         resetCallArguments();
@@ -372,7 +381,27 @@ public:
         addCallArgument(arg4);
     }
 
-    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+    ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+    {
+        resetCallArguments();
+        addCallArgument(GPRInfo::callFrameRegister);
+        addCallArgument(arg1);
+        addCallArgument(arg2);
+        addCallArgument(arg3);
+        addCallArgument(arg4);
+    }
+
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+    {
+        resetCallArguments();
+        addCallArgument(GPRInfo::callFrameRegister);
+        addCallArgument(arg1);
+        addCallArgument(arg2);
+        addCallArgument(arg3);
+        addCallArgument(arg4);
+    }
+
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
     {
         resetCallArguments();
         addCallArgument(GPRInfo::callFrameRegister);
@@ -383,6 +412,17 @@ public:
         addCallArgument(arg5);
     }
 
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+    {
+        resetCallArguments();
+        addCallArgument(GPRInfo::callFrameRegister);
+        addCallArgument(arg1);
+        addCallArgument(arg2);
+        addCallArgument(arg3);
+        addCallArgument(arg4);
+        addCallArgument(arg5);
+    }
+    
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
     {
         resetCallArguments();
@@ -464,6 +504,60 @@ public:
         } else
             swap(destA, destB);
     }
+
+    template<GPRReg destA, GPRReg destB, GPRReg destC>
+    void setupThreeStubArgsGPR(GPRReg srcA, GPRReg srcB, GPRReg srcC)
+    {
+        // If neither of srcB/srcC are in our way, then we can move srcA into place.
+        // Then we can use setupTwoStubArgs to fix srcB/srcC.
+        if (srcB != destA && srcC != destA) {
+            move(srcA, destA);
+            setupTwoStubArgsGPR<destB, destC>(srcB, srcC);
+            return;
+        }
+        
+        // If neither of srcA/srcC are in our way, then we can move srcB into place.
+        // Then we can use setupTwoStubArgs to fix srcA/srcC.
+        if (srcA != destB && srcC != destB) {
+            move(srcB, destB);
+            setupTwoStubArgsGPR<destA, destC>(srcA, srcC);
+            return;
+        }
+        
+        // If neither of srcA/srcB are in our way, then we can move srcC into place.
+        // Then we can use setupTwoStubArgs to fix srcA/srcB.
+        if (srcA != destC && srcB != destC) {
+            move(srcC, destC);
+            setupTwoStubArgsGPR<destA, destB>(srcA, srcB);
+            return;
+        }
+        
+        // If we get here, we haven't been able to move any of srcA/srcB/srcC.
+        // Since all three are blocked, then all three must already be in the argument register.
+        // But are they in the right ones?
+        
+        // First, ensure srcA is in place.
+        if (srcA != destA) {
+            swap(srcA, destA);
+            
+            // If srcA wasn't in destA, one of srcB/srcC must be.
+            ASSERT(srcB == destA || srcC == destA);
+            // If srcB was in destA it no longer is (due to the swap).
+            // Otherwise srcC must have been. Mark it as moved.
+            if (srcB == destA)
+                srcB = srcA;
+            else
+                srcC = srcA;
+        }
+        
+        // Either srcB & srcC need swapping, or we're all done.
+        ASSERT((srcB == destB || srcC == destC)
+            || (srcB == destC || srcC == destB));
+        
+        if (srcB != destB)
+            swap(destB, destC);
+    }
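+
+    // Illustrative worked example of the all-blocked case: with destA/destB/
+    // destC = argumentGPR1/2/3 and srcA/srcB/srcC = argumentGPR2/3/1 (a
+    // three-cycle), none of the direct moves above is safe. swap(srcA, destA)
+    // leaves srcA's value in argumentGPR1 and retags srcC as living in
+    // argumentGPR2, and the final swap(destB, destC) then puts srcB and srcC
+    // into argumentGPR2 and argumentGPR3 respectively.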
+
 #if CPU(X86_64)
     template<FPRReg destA, FPRReg destB>
     void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB)
@@ -516,58 +610,16 @@ public:
     {
         setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
     }
+
     void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
     {
-        // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
-        // Then we can use setupTwoStubArgs to fix arg2/arg3.
-        if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
-            move(arg1, GPRInfo::argumentGPR1);
-            setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
-            return;
-        }
-
-        // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
-        // Then we can use setupTwoStubArgs to fix arg1/arg3.
-        if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
-            move(arg2, GPRInfo::argumentGPR2);
-            setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
-            return;
-        }
-
-        // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
-        // Then we can use setupTwoStubArgs to fix arg1/arg2.
-        if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
-            move(arg3, GPRInfo::argumentGPR3);
-            setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
-            return;
-        }
-
-        // If we get here, we haven't been able to move any of arg1/arg2/arg3.
-        // Since all three are blocked, then all three must already be in the argument register.
-        // But are they in the right ones?
-
-        // First, ensure arg1 is in place.
-        if (arg1 != GPRInfo::argumentGPR1) {
-            swap(arg1, GPRInfo::argumentGPR1);
-
-            // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
-            ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
-            // If arg2 was in argumentGPR1 it no longer is (due to the swap).
-            // Otherwise arg3 must have been. Mark him as moved.
-            if (arg2 == GPRInfo::argumentGPR1)
-                arg2 = arg1;
-            else
-                arg3 = arg1;
-        }
-
-        // Either arg2 & arg3 need swapping, or we're all done.
-        ASSERT((arg2 == GPRInfo::argumentGPR2 || arg3 == GPRInfo::argumentGPR3)
-            || (arg2 == GPRInfo::argumentGPR3 || arg3 == GPRInfo::argumentGPR2));
-
-        if (arg2 != GPRInfo::argumentGPR2)
-            swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
+        setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
     }
 
+    void setupStubArguments134(GPRReg arg1, GPRReg arg3, GPRReg arg4)
+    {
+        setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4>(arg1, arg3, arg4);
+    }
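+
+    // (In setupStubArguments134 above, "134" names the destinations: the three
+    // sources land in argumentGPR1, argumentGPR3 and argumentGPR4, leaving
+    // argumentGPR2 free for an immediate; see the (GPRReg, TrustedImmPtr,
+    // GPRReg, GPRReg) overload of setupArgumentsWithExecState below.)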
 #if CPU(MIPS)
 #define POKE_ARGUMENT_OFFSET 4
 #else
@@ -1039,6 +1091,12 @@ public:
         setupArgumentsWithExecState(arg1, arg2, arg3);
     }
 
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+    {
+        poke(arg4, POKE_ARGUMENT_OFFSET);
+        setupArgumentsWithExecState(arg1, arg2, arg3);
+    }
+
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
     {
         poke(arg4, POKE_ARGUMENT_OFFSET);
@@ -1191,6 +1249,13 @@ public:
         move(arg3, GPRInfo::argumentGPR3);
         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
     }
+
+    ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+    {
+        setupStubArguments134(arg1, arg3, arg4);
+        move(arg2, GPRInfo::argumentGPR2);
+        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+    }
 #endif
 
     void setupResults(GPRReg destA, GPRReg destB)
Source/JavaScriptCore/jit/JIT.cpp
index 1c55017..e0d322d 100644
@@ -104,11 +104,13 @@ void JIT::emitEnterOptimizationCheck()
     if (!canBeOptimized())
         return;
 
-    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
-    JITStubCall stubCall(this, cti_optimize);
-    stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
+    JumpList skipOptimize;
+    
+    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
     ASSERT(!m_bytecodeOffset);
-    stubCall.call();
+    callOperation(operationOptimize, m_bytecodeOffset);
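+    // A nonzero result from operationOptimize is the address to enter the
+    // optimized code at; null means keep running in the baseline JIT.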
+    skipOptimize.append(branchTestPtr(Zero, returnValueRegister));
+    jump(returnValueRegister);
     skipOptimize.link(this);
 }
 #endif
Source/JavaScriptCore/jit/JIT.h
index 76c90c5..ff12cdc 100644
@@ -870,16 +870,27 @@ namespace JSC {
         MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*);
         MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t);
         MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t);
+        MacroAssembler::Call callOperation(P_JITOperation_EZ, int32_t);
         MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID);
         MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID);
         MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID);
         MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_E);
+        MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ECICC, RegisterID, const Identifier*, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJIdJJ, RegisterID, const Identifier*, RegisterID, RegisterID);
 #if USE(JSVALUE64)
         MacroAssembler::Call callOperation(V_JITOperation_EJJI, RegisterID, RegisterID, StringImpl*);
 #else
         MacroAssembler::Call callOperation(V_JITOperation_EJJI, RegisterID, RegisterID, RegisterID, RegisterID, StringImpl*);
 #endif
+        MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t);
         MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*);
+        MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t);
         MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E);
         MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*);
         MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E);
@@ -892,6 +903,10 @@ namespace JSC {
         MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t);
         MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID);
         MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, RegisterID, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID);
 #endif
 
         Jump checkStructure(RegisterID reg, Structure* structure);
Source/JavaScriptCore/jit/JITInlines.h
index c8c2b07..e37f184 100644
@@ -336,6 +336,12 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operatio
     return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
 }
 
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EZ operation, int32_t op)
+{
+    setupArgumentsWithExecState(TrustedImm32(op));
+    return appendCallWithExceptionCheck(operation);
+}
+
 ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EJS operation, GPRReg arg1, size_t arg2)
 {
     setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
@@ -366,12 +372,66 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss opera
     return appendCallWithExceptionCheck(operation);
 }
 
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation)
+{
+    setupArgumentsExecState();
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2, int32_t op3)
+{
+    setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2, TrustedImm32(op3));
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operation, RegisterID regOp)
+{
+    setupArgumentsWithExecState(regOp);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operation, RegisterID regOp)
+{
+    setupArgumentsWithExecState(regOp);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
+{
+    setupArgumentsWithExecState(regOp1, regOp2);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+{
+    setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1, int32_t op2)
+{
+    setupArgumentsWithExecState(regOp1, TrustedImm32(op2));
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, RegisterID regOp1, int32_t op2, RegisterID regOp3)
+{
+    setupArgumentsWithExecState(regOp1, TrustedImm32(op2), regOp3);
+    return appendCallWithExceptionCheck(operation);
+}
+
 ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC)
 {
     setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
     return appendCallWithExceptionCheck(operation);
 }
 
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZ operation, int32_t op)
+{
+    setupArgumentsWithExecState(TrustedImm32(op));
+    return appendCallWithExceptionCheck(operation);
+}
+
 ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnException(J_JITOperation_E operation)
 {
     setupArgumentsExecState();
@@ -456,12 +516,41 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operati
     return appendCallWithExceptionCheck(operation);
 }
 
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECICC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+{
+    setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload)
+{
+    setupArgumentsWithExecState(regOp1Payload, regOp1Tag);
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2Tag, RegisterID regOp2Payload, int32_t op3)
+{
+    setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG TrustedImmPtr(identOp1), regOp2Payload, regOp2Tag, TrustedImm32(op3));
+    return appendCallWithExceptionCheck(operation);
+}
+
 ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJI operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, StringImpl* uid)
 {
     setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, TrustedImmPtr(uid));
     return appendCallWithExceptionCheck(operation);
 }
 
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2)
+{
+    setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2));
+    return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2, RegisterID regOp3Tag, RegisterID regOp3Payload)
+{
+    setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2), regOp3Payload, regOp3Tag);
+    return appendCallWithExceptionCheck(operation);
+}
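+
+// (EABI_32BIT_DUMMY_ARG, #defined earlier in this file and #undef'd just
+// below, pads the argument list on 32-bit EABI targets so the two halves of
+// an EncodedJSValue stay in an aligned register pair; elsewhere it expands to
+// nothing.)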
 #undef EABI_32BIT_DUMMY_ARG
 #undef SH4_32BIT_DUMMY_ARG
 
Source/JavaScriptCore/jit/JITOpcodes.cpp
index 49b9285..62a7584 100644
@@ -248,9 +248,8 @@ void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
 {
     int activation = currentInstruction[1].u.operand;
     Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
-    JITStubCall stubCall(this, cti_op_tear_off_activation);
-    stubCall.addArgument(activation, regT2);
-    stubCall.call();
+    emitGetVirtualRegister(activation, regT0);
+    callOperation(operationTearOffActivation, regT0);
     activationNotCreated.link(this);
 }
 
@@ -260,10 +259,9 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
     int activation = currentInstruction[2].u.operand;
 
     Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset())));
-    JITStubCall stubCall(this, cti_op_tear_off_arguments);
-    stubCall.addArgument(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT2);
-    stubCall.addArgument(activation, regT2);
-    stubCall.call();
+    emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+    emitGetVirtualRegister(activation, regT1);
+    callOperation(operationTearOffArguments, regT0, regT1);
     argsNotCreated.link(this);
 }
 
@@ -600,14 +598,13 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
 
 void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_push_with_scope);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    callOperation(operationPushWithScope, regT0);
 }
 
 void JIT::emit_op_pop_scope(Instruction*)
 {
-    JITStubCall(this, cti_op_pop_scope).call();
+    callOperation(operationPopScope);
 }
 
 void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
@@ -663,11 +660,8 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
 
 void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_push_name_scope);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand)));
-    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand);
 }
 
 void JIT::emit_op_catch(Instruction* currentInstruction)
@@ -729,13 +723,8 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
 
 void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_throw_static_error);
-    if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
-        stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
-    else
-        stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
-    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
-    stubCall.call();
+    move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0);
+    callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand);
 }
 
 void JIT::emit_op_debug(Instruction* currentInstruction)
@@ -744,9 +733,7 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
     UNUSED_PARAM(currentInstruction);
     breakpoint();
 #else
-    JITStubCall stubCall(this, cti_op_debug);
-    stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
-    stubCall.call();
+    callOperation(operationDebug, currentInstruction[1].u.operand);
 #endif
 }
 
@@ -919,16 +906,14 @@ void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCa
 
 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_profile_will_call);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    callOperation(operationProfileWillCall, regT0);
 }
 
 void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_profile_did_call);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    callOperation(operationProfileDidCall, regT0);
 }
 
 
@@ -1168,10 +1153,11 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
     if (canBeOptimized() && Options::enableOSREntryInLoops()) {
         linkSlowCase(iter);
         
-        JITStubCall stubCall(this, cti_optimize);
-        stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
-        stubCall.call();
-        
+        callOperation(operationOptimize, m_bytecodeOffset);
+        Jump noOptimizedEntry = branchTestPtr(Zero, returnValueRegister);
+        jump(returnValueRegister);
+        noOptimizedEntry.link(this);
+
         emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
     }
 #endif
@@ -1179,9 +1165,7 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
     // Emit the slow path of the watchdog timer check:
     if (m_vm->watchdog.isEnabled()) {
         linkSlowCase(iter);
-
-        JITStubCall stubCall(this, cti_handle_watchdog_timer);
-        stubCall.call();
+        callOperation(operationHandleWatchdogTimer);
 
         emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
     }
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 276c5ac..a36e982 100644
@@ -929,14 +929,13 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
 
 void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_push_with_scope);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call();
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationPushWithScope, regT1, regT0);
 }
 
 void JIT::emit_op_pop_scope(Instruction*)
 {
-    JITStubCall(this, cti_op_pop_scope).call();
+    callOperation(operationPopScope);
 }
 
 void JIT::emit_op_to_number(Instruction* currentInstruction)
@@ -968,11 +967,8 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase
 
 void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_push_name_scope);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand)));
-    stubCall.addArgument(currentInstruction[2].u.operand);
-    stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
-    stubCall.call();
+    emitLoad(currentInstruction[2].u.operand, regT1, regT0);
+    callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT1, regT0, currentInstruction[3].u.operand);
 }
 
 void JIT::emit_op_catch(Instruction* currentInstruction)
@@ -1041,12 +1037,8 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
 
 void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
 {
-    unsigned message = currentInstruction[1].u.operand;
-
-    JITStubCall stubCall(this, cti_op_throw_static_error);
-    stubCall.addArgument(m_codeBlock->getConstant(message));
-    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
-    stubCall.call();
+    emitLoad(m_codeBlock->getConstant(currentInstruction[1].u.operand), regT1, regT0);
+    callOperation(operationThrowStaticError, regT1, regT0, currentInstruction[2].u.operand);
 }
 
 void JIT::emit_op_debug(Instruction* currentInstruction)
@@ -1055,9 +1047,7 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
     UNUSED_PARAM(currentInstruction);
     breakpoint();
 #else
-    JITStubCall stubCall(this, cti_op_debug);
-    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-    stubCall.call();
+    callOperation(operationDebug, currentInstruction[1].u.operand);
 #endif
 }
 
@@ -1179,16 +1169,14 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
 
 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_profile_will_call);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call();
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationProfileWillCall, regT1, regT0);
 }
 
 void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_profile_did_call);
-    stubCall.addArgument(currentInstruction[1].u.operand);
-    stubCall.call();
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationProfileDidCall, regT1, regT0);
 }
 
 void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
Source/JavaScriptCore/jit/JITOperations.cpp
index 02b3a35..e7a2aab 100644
 #include "Arguments.h"
 #include "ArrayConstructor.h"
 #include "CommonSlowPaths.h"
+#include "DFGCompilationMode.h"
+#include "DFGDriver.h"
+#include "DFGOSREntry.h"
+#include "DFGWorklist.h"
 #include "Error.h"
 #include "GetterSetter.h"
 #include "HostCallReturnValue.h"
 #include "JITOperationWrappers.h"
+#include "JITToDFGDeferredCompilationCallback.h"
 #include "JSGlobalObjectFunctions.h"
+#include "JSNameScope.h"
 #include "JSPropertyNameIterator.h"
+#include "JSWithScope.h"
 #include "ObjectConstructor.h"
 #include "Operations.h"
 #include "Repatch.h"
@@ -796,6 +803,372 @@ EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr
     return JSValue::encode(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regexp));
 }
 
+void JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    if (UNLIKELY(vm.watchdog.didFire(exec)))
+        vm.throwException(exec, createTerminatedExecutionException(&vm));
+}
+
+void JIT_OPERATION operationThrowStaticError(ExecState* exec, EncodedJSValue encodedValue, int32_t referenceErrorFlag)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    String message = errorDescriptionForValue(exec, JSValue::decode(encodedValue))->value(exec);
+    if (referenceErrorFlag)
+        vm.throwException(exec, createReferenceError(exec, message));
+    else
+        vm.throwException(exec, createTypeError(exec, message));
+}
+
+void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
+}
+
+#if ENABLE(DFG_JIT)
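+// Returns the machine-code address to jump to for on-stack replacement, or
+// null to keep executing in the baseline JIT. The call sites
+// (emitEnterOptimizationCheck and emitSlow_op_loop_hint) test the returned
+// pointer and jump through it when it is nonzero.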
+char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    // Defer GC so that it doesn't run between when we enter into this slow path and
+    // when we figure out the state of our code block. This prevents a number of
+    // awkward reentrancy scenarios, including:
+    //
+    // - The optimized version of our code block being jettisoned by GC right after
+    //   we concluded that we wanted to use it.
+    //
+    // - An optimized version of our code block being installed just as we decided
+    //   that it wasn't ready yet.
+    //
+    // This still leaves the following: anytime we return from cti_optimize, we may
+    // GC, and the GC may either jettison the optimized version of our code block,
+    // or it may install the optimized version of our code block even though we
+    // concluded that it wasn't ready yet.
+    //
+    // Note that jettisoning won't happen if we already initiated OSR, because in
+    // that case we would have already planted the optimized code block into the JS
+    // stack.
+    DeferGC deferGC(vm.heap);
+    
+    CodeBlock* codeBlock = exec->codeBlock();
+
+    if (bytecodeIndex) {
+        // If we're attempting to OSR from a loop, assume that this should be
+        // separately optimized.
+        codeBlock->m_shouldAlwaysBeInlined = false;
+    }
+
+    if (Options::verboseOSR()) {
+        dataLog(
+            *codeBlock, ": Entered optimize with bytecodeIndex = ", bytecodeIndex,
+            ", executeCounter = ", codeBlock->jitExecuteCounter(),
+            ", optimizationDelayCounter = ", codeBlock->reoptimizationRetryCounter(),
+            ", exitCounter = ");
+        if (codeBlock->hasOptimizedReplacement())
+            dataLog(codeBlock->replacement()->osrExitCounter());
+        else
+            dataLog("N/A");
+        dataLog("\n");
+    }
+
+    if (!codeBlock->checkIfOptimizationThresholdReached()) {
+        codeBlock->updateAllPredictions();
+        if (Options::verboseOSR())
+            dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
+        return 0;
+    }
+    
+    if (codeBlock->m_shouldAlwaysBeInlined) {
+        codeBlock->updateAllPredictions();
+        codeBlock->optimizeAfterWarmUp();
+        if (Options::verboseOSR())
+            dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
+        return 0;
+    }
+
+    // We cannot be in the process of asynchronous compilation and also have an optimized
+    // replacement.
+    ASSERT(
+        !vm.worklist
+        || !(vm.worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
+        && codeBlock->hasOptimizedReplacement()));
+
+    DFG::Worklist::State worklistState;
+    if (vm.worklist) {
+        // The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready
+        // (i.e. compiled) code blocks. But if it completes ours, we also need to know
+        // what the result was so that we don't plow ahead and attempt OSR or immediate
+        // reoptimization. This will have already also set the appropriate JIT execution
+        // count threshold depending on what happened, so if the compilation was anything
+        // but successful we just want to return early. See the case for worklistState ==
+        // DFG::Worklist::Compiled, below.
+        
+        // Note that we could have alternatively just called Worklist::compilationState()
+        // here, and if it returned Compiled, we could have then called
+        // completeAndScheduleOSR() below. But that would have meant that it could take
+        // longer for code blocks to be completed: they would only complete when *their*
+        // execution count trigger fired; but that could take a while since the firing is
+        // racy. It could also mean that code blocks that never run again after being
+        // compiled would sit on the worklist until next GC. That's fine, but it's
+        // probably a waste of memory. Our goal here is to complete code blocks as soon as
+        // possible in order to minimize the chances of us executing baseline code after
+        // optimized code is already available.
+        worklistState = vm.worklist->completeAllReadyPlansForVM(
+            vm, DFG::CompilationKey(codeBlock, DFG::DFGMode));
+    } else
+        worklistState = DFG::Worklist::NotKnown;
+
+    if (worklistState == DFG::Worklist::Compiling) {
+        // We cannot be in the process of asynchronous compilation and also have an optimized
+        // replacement.
+        RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement());
+        codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred);
+        return 0;
+    }
+
+    if (worklistState == DFG::Worklist::Compiled) {
+        // If we don't have an optimized replacement but we did just get compiled, then
+        // the compilation failed or was invalidated, in which case the execution count
+        // thresholds have already been set appropriately by
+        // CodeBlock::setOptimizationThresholdBasedOnCompilationResult() and we have
+        // nothing left to do.
+        if (!codeBlock->hasOptimizedReplacement()) {
+            codeBlock->updateAllPredictions();
+            if (Options::verboseOSR())
+                dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
+            return 0;
+        }
+    } else if (codeBlock->hasOptimizedReplacement()) {
+        if (Options::verboseOSR())
+            dataLog("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
+        // If we have an optimized replacement, then it must be the case that we entered
+        // cti_optimize from a loop. That's because if there's an optimized replacement,
+        // then all calls to this function will be relinked to the replacement and so
+        // the prologue OSR will never fire.
+        
+        // This is an interesting threshold check. Consider that a function OSR exits
+        // in the middle of a loop, while having a relatively low exit count. The exit
+        // will reset the execution counter to some target threshold, meaning that this
+        // code won't be reached until that loop heats up for >=1000 executions. But then
+        // we do a second check here, to see if we should either reoptimize, or just
+        // attempt OSR entry. Hence it might even be correct for
+        // shouldReoptimizeFromLoopNow() to always return true. But we make it do some
+        // additional checking anyway, to reduce the amount of recompilation thrashing.
+        if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
+            if (Options::verboseOSR()) {
+                dataLog(
+                    "Triggering reoptimization of ", *codeBlock,
+                    "(", *codeBlock->replacement(), ") (in loop).\n");
+            }
+            codeBlock->reoptimize();
+            return 0;
+        }
+    } else {
+        if (!codeBlock->shouldOptimizeNow()) {
+            if (Options::verboseOSR()) {
+                dataLog(
+                    "Delaying optimization for ", *codeBlock,
+                    " because of insufficient profiling.\n");
+            }
+            return 0;
+        }
+
+        if (Options::verboseOSR())
+            dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
+
+        unsigned numVarsWithValues;
+        if (bytecodeIndex)
+            numVarsWithValues = codeBlock->m_numVars;
+        else
+            numVarsWithValues = 0;
+        Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
+        for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+            int operand = mustHandleValues.operandForIndex(i);
+            if (operandIsArgument(operand)
+                && !VirtualRegister(operand).toArgument()
+                && codeBlock->codeType() == FunctionCode
+                && codeBlock->specializationKind() == CodeForConstruct) {
+                // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
+                // also never be used. It doesn't matter what we put into the value for this,
+                // but it has to be an actual value that can be grokked by subsequent DFG passes,
+                // so we sanitize it here by turning it into Undefined.
+                mustHandleValues[i] = jsUndefined();
+            } else
+                mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
+        }
+
+        CompilationResult result = DFG::compile(
+            vm, codeBlock->newReplacement().get(), DFG::DFGMode, bytecodeIndex,
+            mustHandleValues, JITToDFGDeferredCompilationCallback::create(),
+            vm.ensureWorklist());
+        
+        if (result != CompilationSuccessful)
+            return 0;
+    }
+    
+    CodeBlock* optimizedCodeBlock = codeBlock->replacement();
+    ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
+    
+    if (void* address = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
+        if (Options::verboseOSR()) {
+            dataLog(
+                "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ",
+                RawPointer(OUR_RETURN_ADDRESS), " -> ", RawPointer(address), ".\n");
+        }
+
+        codeBlock->optimizeSoon();
+        return static_cast<char*>(address);
+    }
+
+    if (Options::verboseOSR()) {
+        dataLog(
+            "Optimizing ", *codeBlock, " -> ", *codeBlock->replacement(),
+            " succeeded, OSR failed, after a delay of ",
+            codeBlock->optimizationDelayCounter(), ".\n");
+    }
+
+    // Count the OSR failure as a speculation failure. If this happens a lot, then
+    // reoptimize.
+    optimizedCodeBlock->countOSRExit();
+
+    // We are a lot more conservative about triggering reoptimization after OSR failure than
+    // before it. If we enter the optimize_from_loop trigger with a bucket full of fail
+    // already, then we really would like to reoptimize immediately. But this case covers
+    // something else: there weren't many (or any) speculation failures before, but we just
+    // failed to enter the speculative code because some variable had the wrong value or
+    // because the OSR code decided for any spurious reason that it did not want to OSR
+    // right now. So, we trigger reoptimization only upon the more conservative (non-loop)
+    // reoptimization trigger.
+    if (optimizedCodeBlock->shouldReoptimizeNow()) {
+        if (Options::verboseOSR()) {
+            dataLog(
+                "Triggering reoptimization of ", *codeBlock, " -> ",
+                *codeBlock->replacement(), " (after OSR fail).\n");
+        }
+        codeBlock->reoptimize();
+        return 0;
+    }
+
+    // OSR failed this time, but it might succeed next time! Let the code run a bit
+    // longer and then try again.
+    codeBlock->optimizeAfterWarmUp();
+    
+    return 0;
+}
+#endif
+
+void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedArrayValue, int32_t index, EncodedJSValue encodedValue)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    JSValue arrayValue = JSValue::decode(encodedArrayValue);
+    ASSERT(isJSArray(arrayValue));
+    asArray(arrayValue)->putDirectIndex(exec, index, JSValue::decode(encodedValue));
+}
+
+#if USE(JSVALUE64)
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    ASSERT(JSValue::decode(encodedObjectValue).isObject());
+    JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue));
+
+    GetterSetter* accessor = GetterSetter::create(vm);
+
+    JSValue getter = JSValue::decode(encodedGetterValue);
+    JSValue setter = JSValue::decode(encodedSetterValue);
+    ASSERT(getter.isObject() || getter.isUndefined());
+    ASSERT(setter.isObject() || setter.isUndefined());
+    ASSERT(getter.isObject() || setter.isObject());
+
+    if (!getter.isUndefined())
+        accessor->setGetter(vm, asObject(getter));
+    if (!setter.isUndefined())
+        accessor->setSetter(vm, asObject(setter));
+    baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
+}
+#else
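+// On 32-bit builds the JIT hands us the object, getter and setter as bare
+// cells rather than EncodedJSValues, since each already fits in one register.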
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter, JSCell* setter)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    ASSERT(object && object->isObject());
+    JSObject* baseObj = object->getObject();
+
+    GetterSetter* accessor = GetterSetter::create(vm);
+
+    ASSERT(!getter || getter->isObject());
+    ASSERT(!setter || setter->isObject());
+    ASSERT(getter || setter);
+
+    if (getter)
+        accessor->setGetter(vm, getter->getObject());
+    if (setter)
+        accessor->setSetter(vm, setter->getObject());
+    baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
+}
+#endif
+
+void JIT_OPERATION operationPushNameScope(ExecState* exec, Identifier* identifier, EncodedJSValue encodedValue, int32_t attributes)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    JSNameScope* scope = JSNameScope::create(exec, *identifier, JSValue::decode(encodedValue), attributes);
+
+    exec->setScope(scope);
+}
+
+void JIT_OPERATION operationPushWithScope(ExecState* exec, EncodedJSValue encodedValue)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    JSObject* o = JSValue::decode(encodedValue).toObject(exec);
+    if (vm.exception())
+        return;
+
+    exec->setScope(JSWithScope::create(exec, o));
+}
+
+void JIT_OPERATION operationPopScope(ExecState* exec)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    exec->setScope(exec->scope()->next());
+}
+
+void JIT_OPERATION operationProfileDidCall(ExecState* exec, EncodedJSValue encodedValue)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    if (LegacyProfiler* profiler = vm.enabledProfiler())
+        profiler->didExecute(exec, JSValue::decode(encodedValue));
+}
+
+void JIT_OPERATION operationProfileWillCall(ExecState* exec, EncodedJSValue encodedValue)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    if (LegacyProfiler* profiler = vm.enabledProfiler())
+        profiler->willExecute(exec, JSValue::decode(encodedValue));
+}
+
 EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBaseVal)
 {
     VM* vm = &exec->vm();
@@ -837,6 +1210,25 @@ JSCell* JIT_OPERATION operationCreateArguments(ExecState* exec)
     return result;
 }
 
+void JIT_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell)
+{
+    VM& vm = exec->vm();
+    NativeCallFrameTracer tracer(&vm, exec);
+
+    ASSERT(exec->codeBlock()->needsFullScopeChain());
+    jsCast<JSActivation*>(activationCell)->tearOff(vm);
+}
+
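+// Moved here unchanged from dfg/DFGOperations.cpp (see the removal in that
+// file above) so that both JIT tiers share a single definition.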
+void JIT_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell)
+{
+    ASSERT(exec->codeBlock()->usesArguments());
+    if (activationCell) {
+        jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec, jsCast<JSActivation*>(activationCell));
+        return;
+    }
+    jsCast<Arguments*>(argumentsCell)->tearOff(exec);
+}
+
 EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, const Identifier* identifier)
 {
     VM& vm = exec->vm();
Source/JavaScriptCore/jit/JITOperations.h
index 3587aba..f37732c 100644
@@ -60,6 +60,7 @@ class ArrayAllocationProfile;
     Idc: const Identifier*
     J: EncodedJSValue
     Jcp: const JSValue*
+    Jsa: JSActivation*
     Jss: JSString*
     O: JSObject*
     P: pointer (char*)
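+    (Example: V_JITOperation_EJZ reads as void (ExecState*, EncodedJSValue,
+    int32_t); the letters after the underscore spell the argument types in
+    order.)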
@@ -122,21 +123,30 @@ typedef size_t JIT_OPERATION (*S_JITOperation_EJJ)(ExecState*, EncodedJSValue, E
 typedef size_t JIT_OPERATION (*S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
 typedef size_t JIT_OPERATION (*S_JITOperation_J)(EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_E)(ExecState*);
-typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t, double);
-typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
 typedef void JIT_OPERATION (*V_JITOperation_EC)(ExecState*, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
+typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
+typedef void JIT_OPERATION (*V_JITOperation_ECICC)(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
 typedef void JIT_OPERATION (*V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
 typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_EIdJZ)(ExecState*, Identifier*, EncodedJSValue, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, StringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EJIdJJ)(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EJJI)(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*);
 typedef void JIT_OPERATION (*V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
+typedef void JIT_OPERATION (*V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t, double);
+typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EPc)(ExecState*, Instruction*);
 typedef void JIT_OPERATION (*V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_W)(WatchpointSet*);
+typedef void JIT_OPERATION (*V_JITOperation_EZ)(ExecState*, int32_t);
 typedef char* JIT_OPERATION (*P_JITOperation_E)(ExecState*);
 typedef char* JIT_OPERATION (*P_JITOperation_EC)(ExecState*, JSCell*);
 typedef char* JIT_OPERATION (*P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
@@ -151,6 +161,7 @@ typedef char* JIT_OPERATION (*P_JITOperation_EStJ)(ExecState*, Structure*, Encod
 typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EZ)(ExecState*, int32_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
 typedef StringImpl* JIT_OPERATION (*I_JITOperation_EJss)(ExecState*, JSString*);
 typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
@@ -227,10 +238,10 @@ char* JIT_OPERATION operationLinkCall(ExecState*) WTF_INTERNAL;
 char* JIT_OPERATION operationLinkClosureCall(ExecState*) WTF_INTERNAL;
 char* JIT_OPERATION operationVirtualConstruct(ExecState*) WTF_INTERNAL;
 char* JIT_OPERATION operationLinkConstruct(ExecState*) WTF_INTERNAL;
-size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
-size_t JIT_OPERATION operationCompareGreaterEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareGreaterEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 size_t JIT_OPERATION operationConvertJSValueToBoolean(ExecState*, EncodedJSValue) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 #if USE(JSVALUE64)
@@ -245,10 +256,28 @@ EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState*, Arr
 EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSCell*) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationNewObject(ExecState*, Structure*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL;
-
+void JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationThrowStaticError(ExecState*, EncodedJSValue, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationDebug(ExecState*, int32_t) WTF_INTERNAL;
+#if ENABLE(DFG_JIT)
+char* JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
+#endif
+void JIT_OPERATION operationPutByIndex(ExecState*, EncodedJSValue, int32_t, EncodedJSValue) WTF_INTERNAL;
+#if USE(JSVALUE64)
+void JIT_OPERATION operationPutGetterSetter(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+#else
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*) WTF_INTERNAL;
+#endif
+void JIT_OPERATION operationPushNameScope(ExecState*, Identifier*, EncodedJSValue, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationPushWithScope(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPopScope(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationProfileDidCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationProfileWillCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState*, EncodedJSValue, EncodedJSValue baseVal) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationCreateActivation(ExecState*, int32_t offset) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationCreateArguments(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffActivation(ExecState*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, const Identifier*) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationGetPNames(ExecState*, JSObject*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState*, EncodedJSValue, EncodedJSValue proto) WTF_INTERNAL;
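The typedef names above are a signature mnemonic: the letters before _JITOperation_ give the return type (V = void, P = char*, I = StringImpl*, Jss = JSString*, J = EncodedJSValue), and the trailing letters spell the arguments in order (E = ExecState*, J = EncodedJSValue, Z = int32_t, C = JSCell*, O = JSObject*, Id = Identifier*, I = StringImpl*, D = double, St = Structure*, Pc = Instruction*, W = WatchpointSet*). For instance, operationThrowStaticError pairs with V_JITOperation_EJZ. A quick compile-time check one could write (the variable name here is hypothetical):

    // (V)oid return; arguments: (E)xecState*, EncodedJSValue (J), int32_t (Z).
    // The assignment compiles only if the declaration matches the typedef:
    V_JITOperation_EJZ checkedThrowStaticError = operationThrowStaticError;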
index 0b7538c..7bfb823 100644 (file)
@@ -489,21 +489,17 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
 
 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_put_by_index);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
 }
 
 void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
 {
-    JITStubCall stubCall(this, cti_op_put_getter_setter);
-    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
-    stubCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
+    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
 }
 
 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
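callOperation replaces the old JITStubCall plumbing: the emitter loads operands into registers, and the helper marshals ExecState plus the remaining arguments into the C calling convention. A minimal sketch of the JSVALUE64 overload exercised by emit_op_put_by_index above, assuming the setupArgumentsWithExecState and appendCallWithExceptionCheck helpers from CCallHelpers.h/JITInlines.h:

    // Sketch: V_JITOperation_EJZJ takes ExecState*, EncodedJSValue, int32_t, EncodedJSValue.
    ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, GPRReg regOp1, int32_t op2, GPRReg regOp3)
    {
        setupArgumentsWithExecState(regOp1, TrustedImm32(op2), regOp3);
        return appendCallWithExceptionCheck(operation);
    }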
index 66a19c8..fa9d281 100644 (file)
@@ -57,12 +57,10 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
     int base = currentInstruction[1].u.operand;
     int property = currentInstruction[2].u.operand;
     int value = currentInstruction[3].u.operand;
-    
-    JITStubCall stubCall(this, cti_op_put_by_index);
-    stubCall.addArgument(base);
-    stubCall.addArgument(TrustedImm32(property));
-    stubCall.addArgument(value);
-    stubCall.call();
+
+    emitLoad(base, regT1, regT0);
+    emitLoad(value, regT3, regT2);
+    callOperation(operationPutByIndex, regT1, regT0, property, regT3, regT2);
 }
 
 void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
@@ -71,13 +69,11 @@ void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
     int property = currentInstruction[2].u.operand;
     int getter = currentInstruction[3].u.operand;
     int setter = currentInstruction[4].u.operand;
-    
-    JITStubCall stubCall(this, cti_op_put_getter_setter);
-    stubCall.addArgument(base);
-    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
-    stubCall.addArgument(getter);
-    stubCall.addArgument(setter);
-    stubCall.call();
+
+    emitLoadPayload(base, regT1);
+    emitLoadPayload(getter, regT3);
+    emitLoadPayload(setter, regT4);
+    callOperation(operationPutGetterSetter, regT1, &m_codeBlock->identifier(property), regT3, regT4);
 }
 
 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
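On JSVALUE32_64 an EncodedJSValue travels as a (tag, payload) register pair, which is why emitLoad fills two registers and callOperation consumes them pairwise. emit_op_put_getter_setter passes payloads only (regT1, regT3, regT4) because the 32-bit operationPutGetterSetter is declared over JSCell*, values the JIT already knows are cells. A sketch of the pairwise overload, under the same helper assumptions as the 64-bit version:

    // Sketch: payload is passed before tag, matching JSValue's in-memory layout;
    // any EABI alignment padding is elided here.
    ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, GPRReg tag1, GPRReg payload1, int32_t op2, GPRReg tag3, GPRReg payload3)
    {
        setupArgumentsWithExecState(payload1, tag1, TrustedImm32(op2), payload3, tag3);
        return appendCallWithExceptionCheck(operation);
    }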
index 398cc39..84d42c7 100644 (file)
@@ -368,18 +368,6 @@ template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFr
 #define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
 #endif
 
-DEFINE_STUB_FUNCTION(void, handle_watchdog_timer)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-    CallFrame* callFrame = stackFrame.callFrame;
-    VM* vm = stackFrame.vm;
-    if (UNLIKELY(vm->watchdog.didFire(callFrame))) {
-        vm->throwException(callFrame, createTerminatedExecutionException(vm));
-        VM_THROW_EXCEPTION_AT_END();
-        return;
-    }
-}
-
 DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
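The stub removed above reappears as operationHandleWatchdogTimer, declared in JITOperations.h. Every conversion in this patch follows the same shape: STUB_INIT_STACK_FRAME gives way to a NativeCallFrameTracer, the untyped stackFrame.args slots become typed parameters, and the exception-at-end bookkeeping moves to the JIT side (see appendCallWithExceptionCheck in the sketches above). A sketch of the replacement, assuming JSC's NativeCallFrameTracer and the helpers used by the removed body:

    void JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
    {
        VM& vm = exec->vm();
        NativeCallFrameTracer tracer(&vm, exec); // replaces STUB_INIT_STACK_FRAME
        if (UNLIKELY(vm.watchdog.didFire(exec)))
            vm.throwException(exec, createTerminatedExecutionException(&vm));
    }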
@@ -659,242 +647,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
     return JSValue::encode(result);
 }
 
-#if ENABLE(DFG_JIT)
-DEFINE_STUB_FUNCTION(void, optimize)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-    
-    // Defer GC so that it doesn't run between when we enter into this slow path and
-    // when we figure out the state of our code block. This prevents a number of
-    // awkward reentrancy scenarios, including:
-    //
-    // - The optimized version of our code block being jettisoned by GC right after
-    //   we concluded that we wanted to use it.
-    //
-    // - An optimized version of our code block being installed just as we decided
-    //   that it wasn't ready yet.
-    //
-    // This still leaves the following: anytime we return from cti_optimize, we may
-    // GC, and the GC may either jettison the optimized version of our code block,
-    // or it may install the optimized version of our code block even though we
-    // concluded that it wasn't ready yet.
-    //
-    // Note that jettisoning won't happen if we already initiated OSR, because in
-    // that case we would have already planted the optimized code block into the JS
-    // stack.
-    DeferGC deferGC(stackFrame.vm->heap);
-    
-    CallFrame* callFrame = stackFrame.callFrame;
-    CodeBlock* codeBlock = callFrame->codeBlock();
-    VM& vm = callFrame->vm();
-    unsigned bytecodeIndex = stackFrame.args[0].int32();
-
-    if (bytecodeIndex) {
-        // If we're attempting to OSR from a loop, assume that this should be
-        // separately optimized.
-        codeBlock->m_shouldAlwaysBeInlined = false;
-    }
-    
-    if (Options::verboseOSR()) {
-        dataLog(
-            *codeBlock, ": Entered optimize with bytecodeIndex = ", bytecodeIndex,
-            ", executeCounter = ", codeBlock->jitExecuteCounter(),
-            ", optimizationDelayCounter = ", codeBlock->reoptimizationRetryCounter(),
-            ", exitCounter = ");
-        if (codeBlock->hasOptimizedReplacement())
-            dataLog(codeBlock->replacement()->osrExitCounter());
-        else
-            dataLog("N/A");
-        dataLog("\n");
-    }
-
-    if (!codeBlock->checkIfOptimizationThresholdReached()) {
-        codeBlock->updateAllPredictions();
-        if (Options::verboseOSR())
-            dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
-        return;
-    }
-    
-    if (codeBlock->m_shouldAlwaysBeInlined) {
-        codeBlock->updateAllPredictions();
-        codeBlock->optimizeAfterWarmUp();
-        if (Options::verboseOSR())
-            dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
-        return;
-    }
-    
-    // We cannot be in the process of asynchronous compilation and also have an optimized
-    // replacement.
-    ASSERT(
-        !vm.worklist
-        || !(vm.worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
-             && codeBlock->hasOptimizedReplacement()));
-    
-    DFG::Worklist::State worklistState;
-    if (vm.worklist) {
-        // The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready
-        // (i.e. compiled) code blocks. But if it completes ours, we also need to know
-        // what the result was so that we don't plow ahead and attempt OSR or immediate
-        // reoptimization. This will have already also set the appropriate JIT execution
-        // count threshold depending on what happened, so if the compilation was anything
-        // but successful we just want to return early. See the case for worklistState ==
-        // DFG::Worklist::Compiled, below.
-        
-        // Note that we could have alternatively just called Worklist::compilationState()
-        // here, and if it returned Compiled, we could have then called
-        // completeAndScheduleOSR() below. But that would have meant that it could take
-        // longer for code blocks to be completed: they would only complete when *their*
-        // execution count trigger fired; but that could take a while since the firing is
-        // racy. It could also mean that code blocks that never run again after being
-        // compiled would sit on the worklist until next GC. That's fine, but it's
-        // probably a waste of memory. Our goal here is to complete code blocks as soon as
-        // possible in order to minimize the chances of us executing baseline code after
-        // optimized code is already available.
-        
-        worklistState = vm.worklist->completeAllReadyPlansForVM(
-            vm, DFG::CompilationKey(codeBlock, DFG::DFGMode));
-    } else
-        worklistState = DFG::Worklist::NotKnown;
-    
-    if (worklistState == DFG::Worklist::Compiling) {
-        // We cannot be in the process of asynchronous compilation and also have an optimized
-        // replacement.
-        RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement());
-        codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred);
-        return;
-    }
-    
-    if (worklistState == DFG::Worklist::Compiled) {
-        // If we don't have an optimized replacement but we did just get compiled, then
-        // the compilation failed or was invalidated, in which case the execution count
-        // thresholds have already been set appropriately by
-        // CodeBlock::setOptimizationThresholdBasedOnCompilationResult() and we have
-        // nothing left to do.
-        if (!codeBlock->hasOptimizedReplacement()) {
-            codeBlock->updateAllPredictions();
-            if (Options::verboseOSR())
-                dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
-            return;
-        }
-    } else if (codeBlock->hasOptimizedReplacement()) {
-        if (Options::verboseOSR())
-            dataLog("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
-        // If we have an optimized replacement, then it must be the case that we entered
-        // cti_optimize from a loop. That's because if there's an optimized replacement,
-        // then all calls to this function will be relinked to the replacement and so
-        // the prologue OSR will never fire.
-        
-        // This is an interesting threshold check. Consider that a function OSR exits
-        // in the middle of a loop, while having a relatively low exit count. The exit
-        // will reset the execution counter to some target threshold, meaning that this
-        // code won't be reached until that loop heats up for >=1000 executions. But then
-        // we do a second check here, to see if we should either reoptimize, or just
-        // attempt OSR entry. Hence it might even be correct for
-        // shouldReoptimizeFromLoopNow() to always return true. But we make it do some
-        // additional checking anyway, to reduce the amount of recompilation thrashing.
-        if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
-            if (Options::verboseOSR()) {
-                dataLog(
-                    "Triggering reoptimization of ", *codeBlock,
-                    "(", *codeBlock->replacement(), ") (in loop).\n");
-            }
-            codeBlock->reoptimize();
-            return;
-        }
-    } else {
-        if (!codeBlock->shouldOptimizeNow()) {
-            if (Options::verboseOSR()) {
-                dataLog(
-                    "Delaying optimization for ", *codeBlock,
-                    " because of insufficient profiling.\n");
-            }
-            return;
-        }
-        
-        if (Options::verboseOSR())
-            dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
-        
-        unsigned numVarsWithValues;
-        if (bytecodeIndex)
-            numVarsWithValues = codeBlock->m_numVars;
-        else
-            numVarsWithValues = 0;
-        Operands<JSValue> mustHandleValues(
-            codeBlock->numParameters(), numVarsWithValues);
-        for (size_t i = 0; i < mustHandleValues.size(); ++i) {
-            int operand = mustHandleValues.operandForIndex(i);
-            if (operandIsArgument(operand)
-                && !VirtualRegister(operand).toArgument()
-                && codeBlock->codeType() == FunctionCode
-                && codeBlock->specializationKind() == CodeForConstruct) {
-                // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
-                // also never be used. It doesn't matter what we put into the value for this,
-                // but it has to be an actual value that can be grokked by subsequent DFG passes,
-                // so we sanitize it here by turning it into Undefined.
-                mustHandleValues[i] = jsUndefined();
-            } else
-                mustHandleValues[i] = callFrame->uncheckedR(operand).jsValue();
-        }
-        
-        CompilationResult result = DFG::compile(
-            vm, codeBlock->newReplacement().get(), DFG::DFGMode, bytecodeIndex,
-            mustHandleValues, JITToDFGDeferredCompilationCallback::create(),
-            vm.ensureWorklist());
-        
-        if (result != CompilationSuccessful)
-            return;
-    }
-    
-    CodeBlock* optimizedCodeBlock = codeBlock->replacement();
-    ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
-    
-    if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
-        if (Options::verboseOSR()) {
-            dataLog(
-                "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ",
-                RawPointer((STUB_RETURN_ADDRESS).value()), " -> ", RawPointer(address), ".\n");
-        }
-
-        codeBlock->optimizeSoon();
-        STUB_SET_RETURN_ADDRESS(address);
-        return;
-    }
-
-    if (Options::verboseOSR()) {
-        dataLog(
-            "Optimizing ", *codeBlock, " -> ", *codeBlock->replacement(),
-            " succeeded, OSR failed, after a delay of ",
-            codeBlock->optimizationDelayCounter(), ".\n");
-    }
-
-    // Count the OSR failure as a speculation failure. If this happens a lot, then
-    // reoptimize.
-    optimizedCodeBlock->countOSRExit();
-    
-    // We are a lot more conservative about triggering reoptimization after OSR failure than
-    // before it. If we enter the optimize_from_loop trigger with a bucket full of fail
-    // already, then we really would like to reoptimize immediately. But this case covers
-    // something else: there weren't many (or any) speculation failures before, but we just
-    // failed to enter the speculative code because some variable had the wrong value or
-    // because the OSR code decided for any spurious reason that it did not want to OSR
-    // right now. So, we trigger reoptimization only upon the more conservative (non-loop)
-    // reoptimization trigger.
-    if (optimizedCodeBlock->shouldReoptimizeNow()) {
-        if (Options::verboseOSR()) {
-            dataLog(
-                "Triggering reoptimization of ", *codeBlock, " -> ",
-                *codeBlock->replacement(), " (after OSR fail).\n");
-        }
-        codeBlock->reoptimize();
-        return;
-    }
-
-    // OSR failed this time, but it might succeed next time! Let the code run a bit
-    // longer and then try again.
-    codeBlock->optimizeAfterWarmUp();
-}
-#endif // ENABLE(DFG_JIT)
-
 DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
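The tier-up logic itself carries over into operationOptimize, declared earlier as a P_JITOperation_EZ precisely so it can return an OSR entry address instead of patching the stub return address with STUB_SET_RETURN_ADDRESS. A condensed sketch of the decision ladder, with worklist handling and logging elided:

    char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex) // sketch only
    {
        DeferGC deferGC(exec->vm().heap);               // keep code-block state stable across the check
        CodeBlock* codeBlock = exec->codeBlock();
        if (bytecodeIndex)
            codeBlock->m_shouldAlwaysBeInlined = false; // loop OSR: optimize this block separately
        if (!codeBlock->checkIfOptimizationThresholdReached())
            return 0;                                   // not hot enough yet
        // ... query the DFG worklist: Compiling -> defer; Compiled without a replacement -> give up ...
        if (codeBlock->hasOptimizedReplacement() && codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
            codeBlock->reoptimize();                    // thrashing guard for loop OSR
            return 0;
        }
        // ... otherwise ensure a DFG replacement exists (kick off / complete a compile) ...
        CodeBlock* optimizedCodeBlock = codeBlock->replacement();
        if (void* address = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
            codeBlock->optimizeSoon();
            return static_cast<char*>(address);         // caller jumps here; no return-address patching
        }
        optimizedCodeBlock->countOSRExit();             // OSR failure counts as a speculation failure
        codeBlock->optimizeAfterWarmUp();               // let it run longer, then try again
        return 0;
    }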
@@ -917,22 +669,6 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
     arguments->tearOff(callFrame);
 }
 
-DEFINE_STUB_FUNCTION(void, op_profile_will_call)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
-        profiler->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void, op_profile_did_call)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
-        profiler->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
 static JSValue getByVal(
     CallFrame* callFrame, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress)
 {
@@ -1147,69 +883,6 @@ DEFINE_STUB_FUNCTION(void*, op_throw)
     return handler.callFrame;
 }
 
-DEFINE_STUB_FUNCTION(void, op_push_with_scope)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
-    CHECK_FOR_EXCEPTION_VOID();
-    stackFrame.callFrame->setScope(JSWithScope::create(stackFrame.callFrame, o));
-}
-
-DEFINE_STUB_FUNCTION(void, op_pop_scope)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    stackFrame.callFrame->setScope(stackFrame.callFrame->scope()->next());
-}
-
-DEFINE_STUB_FUNCTION(void, op_push_name_scope)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    JSNameScope* scope = JSNameScope::create(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), stackFrame.args[2].int32());
-
-    CallFrame* callFrame = stackFrame.callFrame;
-    callFrame->setScope(scope);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_index)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    CallFrame* callFrame = stackFrame.callFrame;
-    unsigned property = stackFrame.args[1].int32();
-
-    JSValue arrayValue = stackFrame.args[0].jsValue();
-    ASSERT(isJSArray(arrayValue));
-    asArray(arrayValue)->putDirectIndex(callFrame, property, stackFrame.args[2].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_getter_setter)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    CallFrame* callFrame = stackFrame.callFrame;
-
-    ASSERT(stackFrame.args[0].jsValue().isObject());
-    JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
-
-    VM& vm = callFrame->vm();
-    GetterSetter* accessor = GetterSetter::create(vm);
-
-    JSValue getter = stackFrame.args[2].jsValue();
-    JSValue setter = stackFrame.args[3].jsValue();
-    ASSERT(getter.isObject() || getter.isUndefined());
-    ASSERT(setter.isObject() || setter.isUndefined());
-    ASSERT(getter.isObject() || setter.isObject());
-
-    if (!getter.isUndefined())
-        accessor->setGetter(vm, asObject(getter));
-    if (!setter.isUndefined())
-        accessor->setSetter(vm, asObject(setter));
-    baseObj->putDirectAccessor(callFrame, stackFrame.args[1].identifier(), accessor, Accessor);
-}
-
 DEFINE_STUB_FUNCTION(void, op_throw_static_error)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
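The put_by_index and put_getter_setter bodies removed above carry over almost verbatim into the operations declared earlier, with JSValue::decode replacing stackFrame.args[i].jsValue(). A sketch of operationPutByIndex, under the same conversion shape as the watchdog example; operationPutGetterSetter follows suit, with its 32-bit variant declared over JSCell* since the JIT passes bare payloads there:

    void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedArrayValue, int32_t index, EncodedJSValue encodedValue)
    {
        VM& vm = exec->vm();
        NativeCallFrameTracer tracer(&vm, exec);

        JSValue arrayValue = JSValue::decode(encodedArrayValue);
        ASSERT(isJSArray(arrayValue));
        asArray(arrayValue)->putDirectIndex(exec, index, JSValue::decode(encodedValue));
    }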
@@ -1223,16 +896,6 @@ DEFINE_STUB_FUNCTION(void, op_throw_static_error)
     VM_THROW_EXCEPTION_AT_END();
 }
 
-DEFINE_STUB_FUNCTION(void, op_debug)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    CallFrame* callFrame = stackFrame.callFrame;
-
-    int debugHookID = stackFrame.args[0].int32();
-    stackFrame.vm->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID));
-}
-
 DEFINE_STUB_FUNCTION(void*, vm_throw)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
index ce8d32d..525a2a1 100644 (file)
@@ -343,23 +343,11 @@ EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION) WTF_
 EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 EncodedJSValue JIT_STUB cti_op_get_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_push_name_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_push_with_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_handle_watchdog_timer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void JIT_STUB cti_op_throw_static_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-#if ENABLE(DFG_JIT)
-void JIT_STUB cti_optimize(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-#endif
 void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION) WTF_INTERNAL;
 void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM WTF_INTERNAL;