2009-05-11 Geoffrey Garen <ggaren@apple.com>
author: ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 12 May 2009 04:20:29 +0000 (04:20 +0000)
committer: ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 12 May 2009 04:20:29 +0000 (04:20 +0000)
        Reviewed by Sam Weinig.

        A little more JIT refactoring.

        Rearranged code to more clearly indicate what's conditionally compiled
        and why. Now, all shared code is at the top of our JIT files, and all
        #if'd code is at the bottom. #if'd code is delineated by large comments.

        Moved functions that relate to the JIT but don't explicitly do codegen
        into JIT.cpp. Refactored SSE2 check to store its result as a data member
        in the JIT.

        * jit/JIT.cpp:
        (JSC::isSSE2Present):
        (JSC::JIT::JIT):
        (JSC::JIT::unlinkCall):
        (JSC::JIT::linkCall):
        * jit/JIT.h:
        (JSC::JIT::isSSE2Present):
        * jit/JITArithmetic.cpp:
        (JSC::JIT::emit_op_mod):
        (JSC::JIT::emitSlow_op_mod):
        * jit/JITCall.cpp:
        (JSC::JIT::compileOpCallVarargs):
        (JSC::JIT::compileOpCallVarargsSlowCase):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@43543 268f45cc-cd09-0410-ab3c-d52691b4dbfc

JavaScriptCore/ChangeLog
JavaScriptCore/jit/JIT.cpp
JavaScriptCore/jit/JIT.h
JavaScriptCore/jit/JITArithmetic.cpp
JavaScriptCore/jit/JITCall.cpp
JavaScriptCore/jit/JITPropertyAccess.cpp

index 5293d2d..8c0d4c3 100644 (file)
@@ -1,3 +1,31 @@
+2009-05-11  Geoffrey Garen  <ggaren@apple.com>
+
+        Reviewed by Sam Weinig.
+        
+        A little more JIT refactoring.
+        
+        Rearranged code to more clearly indicate what's conditionally compiled
+        and why. Now, all shared code is at the top of our JIT files, and all
+        #if'd code is at the bottom. #if'd code is delineated by large comments.
+        
+        Moved functions that relate to the JIT but don't explicitly do codegen
+        into JIT.cpp. Refactored SSE2 check to store its result as a data member
+        in the JIT.
+
+        * jit/JIT.cpp:
+        (JSC::isSSE2Present):
+        (JSC::JIT::JIT):
+        (JSC::JIT::unlinkCall):
+        (JSC::JIT::linkCall):
+        * jit/JIT.h:
+        (JSC::JIT::isSSE2Present):
+        * jit/JITArithmetic.cpp:
+        (JSC::JIT::emit_op_mod):
+        (JSC::JIT::emitSlow_op_mod):
+        * jit/JITCall.cpp:
+        (JSC::JIT::compileOpCallVarargs):
+        (JSC::JIT::compileOpCallVarargsSlowCase):
+
 2009-05-11  Holger Hans Peter Freyther  <zecke@selfish.org>
 
         Build fix.
index e762af1..6bd8510 100644 (file)
@@ -56,6 +56,53 @@ void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress retu
     returnAddress.relinkNearCallerToFunction(newCalleeFunction);
 }
 
+// All X86 Macs are guaranteed to support at least SSE2
+#if PLATFORM(X86_64) || (PLATFORM(X86) && PLATFORM(MAC))
+
+static inline bool isSSE2Present()
+{
+    return true;
+}
+
+#else
+
+static bool isSSE2Present()
+{
+    static const int SSE2FeatureBit = 1 << 26;
+    struct SSE2Check {
+        SSE2Check()
+        {
+            int flags;
+#if COMPILER(MSVC)
+            _asm {
+                mov eax, 1 // cpuid function 1 gives us the standard feature set
+                cpuid;
+                mov flags, edx;
+            }
+#elif COMPILER(GCC)
+            asm (
+                 "movl $0x1, %%eax;"
+                 "pushl %%ebx;"
+                 "cpuid;"
+                 "popl %%ebx;"
+                 "movl %%edx, %0;"
+                 : "=g" (flags)
+                 :
+                 : "%eax", "%ecx", "%edx"
+                 );
+#else
+            flags = 0;
+#endif
+            present = (flags & SSE2FeatureBit) != 0;
+        }
+        bool present;
+    };
+    static SSE2Check check;
+    return check.present;
+}
+
+#endif // PLATFORM(X86_64) || (PLATFORM(X86) && PLATFORM(MAC))
+
 JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
     : m_interpreter(globalData->interpreter)
     , m_globalData(globalData)
@@ -65,6 +112,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
     , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
     , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
     , m_jumpTargetsPosition(0)
+    , m_isSSE2Present(JSC::isSSE2Present())
 {
 }
 
@@ -887,6 +935,32 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
     storePtr(src, Address(variableObject, index * sizeof(Register)));
 }
 
+void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+{
+    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
+    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
+    // match).  Reset the check so it no longer matches.
+    callLinkInfo->hotPathBegin.repatch(JSValue::encode(JSValue()));
+}
+
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
+{
+    // Currently we only link calls with the exact number of arguments.
+    // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
+    if (!calleeCodeBlock || callerArgCount == calleeCodeBlock->m_numParameters) {
+        ASSERT(!callLinkInfo->isLinked());
+    
+        if (calleeCodeBlock)
+            calleeCodeBlock->addCaller(callLinkInfo);
+    
+        callLinkInfo->hotPathBegin.repatch(callee);
+        callLinkInfo->hotPathOther.relink(ctiCode.addressForCall());
+    }
+
+    // patch the instruction that jumps out to the cold path, so that we only try to link once.
+    callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
+}
+
 } // namespace JSC
 
 #endif // ENABLE(JIT)
index dc8cbb3..145194d 100644 (file)
@@ -633,6 +633,8 @@ namespace JSC {
         void sampleInstruction(Instruction*, bool) {}
 #endif
 
+        bool isSSE2Present() const { return m_isSSE2Present; }
+
         Interpreter* m_interpreter;
         JSGlobalData* m_globalData;
         CodeBlock* m_codeBlock;
@@ -650,6 +652,7 @@ namespace JSC {
 
         int m_lastResultBytecodeRegister;
         unsigned m_jumpTargetsPosition;
+        const bool m_isSSE2Present;
 
         unsigned m_propertyAccessInstructionIndex;
         unsigned m_globalResolveInfoIndex;
index 13d694e..53955fc 100644 (file)
 
 using namespace std;
 
-// All X86 Macs are guaranteed to support at least SSE2
-#if PLATFORM(X86_64) || (PLATFORM(X86) && PLATFORM(MAC))
-
-static inline bool isSSE2Present()
-{
-    return true;
-}
-
-#else
-
-static bool isSSE2Present()
-{
-    static const int SSE2FeatureBit = 1 << 26;
-    struct SSE2Check {
-        SSE2Check()
-        {
-            int flags;
-#if COMPILER(MSVC)
-            _asm {
-                mov eax, 1 // cpuid function 1 gives us the standard feature set
-                cpuid;
-                mov flags, edx;
-            }
-#elif COMPILER(GCC)
-            asm (
-                 "movl $0x1, %%eax;"
-                 "pushl %%ebx;"
-                 "cpuid;"
-                 "popl %%ebx;"
-                 "movl %%edx, %0;"
-                 : "=g" (flags)
-                 :
-                 : "%eax", "%ecx", "%edx"
-                 );
-#else
-            flags = 0;
-#endif
-            present = (flags & SSE2FeatureBit) != 0;
-        }
-        bool present;
-    };
-    static SSE2Check check;
-    return check.present;
-}
-
-#endif
-
 namespace JSC {
 
 void JIT::emit_op_lshift(Instruction* currentInstruction)
@@ -119,6 +72,7 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
     emitFastArithReTagImmediate(regT0, regT0);
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -277,6 +231,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
         addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
     }
 }
+
 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned op1 = currentInstruction[1].u.operand;
@@ -464,6 +419,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
         addJump(branch32(GreaterThan, regT0, regT1), target + 3);
     }
 }
+
 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned op1 = currentInstruction[1].u.operand;
@@ -649,6 +605,7 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
     }
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -674,70 +631,6 @@ void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEnt
     }
 }
 
-#if PLATFORM(X86) || PLATFORM(X86_64)
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned op1 = currentInstruction[2].u.operand;
-    unsigned op2 = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
-    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
-    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
-#if USE(ALTERNATE_JSIMMEDIATE)
-    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
-    m_assembler.cdq();
-    m_assembler.idivl_r(X86::ecx);
-#else
-    emitFastArithDeTagImmediate(X86::eax);
-    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
-    m_assembler.cdq();
-    m_assembler.idivl_r(X86::ecx);
-    signExtend32ToPtr(X86::edx, X86::edx);
-#endif
-    emitFastArithReTagImmediate(X86::edx, X86::eax);
-    emitPutVirtualRegister(result);
-}
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned result = currentInstruction[1].u.operand;
-
-#if USE(ALTERNATE_JSIMMEDIATE)
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-#else
-    Jump notImm1 = getSlowCase(iter);
-    Jump notImm2 = getSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithReTagImmediate(X86::eax, X86::eax);
-    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
-    notImm1.link(this);
-    notImm2.link(this);
-#endif
-    JITStubCall stubCall(this, JITStubs::cti_op_mod);
-    stubCall.addArgument(X86::eax);
-    stubCall.addArgument(X86::ecx);
-    stubCall.call(result);
-}
-#else
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned op1 = currentInstruction[2].u.operand;
-    unsigned op2 = currentInstruction[3].u.operand;
-
-    JITStubCall stubCall(this, JITStubs::cti_op_mod);
-    stubCall.addArgument(op1, regT2);
-    stubCall.addArgument(op2, regT2);
-    stubCall.call(result);
-}
-void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
-    ASSERT_NOT_REACHED();
-}
-#endif
-
 void JIT::emit_op_post_inc(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -788,6 +681,7 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
     emitPutVirtualRegister(srcDst, regT1);
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -816,6 +710,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
 #endif
     emitPutVirtualRegister(srcDst);
 }
+
 void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
@@ -844,6 +739,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
 #endif
     emitPutVirtualRegister(srcDst);
 }
+
 void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
@@ -857,9 +753,84 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
     stubCall.call(srcDst);
 }
 
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
+#if USE(ALTERNATE_JSIMMEDIATE)
+    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
+    m_assembler.cdq();
+    m_assembler.idivl_r(X86::ecx);
+#else
+    emitFastArithDeTagImmediate(X86::eax);
+    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+    m_assembler.cdq();
+    m_assembler.idivl_r(X86::ecx);
+    signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+    emitFastArithReTagImmediate(X86::edx, X86::eax);
+    emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned result = currentInstruction[1].u.operand;
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+#else
+    Jump notImm1 = getSlowCase(iter);
+    Jump notImm2 = getSlowCase(iter);
+    linkSlowCase(iter);
+    emitFastArithReTagImmediate(X86::eax, X86::eax);
+    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+    notImm1.link(this);
+    notImm2.link(this);
+#endif
+    JITStubCall stubCall(this, JITStubs::cti_op_mod);
+    stubCall.addArgument(X86::eax);
+    stubCall.addArgument(X86::ecx);
+    stubCall.call(result);
+}
+
+#else // PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
+    JITStubCall stubCall(this, JITStubs::cti_op_mod);
+    stubCall.addArgument(op1, regT2);
+    stubCall.addArgument(op2, regT2);
+    stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+    ASSERT_NOT_REACHED();
+}
+
+#endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
 
 #if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
 
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
 void JIT::emit_op_add(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -913,6 +884,8 @@ void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
 
 #elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
 
+/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
 {
     emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
@@ -1017,6 +990,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -1058,6 +1032,7 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -1089,6 +1064,7 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
+
 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
@@ -1099,7 +1075,9 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
     compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
 }
 
-#else
+#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
 
 typedef X86Assembler::JmpSrc JmpSrc;
 typedef X86Assembler::JmpDst JmpDst;
@@ -1405,12 +1383,15 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
 {
     compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
+
 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
 
-#endif
+#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
 
 } // namespace JSC
 
index 96543af..a9b5a33 100644 (file)
@@ -45,33 +45,6 @@ using namespace std;
 
 namespace JSC {
 
-void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
-{
-    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
-    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
-    // match).  Reset the check so it no longer matches.
-    callLinkInfo->hotPathBegin.repatch(JSValue::encode(JSValue()));
-}
-
-//void JIT::linkCall(JSFunction* , CodeBlock* , JITCode , CallLinkInfo* callLinkInfo, int )
-void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
-{
-    // Currently we only link calls with the exact number of arguments.
-    // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
-    if (!calleeCodeBlock || callerArgCount == calleeCodeBlock->m_numParameters) {
-        ASSERT(!callLinkInfo->isLinked());
-    
-        if (calleeCodeBlock)
-            calleeCodeBlock->addCaller(callLinkInfo);
-    
-        callLinkInfo->hotPathBegin.repatch(callee);
-        callLinkInfo->hotPathOther.relink(ctiCode.addressForCall());
-    }
-
-    // patch the instruction that jumps out to the cold path, so that we only try to link once.
-    callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
-}
-
 void JIT::compileOpCallInitializeCallFrame()
 {
     store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
@@ -120,8 +93,51 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
     emitPutJITStubArgConstant(thisRegister, 5);
 }
 
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCountRegister = instruction[3].u.operand;
+
+    emitGetVirtualRegister(argCountRegister, regT1);
+    emitGetVirtualRegister(callee, regT2);
+    compileOpCallVarargsSetupArgs(instruction);
+
+    // Check for JSFunctions.
+    emitJumpSlowCaseIfNotJSCell(regT2);
+    addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+    
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    mul32(Imm32(sizeof(Register)), regT0, regT0);
+    intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
+    addPtr(Imm32((int32_t)offset), regT0, regT3);
+    addPtr(callFrameRegister, regT3);
+    storePtr(callFrameRegister, regT3);
+    addPtr(regT0, callFrameRegister);
+    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+    
+    sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int dst = instruction[1].u.operand;
+    
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
+    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+    
+    sampleCodeBlock(m_codeBlock);
+}
+    
 #if !ENABLE(JIT_OPTIMIZE_CALL)
 
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
 void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
 {
     int dst = instruction[1].u.operand;
@@ -181,7 +197,9 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
     sampleCodeBlock(m_codeBlock);
 }
 
-#else
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
 
 void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
 {
@@ -325,49 +343,10 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
     sampleCodeBlock(m_codeBlock);
 }
 
-#endif
-
-void JIT::compileOpCallVarargs(Instruction* instruction)
-{
-    int dst = instruction[1].u.operand;
-    int callee = instruction[2].u.operand;
-    int argCountRegister = instruction[3].u.operand;
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
 
-    emitGetVirtualRegister(argCountRegister, regT1);
-    emitGetVirtualRegister(callee, regT2);
-    compileOpCallVarargsSetupArgs(instruction);
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
 
-    // Check for JSFunctions.
-    emitJumpSlowCaseIfNotJSCell(regT2);
-    addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
-    
-    // Speculatively roll the callframe, assuming argCount will match the arity.
-    mul32(Imm32(sizeof(Register)), regT0, regT0);
-    intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
-    addPtr(Imm32((int32_t)offset), regT0, regT3);
-    addPtr(callFrameRegister, regT3);
-    storePtr(callFrameRegister, regT3);
-    addPtr(regT0, callFrameRegister);
-    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
-    // Put the return value in dst. In the interpreter, op_ret does this.
-    emitPutVirtualRegister(dst);
-    
-    sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int dst = instruction[1].u.operand;
-    
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
-    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
-    
-    sampleCodeBlock(m_codeBlock);
-}
-    
 } // namespace JSC
 
 #endif // ENABLE(JIT)
index 9883ca0..b6abc8f 100644 (file)
@@ -146,8 +146,11 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
     stubCall.call(currentInstruction[1].u.operand);
 }
 
+
 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
 
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
 {
     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
@@ -188,7 +191,9 @@ void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::i
     ASSERT_NOT_REACHED();
 }
 
-#else
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
 
 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
 {
@@ -722,6 +727,8 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
     jumpLocation.relink(entryLabel);
 }
 
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
 
 } // namespace JSC