2009-05-11 Sam Weinig <sam@webkit.org>
authorweinig@apple.com <weinig@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 12 May 2009 01:06:58 +0000 (01:06 +0000)
committerweinig@apple.com <weinig@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 12 May 2009 01:06:58 +0000 (01:06 +0000)
        Reviewed by Geoffrey Garen.

        Start re-factoring JIT code generation to move op_code generation
        to helper functions outside the main switch-statement and give those
        helper functions standardized names.  This patch only covers the main
        pass and all the arithmetic opcodes in the slow path.

        * JavaScriptCore.xcodeproj/project.pbxproj:
        * jit/JIT.cpp:
        (JSC::JIT::privateCompileMainPass):
        (JSC::JIT::privateCompileSlowCases):
        * jit/JIT.h:
        * jit/JITArithmetic.cpp:
        * jit/JITOpcodes.cpp: Copied from jit/JIT.cpp.
        * jit/JITPropertyAccess.cpp:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@43531 268f45cc-cd09-0410-ab3c-d52691b4dbfc

JavaScriptCore/ChangeLog
JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
JavaScriptCore/jit/JIT.cpp
JavaScriptCore/jit/JIT.h
JavaScriptCore/jit/JITArithmetic.cpp
JavaScriptCore/jit/JITOpcodes.cpp [new file with mode: 0644]
JavaScriptCore/jit/JITPropertyAccess.cpp

index 6a05b64..e4a3c26 100644 (file)
@@ -1,3 +1,21 @@
+2009-05-11  Sam Weinig  <sam@webkit.org>
+
+        Reviewed by Geoffrey Garen.
+
+        Start re-factoring JIT code generation to move op_code generation
+        to helper functions outside the main switch-statement and give those
+        helper functions standardized names.  This patch only covers the main
+        pass and all the arithmetic opcodes in the slow path.
+
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * jit/JIT.cpp:
+        (JSC::JIT::privateCompileMainPass):
+        (JSC::JIT::privateCompileSlowCases):
+        * jit/JIT.h:
+        * jit/JITArithmetic.cpp:
+        * jit/JITOpcodes.cpp: Copied from jit/JIT.cpp.
+        * jit/JITPropertyAccess.cpp:
+
 2009-05-11  Steve Falkenburg  <sfalken@apple.com>
 
         Re-add experimental PGO configs.
index af86aef..882d0e2 100644 (file)
                BCD2034A0E17135E002C7E82 /* DateConstructor.h in Headers */ = {isa = PBXBuildFile; fileRef = BCD203460E17135E002C7E82 /* DateConstructor.h */; };
                BCD2034C0E17135E002C7E82 /* DatePrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = BCD203480E17135E002C7E82 /* DatePrototype.h */; };
                BCD203E80E1718F4002C7E82 /* DatePrototype.lut.h in Headers */ = {isa = PBXBuildFile; fileRef = BCD203E70E1718F4002C7E82 /* DatePrototype.lut.h */; };
+               BCDD51EB0FB8DF74004A8BDC /* JITOpcodes.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BCDD51E90FB8DF74004A8BDC /* JITOpcodes.cpp */; };
                BCDE3AB80E6C82F5001453A7 /* Structure.h in Headers */ = {isa = PBXBuildFile; fileRef = BCDE3AB10E6C82CF001453A7 /* Structure.h */; settings = {ATTRIBUTES = (Private, ); }; };
                BCDE3B430E6C832D001453A7 /* Structure.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BCDE3AB00E6C82CF001453A7 /* Structure.cpp */; };
                BCF605140E203EF800B9A64D /* ArgList.h in Headers */ = {isa = PBXBuildFile; fileRef = BCF605120E203EF800B9A64D /* ArgList.h */; settings = {ATTRIBUTES = (Private, ); }; };
                BCD203470E17135E002C7E82 /* DatePrototype.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DatePrototype.cpp; sourceTree = "<group>"; };
                BCD203480E17135E002C7E82 /* DatePrototype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DatePrototype.h; sourceTree = "<group>"; };
                BCD203E70E1718F4002C7E82 /* DatePrototype.lut.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DatePrototype.lut.h; sourceTree = "<group>"; };
+               BCDD51E90FB8DF74004A8BDC /* JITOpcodes.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITOpcodes.cpp; sourceTree = "<group>"; };
                BCDE3AB00E6C82CF001453A7 /* Structure.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Structure.cpp; sourceTree = "<group>"; };
                BCDE3AB10E6C82CF001453A7 /* Structure.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Structure.h; sourceTree = "<group>"; };
                BCF605110E203EF800B9A64D /* ArgList.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ArgList.cpp; sourceTree = "<group>"; };
                                A782F1A40EEC9FA20036273F /* ExecutableAllocatorPosix.cpp */,
                                86DB645F0F954E9100D7D921 /* ExecutableAllocatorWin.cpp */,
                                1429D92D0ED22D7000B89619 /* JIT.cpp */,
+                               BCDD51E90FB8DF74004A8BDC /* JITOpcodes.cpp */,
                                1429D92E0ED22D7000B89619 /* JIT.h */,
                                86A90ECF0EE7D51F00AB350D /* JITArithmetic.cpp */,
                                86CC85A20EE79B7400288682 /* JITCall.cpp */,
                                86DB64640F95C6FC00D7D921 /* ExecutableAllocatorFixedVMPool.cpp in Sources */,
                                A7E2EA6C0FB460CF00601F06 /* LiteralParser.cpp in Sources */,
                                93052C340FB792190048FDC3 /* ParserArena.cpp in Sources */,
+                               BCDD51EB0FB8DF74004A8BDC /* JITOpcodes.cpp in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
index 15aa106..3223a5a 100644 (file)
@@ -121,16 +121,14 @@ void JIT::emitTimeoutCheck()
         NEXT_OPCODE(name); \
     }
 
-#define RECORD_JUMP_TARGET(targetOffset) \
-   do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
-
 void JIT::privateCompileMainPass()
 {
     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
     unsigned instructionCount = m_codeBlock->instructions().size();
-    unsigned propertyAccessInstructionIndex = 0;
-    unsigned globalResolveInfoIndex = 0;
-    unsigned callLinkInfoIndex = 0;
+
+    m_propertyAccessInstructionIndex = 0;
+    m_globalResolveInfoIndex = 0;
+    m_callLinkInfoIndex = 0;
 
     for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
         Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
@@ -163,952 +161,410 @@ void JIT::privateCompileMainPass()
         DEFINE_UNARY_OP(op_is_undefined)
         DEFINE_UNARY_OP(op_negate)
         DEFINE_UNARY_OP(op_typeof)
-        case op_mov: {
-            int src = currentInstruction[2].u.operand;
-            int dst = currentInstruction[1].u.operand;
-
-            if (m_codeBlock->isConstantRegisterIndex(src)) {
-                storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
-                if (dst == m_lastResultBytecodeRegister)
-                    killLastResultRegister();
-            } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
-                // If either the src or dst is the cached register go though
-                // get/put registers to make sure we track this correctly.
-                emitGetVirtualRegister(src, regT0);
-                emitPutVirtualRegister(dst);
-            } else {
-                // Perform the copy via regT1; do not disturb any mapping in regT0.
-                loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
-                storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
-            }
-            NEXT_OPCODE(op_mov);
-        }
+
+        // Arithmetic
+
         case op_add: {
-            compileFastArith_op_add(currentInstruction);
+            emit_op_add(currentInstruction);
             NEXT_OPCODE(op_add);
         }
-        case op_end: {
-            if (m_codeBlock->needsFullScopeChain())
-                JITStubCall(this, JITStubs::cti_op_end).call();
-            ASSERT(returnValueRegister != callFrameRegister);
-            emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
-            push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
-            ret();
-            NEXT_OPCODE(op_end);
+        case op_sub: {
+            emit_op_sub(currentInstruction);
+            NEXT_OPCODE(op_sub);
         }
-        case op_jmp: {
-            unsigned target = currentInstruction[1].u.operand;
-            addJump(jump(), target + 1);
-            RECORD_JUMP_TARGET(target + 1);
-            NEXT_OPCODE(op_jmp);
+        case op_mul: {
+            emit_op_mul(currentInstruction);
+            NEXT_OPCODE(op_mul);
+        }
+        case op_mod: {
+            emit_op_mod(currentInstruction);
+            NEXT_OPCODE(op_mod);
+        }
+        case op_bitand: {
+            emit_op_bitand(currentInstruction);
+            NEXT_OPCODE(op_bitand);
+        }
+        case op_lshift: {
+            emit_op_lshift(currentInstruction);
+            NEXT_OPCODE(op_lshift);
+        }
+        case op_rshift: {
+            emit_op_rshift(currentInstruction);
+            NEXT_OPCODE(op_rshift);
         }
         case op_pre_inc: {
-            compileFastArith_op_pre_inc(currentInstruction[1].u.operand);
+            emit_op_pre_inc(currentInstruction);
             NEXT_OPCODE(op_pre_inc);
         }
-        case op_loop: {
-            emitTimeoutCheck();
-
-            unsigned target = currentInstruction[1].u.operand;
-            addJump(jump(), target + 1);
-            NEXT_OPCODE(op_end);
+        case op_pre_dec: {
+            emit_op_pre_dec(currentInstruction);
+            NEXT_OPCODE(op_pre_dec);
         }
-        case op_loop_if_less: {
-            emitTimeoutCheck();
-
-            unsigned op1 = currentInstruction[1].u.operand;
-            unsigned op2 = currentInstruction[2].u.operand;
-            unsigned target = currentInstruction[3].u.operand;
-            if (isOperandConstantImmediateInt(op2)) {
-                emitGetVirtualRegister(op1, regT0);
-                emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
-                int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
-                int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
-                addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
-            } else if (isOperandConstantImmediateInt(op1)) {
-                emitGetVirtualRegister(op2, regT1);
-                emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
-                int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
-                int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
-                addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target + 3);
-            } else {
-                emitGetVirtualRegisters(op1, regT0, op2, regT1);
-                emitJumpSlowCaseIfNotImmediateInteger(regT0);
-                emitJumpSlowCaseIfNotImmediateInteger(regT1);
-                addJump(branch32(LessThan, regT0, regT1), target + 3);
-            }
-            NEXT_OPCODE(op_loop_if_less);
+        case op_post_inc: {
+            emit_op_post_inc(currentInstruction);
+            NEXT_OPCODE(op_post_inc);
+        }
+        case op_post_dec: {
+            emit_op_post_dec(currentInstruction);
+            NEXT_OPCODE(op_post_dec);
         }
-        case op_loop_if_lesseq: {
-            emitTimeoutCheck();
 
-            unsigned op1 = currentInstruction[1].u.operand;
-            unsigned op2 = currentInstruction[2].u.operand;
-            unsigned target = currentInstruction[3].u.operand;
-            if (isOperandConstantImmediateInt(op2)) {
-                emitGetVirtualRegister(op1, regT0);
-                emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
-                int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
-                int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
-                addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
-            } else {
-                emitGetVirtualRegisters(op1, regT0, op2, regT1);
-                emitJumpSlowCaseIfNotImmediateInteger(regT0);
-                emitJumpSlowCaseIfNotImmediateInteger(regT1);
-                addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
-            }
-            NEXT_OPCODE(op_loop_if_less);
+        /* in JITOpcodes */
+        case op_bitnot: {
+            emit_op_bitnot(currentInstruction);
+            NEXT_OPCODE(op_bitnot);
         }
-        case op_new_object: {
-            JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_new_object);
+        case op_bitxor: {
+            emit_op_bitxor(currentInstruction);
+            NEXT_OPCODE(op_bitxor);
         }
-        case op_put_by_id: {
-            compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, propertyAccessInstructionIndex++);
-            NEXT_OPCODE(op_put_by_id);
+        case op_bitor: {
+            emit_op_bitor(currentInstruction);
+            NEXT_OPCODE(op_bitor);
         }
-        case op_get_by_id: {
-            compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), propertyAccessInstructionIndex++);
-            NEXT_OPCODE(op_get_by_id);
+        case op_not: {
+            emit_op_not(currentInstruction);
+            NEXT_OPCODE(op_not);
         }
-        case op_instanceof: {
-            emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); // value
-            emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); // baseVal
-            emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // proto
-
-            // check if any are immediates
-            move(regT0, regT3);
-            orPtr(regT2, regT3);
-            orPtr(regT1, regT3);
-            emitJumpSlowCaseIfNotJSCell(regT3);
-
-            // check that all are object type - this is a bit of a bithack to avoid excess branching;
-            // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
-            // this works because NumberType and StringType are smaller
-            move(Imm32(3 * ObjectType), regT3);
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
-            loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT1);
-            sub32(Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
-            sub32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
-            addSlowCase(branch32(NotEqual, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3));
+        /* in JITOpcodes */
 
-            // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
-            load32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), regT2);
-            and32(Imm32(ImplementsHasInstance | OverridesHasInstance), regT2);
-            addSlowCase(branch32(NotEqual, regT2, Imm32(ImplementsHasInstance)));
 
-            emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); // reload value
-            emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // reload proto
+        // Comparison
 
-            // optimistically load true result
-            move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
-
-            Label loop(this);
-
-            // load value's prototype
-            loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+        case op_eq: {
+            emit_op_eq(currentInstruction);
+            NEXT_OPCODE(op_eq);
+        }
+        case op_neq: {
+            emit_op_neq(currentInstruction);
+            NEXT_OPCODE(op_neq);
+        }
+        case op_eq_null: {
+            emit_op_eq_null(currentInstruction);
+            NEXT_OPCODE(op_eq_null);
+        }
+        case op_neq_null: {
+            emit_op_neq_null(currentInstruction);
+            NEXT_OPCODE(op_neq_null);
+        }
+        case op_stricteq: {
+            emit_op_stricteq(currentInstruction);
+            NEXT_OPCODE(op_stricteq);
+        }
+        case op_nstricteq: {
+            emit_op_nstricteq(currentInstruction);
+            NEXT_OPCODE(op_nstricteq);
+        }
 
-            Jump exit = branchPtr(Equal, regT2, regT1);
 
-            branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
+        // Jump / Loop
 
-            move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+        case op_jnless: {
+            emit_op_jnless(currentInstruction);
+            NEXT_OPCODE(op_jnless);
+        }
+        case op_jnlesseq: {
+            emit_op_jnlesseq(currentInstruction);
+            NEXT_OPCODE(op_jnlesseq);
+        }
+        case op_jmp: {
+            emit_op_jmp(currentInstruction);
+            NEXT_OPCODE(op_jmp);
+        }
+        case op_loop: {
+            emit_op_loop(currentInstruction);
+            NEXT_OPCODE(op_loop);
+        }
+        case op_loop_if_less: {
+            emit_op_loop_if_less(currentInstruction);
+            NEXT_OPCODE(op_loop_if_less);
+        }
+        case op_loop_if_lesseq: {
+            emit_op_loop_if_lesseq(currentInstruction);
+            NEXT_OPCODE(op_loop_if_lesseq);
+        }
+        case op_loop_if_true: {
+            emit_op_loop_if_true(currentInstruction);
+            NEXT_OPCODE(op_loop_if_true);
+        }
+        case op_jtrue: {
+            emit_op_jtrue(currentInstruction);
+            NEXT_OPCODE(op_jtrue);
+        }
+        case op_jfalse: {
+            emit_op_jfalse(currentInstruction);
+            NEXT_OPCODE(op_jfalse);
+        }
+        case op_jeq_null: {
+            emit_op_jeq_null(currentInstruction);
+            NEXT_OPCODE(op_jeq_null);
+        }
+        case op_jneq_null: {
+            emit_op_jneq_null(currentInstruction);
+            NEXT_OPCODE(op_jneq_null);
+        }
+        case op_jneq_ptr: {
+            emit_op_jneq_ptr(currentInstruction);
+            NEXT_OPCODE(op_jneq_ptr);
+        }
 
-            exit.link(this);
 
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+        // Property Access
 
-            NEXT_OPCODE(op_instanceof);
+        case op_get_by_id: {
+            emit_op_get_by_id(currentInstruction);
+            NEXT_OPCODE(op_get_by_id);
+        }
+        case op_put_by_id: {
+            emit_op_put_by_id(currentInstruction);
+            NEXT_OPCODE(op_put_by_id);
         }
         case op_del_by_id: {
-            JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
-            stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_del_by_id(currentInstruction);
             NEXT_OPCODE(op_del_by_id);
         }
-        case op_mul: {
-            compileFastArith_op_mul(currentInstruction);
-            NEXT_OPCODE(op_mul);
-        }
-        case op_new_func: {
-            JITStubCall stubCall(this, JITStubs::cti_op_new_func);
-            stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_new_func);
-        }
-        case op_call: {
-            compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
-            NEXT_OPCODE(op_call);
+        case op_get_by_val: {
+            emit_op_get_by_val(currentInstruction);
+            NEXT_OPCODE(op_get_by_val);
         }
-        case op_call_eval: {
-            compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
-            NEXT_OPCODE(op_call_eval);
+        case op_put_by_val: {
+            emit_op_put_by_val(currentInstruction);
+            NEXT_OPCODE(op_put_by_val);
         }
-        case op_load_varargs: {
-            JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.call(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_load_varargs);
+        case op_put_by_index: {
+            emit_op_put_by_index(currentInstruction);
+            NEXT_OPCODE(op_put_by_index);
         }
-        case op_call_varargs: {
-            compileOpCallVarargs(currentInstruction);
-            NEXT_OPCODE(op_call_varargs);
+        case op_put_getter: {
+            emit_op_put_getter(currentInstruction);
+            NEXT_OPCODE(op_put_getter);
         }
-        case op_construct: {
-            compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
-            NEXT_OPCODE(op_construct);
+        case op_put_setter: {
+            emit_op_put_setter(currentInstruction);
+            NEXT_OPCODE(op_put_setter);
         }
+        
+        // Variables
+
         case op_get_global_var: {
-            JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
-            move(ImmPtr(globalObject), regT0);
-            emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+            emit_op_get_global_var(currentInstruction);
             NEXT_OPCODE(op_get_global_var);
         }
         case op_put_global_var: {
-            emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
-            JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
-            move(ImmPtr(globalObject), regT0);
-            emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+            emit_op_put_global_var(currentInstruction);
             NEXT_OPCODE(op_put_global_var);
         }
         case op_get_scoped_var: {
-            int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
-
-            emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
-            while (skip--)
-                loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
-
-            loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
-            emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+            emit_op_get_scoped_var(currentInstruction);
             NEXT_OPCODE(op_get_scoped_var);
         }
         case op_put_scoped_var: {
-            int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+            emit_op_put_scoped_var(currentInstruction);
+            NEXT_OPCODE(op_put_scoped_var);
+        }
 
-            emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
-            emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-            while (skip--)
-                loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
+        // Call
 
-            loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
-            emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_put_scoped_var);
+        case op_call: {
+            emit_op_call(currentInstruction);
+            NEXT_OPCODE(op_call);
+        }
+        case op_call_eval: {
+            emit_op_call_eval(currentInstruction);
+            NEXT_OPCODE(op_call_eval);
+        }
+        case op_load_varargs: {
+            emit_op_load_varargs(currentInstruction);
+            NEXT_OPCODE(op_load_varargs);
+        }
+        case op_call_varargs: {
+            emit_op_call_varargs(currentInstruction);
+            NEXT_OPCODE(op_call_varargs);
+        }
+        case op_construct: {
+            emit_op_construct(currentInstruction);
+            NEXT_OPCODE(op_construct);
         }
         case op_tear_off_activation: {
-            JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.call();
+            emit_op_tear_off_activation(currentInstruction);
             NEXT_OPCODE(op_tear_off_activation);
         }
         case op_tear_off_arguments: {
-            JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+            emit_op_tear_off_arguments(currentInstruction);
             NEXT_OPCODE(op_tear_off_arguments);
         }
         case op_ret: {
-            // We could JIT generate the deref, only calling out to C when the refcount hits zero.
-            if (m_codeBlock->needsFullScopeChain())
-                JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+            emit_op_ret(currentInstruction);
+            NEXT_OPCODE(op_ret);
+        }
 
-            ASSERT(callFrameRegister != regT1);
-            ASSERT(regT1 != returnValueRegister);
-            ASSERT(returnValueRegister != callFrameRegister);
 
-            // Return the result in %eax.
-            emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+        // Profiling / Debugging
 
-            // Grab the return address.
-            emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+        case op_profile_will_call: {
+            emit_op_profile_will_call(currentInstruction);
+            NEXT_OPCODE(op_profile_will_call);
+        }
+        case op_profile_did_call: {
+            emit_op_profile_did_call(currentInstruction);
+            NEXT_OPCODE(op_profile_did_call);
+        }
+        case op_debug: {
+            emit_op_debug(currentInstruction);
+            NEXT_OPCODE(op_debug);
+        }
 
-            // Restore our caller's "r".
-            emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
 
-            // Return.
-            push(regT1);
-            ret();
+        // Unsorted
 
-            NEXT_OPCODE(op_ret);
+        case op_mov: {
+            emit_op_mov(currentInstruction);
+            NEXT_OPCODE(op_mov);
+        }
+        case op_end: {
+            emit_op_end(currentInstruction);
+            NEXT_OPCODE(op_end);
+        }
+        case op_new_object: {
+            emit_op_new_object(currentInstruction);
+            NEXT_OPCODE(op_new_object);
+        }
+        case op_instanceof: {
+            emit_op_instanceof(currentInstruction);
+            NEXT_OPCODE(op_instanceof);
+        }
+        case op_new_func: {
+            emit_op_new_func(currentInstruction);
+            NEXT_OPCODE(op_new_func);
         }
         case op_new_array: {
-            JITStubCall stubCall(this, JITStubs::cti_op_new_array);
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_new_array(currentInstruction);
             NEXT_OPCODE(op_new_array);
         }
         case op_resolve: {
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_resolve(currentInstruction);
             NEXT_OPCODE(op_resolve);
         }
         case op_construct_verify: {
-            emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-            emitJumpSlowCaseIfNotJSCell(regT0);
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
-
+            emit_op_construct_verify(currentInstruction);
             NEXT_OPCODE(op_construct_verify);
         }
         case op_to_primitive: {
-            int dst = currentInstruction[1].u.operand;
-            int src = currentInstruction[2].u.operand;
-
-            emitGetVirtualRegister(src, regT0);
-            
-            Jump isImm = emitJumpIfNotJSCell(regT0);
-            addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
-            isImm.link(this);
-
-            if (dst != src)
-                emitPutVirtualRegister(dst);
-
+            emit_op_to_primitive(currentInstruction);
             NEXT_OPCODE(op_to_primitive);
         }
         case op_strcat: {
-            JITStubCall stubCall(this, JITStubs::cti_op_strcat);
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_strcat(currentInstruction);
             NEXT_OPCODE(op_strcat);
         }
-        case op_get_by_val: {
-            emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
-            // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
-            // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
-            // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
-            // size is always less than 4Gb).  As such zero extending wil have been correct (and extending the value
-            // to 64-bits is necessary since it's used in the address calculation.  We zero extend rather than sign
-            // extending since it makes it easier to re-tag the value in the slow case.
-            zeroExtend32ToPtr(regT1, regT1);
-#else
-            emitFastArithImmToInt(regT1);
-#endif
-            emitJumpSlowCaseIfNotJSCell(regT0);
-            addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
-            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-            loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
-            addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
-
-            // Get the value from the vector
-            loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_get_by_val);
-        }
         case op_resolve_func: {
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
-            stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-            stubCall.call(currentInstruction[2].u.operand);
+            emit_op_resolve_func(currentInstruction);
             NEXT_OPCODE(op_resolve_func);
         }
-        case op_sub: {
-            compileFastArith_op_sub(currentInstruction);
-            NEXT_OPCODE(op_sub);
-        }
-        case op_put_by_val: {
-            emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
-            // See comment in op_get_by_val.
-            zeroExtend32ToPtr(regT1, regT1);
-#else
-            emitFastArithImmToInt(regT1);
-#endif
-            emitJumpSlowCaseIfNotJSCell(regT0);
-            addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
-            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
-            loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
-            Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
-            // No; oh well, check if the access is within the vector - if so, we may still be okay.
-            addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
-
-            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-            // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff. 
-            addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
-
-            // All good - put the value into the array.
-            inFastVector.link(this);
-            emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-            storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
-            NEXT_OPCODE(op_put_by_val);
-        }
-        case op_loop_if_true: {
-            emitTimeoutCheck();
-
-            unsigned target = currentInstruction[2].u.operand;
-            emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-            Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
-            addJump(emitJumpIfImmediateInteger(regT0), target + 2);
-
-            addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
-            addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
-            isZero.link(this);
-            NEXT_OPCODE(op_loop_if_true);
-        };
         case op_resolve_base: {
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_resolve_base(currentInstruction);
             NEXT_OPCODE(op_resolve_base);
         }
         case op_resolve_skip: {
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_resolve_skip(currentInstruction);
             NEXT_OPCODE(op_resolve_skip);
         }
         case op_resolve_global: {
-            // Fast case
-            void* globalObject = currentInstruction[2].u.jsCell;
-            Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
-            
-            unsigned currentIndex = globalResolveInfoIndex++;
-            void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
-            void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
-            // Check Structure of global object
-            move(ImmPtr(globalObject), regT0);
-            loadPtr(structureAddress, regT1);
-            Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
-
-            // Load cached property
-            // Assume that the global object always uses external storage.
-            loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
-            load32(offsetAddr, regT1);
-            loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            Jump end = jump();
-
-            // Slow case
-            noMatch.link(this);
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
-            stubCall.addArgument(ImmPtr(globalObject));
-            stubCall.addArgument(ImmPtr(ident));
-            stubCall.addArgument(Imm32(currentIndex));
-            stubCall.call(currentInstruction[1].u.operand);
-            end.link(this);
+            emit_op_resolve_global(currentInstruction);
             NEXT_OPCODE(op_resolve_global);
         }
-        case op_pre_dec: {
-            compileFastArith_op_pre_dec(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_pre_dec);
-        }
-        case op_jnless: {
-            unsigned target = currentInstruction[3].u.operand;
-            compileFastArith_op_jnless(currentInstruction[1].u.operand, currentInstruction[2].u.operand, target);
-            RECORD_JUMP_TARGET(target + 3);
-            NEXT_OPCODE(op_jnless);
-        }
-        case op_jnlesseq: {
-            unsigned target = currentInstruction[3].u.operand;
-            compileFastArith_op_jnlesseq(currentInstruction[1].u.operand, currentInstruction[2].u.operand, target);
-            RECORD_JUMP_TARGET(target + 3);
-            NEXT_OPCODE(op_jnlesseq);
-        }
-        case op_not: {
-            emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-            xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
-            addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
-            xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_not);
-        }
-        case op_jfalse: {
-            unsigned target = currentInstruction[2].u.operand;
-            emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-            addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
-            Jump isNonZero = emitJumpIfImmediateInteger(regT0);
-
-            addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
-            addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
-
-            isNonZero.link(this);
-            RECORD_JUMP_TARGET(target + 2);
-            NEXT_OPCODE(op_jfalse);
-        };
-        case op_jeq_null: {
-            unsigned src = currentInstruction[1].u.operand;
-            unsigned target = currentInstruction[2].u.operand;
-
-            emitGetVirtualRegister(src, regT0);
-            Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
-            // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
-            Jump wasNotImmediate = jump();
-
-            // Now handle the immediate cases - undefined & null
-            isImmediate.link(this);
-            andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
-            addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);            
-
-            wasNotImmediate.link(this);
-            RECORD_JUMP_TARGET(target + 2);
-            NEXT_OPCODE(op_jeq_null);
-        };
-        case op_jneq_null: {
-            unsigned src = currentInstruction[1].u.operand;
-            unsigned target = currentInstruction[2].u.operand;
-
-            emitGetVirtualRegister(src, regT0);
-            Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
-            // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
-            Jump wasNotImmediate = jump();
-
-            // Now handle the immediate cases - undefined & null
-            isImmediate.link(this);
-            andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
-            addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);            
-
-            wasNotImmediate.link(this);
-            RECORD_JUMP_TARGET(target + 2);
-            NEXT_OPCODE(op_jneq_null);
-        }
-        case op_jneq_ptr: {
-            unsigned src = currentInstruction[1].u.operand;
-            JSCell* ptr = currentInstruction[2].u.jsCell;
-            unsigned target = currentInstruction[3].u.operand;
-            
-            emitGetVirtualRegister(src, regT0);
-            addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);            
-
-            RECORD_JUMP_TARGET(target + 3);
-            NEXT_OPCODE(op_jneq_ptr);
-        }
-        case op_post_inc: {
-            compileFastArith_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
-            NEXT_OPCODE(op_post_inc);
-        }
         case op_unexpected_load: {
-            JSValue v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
-            move(ImmPtr(JSValue::encode(v)), regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+            emit_op_unexpected_load(currentInstruction);
             NEXT_OPCODE(op_unexpected_load);
         }
         case op_jsr: {
-            int retAddrDst = currentInstruction[1].u.operand;
-            int target = currentInstruction[2].u.operand;
-            DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst));
-            addJump(jump(), target + 2);
-            m_jsrSites.append(JSRInfo(storeLocation, label()));
-            killLastResultRegister();
-            RECORD_JUMP_TARGET(target + 2);
+            emit_op_jsr(currentInstruction);
             NEXT_OPCODE(op_jsr);
         }
         case op_sret: {
-            jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-            killLastResultRegister();
+            emit_op_sret(currentInstruction);
             NEXT_OPCODE(op_sret);
         }
-        case op_eq: {
-            emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
-            set32(Equal, regT1, regT0, regT0);
-            emitTagAsBoolImmediate(regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_eq);
-        }
-        case op_lshift: {
-            compileFastArith_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
-            NEXT_OPCODE(op_lshift);
-        }
-        case op_bitand: {
-            compileFastArith_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
-            NEXT_OPCODE(op_bitand);
-        }
-        case op_rshift: {
-            compileFastArith_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
-            NEXT_OPCODE(op_rshift);
-        }
-        case op_bitnot: {
-            emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-            emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
-            not32(regT0);
-            emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
-            xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
-#endif
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_bitnot);
-        }
         case op_resolve_with_base: {
-            JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
-            stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-            stubCall.call(currentInstruction[2].u.operand);
+            emit_op_resolve_with_base(currentInstruction);
             NEXT_OPCODE(op_resolve_with_base);
         }
         case op_new_func_exp: {
-            JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
-            stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_new_func_exp(currentInstruction);
             NEXT_OPCODE(op_new_func_exp);
         }
-        case op_mod: {
-            compileFastArith_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
-            NEXT_OPCODE(op_mod);
-        }
-        case op_jtrue: {
-            unsigned target = currentInstruction[2].u.operand;
-            emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-            Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
-            addJump(emitJumpIfImmediateInteger(regT0), target + 2);
-
-            addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
-            addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
-            isZero.link(this);
-            RECORD_JUMP_TARGET(target + 2);
-            NEXT_OPCODE(op_jtrue);
-        }
-        case op_neq: {
-            emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
-            set32(NotEqual, regT1, regT0, regT0);
-            emitTagAsBoolImmediate(regT0);
-
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-
-            NEXT_OPCODE(op_neq);
-        }
-        case op_post_dec: {
-            compileFastArith_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
-            NEXT_OPCODE(op_post_dec);
-        }
-        case op_bitxor: {
-            emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
-            xorPtr(regT1, regT0);
-            emitFastArithReTagImmediate(regT0, regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_bitxor);
-        }
         case op_new_regexp: {
-            JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
-            stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_new_regexp(currentInstruction);
             NEXT_OPCODE(op_new_regexp);
         }
-        case op_bitor: {
-            emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
-            emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
-            orPtr(regT1, regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            NEXT_OPCODE(op_bitor);
-        }
         case op_throw: {
-            JITStubCall stubCall(this, JITStubs::cti_op_throw);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.call();
-            ASSERT(regT0 == returnValueRegister);
-#if PLATFORM(X86_64)
-            addPtr(Imm32(0x48), X86::esp);
-            pop(X86::ebx);
-            pop(X86::r15);
-            pop(X86::r14);
-            pop(X86::r13);
-            pop(X86::r12);
-            pop(X86::ebp);
-            ret();
-#else
-            addPtr(Imm32(0x1c), X86::esp);
-            pop(X86::ebx);
-            pop(X86::edi);
-            pop(X86::esi);
-            pop(X86::ebp);
-            ret();
-#endif
+            emit_op_throw(currentInstruction);
             NEXT_OPCODE(op_throw);
         }
         case op_next_pname: {
-            JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
-            stubCall.addArgument(currentInstruction[2].u.operand, regT2);
-            stubCall.call();
-            Jump endOfIter = branchTestPtr(Zero, regT0);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
-            addJump(jump(), currentInstruction[3].u.operand + 3);
-            endOfIter.link(this);
+            emit_op_next_pname(currentInstruction);
             NEXT_OPCODE(op_next_pname);
         }
         case op_push_scope: {
-            JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_push_scope(currentInstruction);
             NEXT_OPCODE(op_push_scope);
         }
         case op_pop_scope: {
-            JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+            emit_op_pop_scope(currentInstruction);
             NEXT_OPCODE(op_pop_scope);
         }
-        case op_stricteq: {
-            compileOpStrictEq(currentInstruction, OpStrictEq);
-            NEXT_OPCODE(op_stricteq);
-        }
-        case op_nstricteq: {
-            compileOpStrictEq(currentInstruction, OpNStrictEq);
-            NEXT_OPCODE(op_nstricteq);
-        }
         case op_to_jsnumber: {
-            int srcVReg = currentInstruction[2].u.operand;
-            emitGetVirtualRegister(srcVReg, regT0);
-            
-            Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
-
-            emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
-            
-            wasImmediate.link(this);
-
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+            emit_op_to_jsnumber(currentInstruction);
             NEXT_OPCODE(op_to_jsnumber);
         }
         case op_push_new_scope: {
-            JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_push_new_scope(currentInstruction);
             NEXT_OPCODE(op_push_new_scope);
         }
         case op_catch: {
-            emitGetCTIParam(offsetof(struct JITStackFrame, callFrame) / sizeof (void*), callFrameRegister);
-            emitPutVirtualRegister(currentInstruction[1].u.operand);
+            emit_op_catch(currentInstruction);
             NEXT_OPCODE(op_catch);
         }
         case op_jmp_scopes: {
-            JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
-            stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-            stubCall.call();
-            addJump(jump(), currentInstruction[2].u.operand + 2);
-            RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+            emit_op_jmp_scopes(currentInstruction);
             NEXT_OPCODE(op_jmp_scopes);
         }
-        case op_put_by_index: {
-            JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-            stubCall.call();
-            NEXT_OPCODE(op_put_by_index);
-        }
         case op_switch_imm: {
-            unsigned tableIndex = currentInstruction[1].u.operand;
-            unsigned defaultOffset = currentInstruction[2].u.operand;
-            unsigned scrutinee = currentInstruction[3].u.operand;
-
-            // create jump table for switch destinations, track this switch statement.
-            SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
-            m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
-            jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
-            JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
-            stubCall.addArgument(scrutinee, regT2);
-            stubCall.addArgument(Imm32(tableIndex));
-            stubCall.call();
-            jump(regT0);
+            emit_op_switch_imm(currentInstruction);
             NEXT_OPCODE(op_switch_imm);
         }
         case op_switch_char: {
-            unsigned tableIndex = currentInstruction[1].u.operand;
-            unsigned defaultOffset = currentInstruction[2].u.operand;
-            unsigned scrutinee = currentInstruction[3].u.operand;
-
-            // create jump table for switch destinations, track this switch statement.
-            SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
-            m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
-            jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
-            JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
-            stubCall.addArgument(scrutinee, regT2);
-            stubCall.addArgument(Imm32(tableIndex));
-            stubCall.call();
-            jump(regT0);
+            emit_op_switch_char(currentInstruction);
             NEXT_OPCODE(op_switch_char);
         }
         case op_switch_string: {
-            unsigned tableIndex = currentInstruction[1].u.operand;
-            unsigned defaultOffset = currentInstruction[2].u.operand;
-            unsigned scrutinee = currentInstruction[3].u.operand;
-
-            // create jump table for switch destinations, track this switch statement.
-            StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
-            m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
-
-            JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
-            stubCall.addArgument(scrutinee, regT2);
-            stubCall.addArgument(Imm32(tableIndex));
-            stubCall.call();
-            jump(regT0);
+            emit_op_switch_string(currentInstruction);
             NEXT_OPCODE(op_switch_string);
         }
-        case op_put_getter: {
-            JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-            stubCall.call();
-            NEXT_OPCODE(op_put_getter);
-        }
-        case op_put_setter: {
-            JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT2);
-            stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
-            stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-            stubCall.call();
-            NEXT_OPCODE(op_put_setter);
-        }
         case op_new_error: {
-            JITStubCall stubCall(this, JITStubs::cti_op_new_error);
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand))));
-            stubCall.addArgument(Imm32(m_bytecodeIndex));
-            stubCall.call(currentInstruction[1].u.operand);
+            emit_op_new_error(currentInstruction);
             NEXT_OPCODE(op_new_error);
         }
-        case op_debug: {
-            JITStubCall stubCall(this, JITStubs::cti_op_debug);
-            stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
-            stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
-            stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
-            stubCall.call();
-            NEXT_OPCODE(op_debug);
-        }
-        case op_eq_null: {
-            unsigned dst = currentInstruction[1].u.operand;
-            unsigned src1 = currentInstruction[2].u.operand;
-
-            emitGetVirtualRegister(src1, regT0);
-            Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
-            Jump wasNotImmediate = jump();
-
-            isImmediate.link(this);
-
-            andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
-            setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
-
-            wasNotImmediate.link(this);
-
-            emitTagAsBoolImmediate(regT0);
-            emitPutVirtualRegister(dst);
-
-            NEXT_OPCODE(op_eq_null);
-        }
-        case op_neq_null: {
-            unsigned dst = currentInstruction[1].u.operand;
-            unsigned src1 = currentInstruction[2].u.operand;
-
-            emitGetVirtualRegister(src1, regT0);
-            Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-            setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
-            Jump wasNotImmediate = jump();
-
-            isImmediate.link(this);
-
-            andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
-            setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
-
-            wasNotImmediate.link(this);
-
-            emitTagAsBoolImmediate(regT0);
-            emitPutVirtualRegister(dst);
-
-            NEXT_OPCODE(op_neq_null);
-        }
         case op_enter: {
-            // Even though CTI doesn't use them, we initialize our constant
-            // registers to zap stale pointers, to avoid unnecessarily prolonging
-            // object lifetime and increasing GC pressure.
-            size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
-            for (size_t j = 0; j < count; ++j)
-                emitInitRegister(j);
-
+            emit_op_enter(currentInstruction);
             NEXT_OPCODE(op_enter);
         }
         case op_enter_with_activation: {
-            // Even though CTI doesn't use them, we initialize our constant
-            // registers to zap stale pointers, to avoid unnecessarily prolonging
-            // object lifetime and increasing GC pressure.
-            size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
-            for (size_t j = 0; j < count; ++j)
-                emitInitRegister(j);
-
-            JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+            emit_op_enter_with_activation(currentInstruction);
             NEXT_OPCODE(op_enter_with_activation);
         }
         case op_create_arguments: {
-            if (m_codeBlock->m_numParameters == 1)
-                JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
-            else
-                JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+            emit_op_create_arguments(currentInstruction);
             NEXT_OPCODE(op_create_arguments);
         }
         case op_convert_this: {
-            emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-            emitJumpSlowCaseIfNotJSCell(regT0);
-            loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
-            addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-
+            emit_op_convert_this(currentInstruction);
             NEXT_OPCODE(op_convert_this);
         }
-        case op_profile_will_call: {
-            emitGetCTIParam(FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*), regT0);
-            Jump noProfiler = branchTestPtr(Zero, Address(regT0));
-
-            JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT0);
-            stubCall.call();
-            noProfiler.link(this);
 
-            NEXT_OPCODE(op_profile_will_call);
-        }
-        case op_profile_did_call: {
-            emitGetCTIParam(FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*), regT0);
-            Jump noProfiler = branchTestPtr(Zero, Address(regT0));
 
-            JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
-            stubCall.addArgument(currentInstruction[1].u.operand, regT0);
-            stubCall.call();
-            noProfiler.link(this);
+        // No implementation
 
-            NEXT_OPCODE(op_profile_did_call);
-        }
         case op_get_array_length:
         case op_get_by_id_chain:
         case op_get_by_id_generic:
@@ -1124,8 +580,8 @@ void JIT::privateCompileMainPass()
         }
     }
 
-    ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
-    ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
@@ -1145,8 +601,9 @@ void JIT::privateCompileLinkPass()
 void JIT::privateCompileSlowCases()
 {
     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
-    unsigned propertyAccessInstructionIndex = 0;
-    unsigned callLinkInfoIndex = 0;
+
+    m_propertyAccessInstructionIndex = 0;
+    m_callLinkInfoIndex = 0;
 
     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
         // FIXME: enable peephole optimizations for slow cases when applicable
@@ -1159,6 +616,58 @@ void JIT::privateCompileSlowCases()
         Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
 
         switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+        case op_add: {
+            emitSlow_op_add(currentInstruction, iter);
+            NEXT_OPCODE(op_add);
+        }
+        case op_sub: {
+            emitSlow_op_sub(currentInstruction, iter);
+            NEXT_OPCODE(op_sub);
+        }
+        case op_mul: {
+            emitSlow_op_mul(currentInstruction, iter);
+            NEXT_OPCODE(op_mul);
+        }
+        case op_mod: {
+            emitSlow_op_mod(currentInstruction, iter);
+            NEXT_OPCODE(op_mod);
+        }
+        case op_bitand: {
+            emitSlow_op_bitand(currentInstruction, iter);
+            NEXT_OPCODE(op_bitand);
+        }
+        case op_lshift: {
+            emitSlow_op_lshift(currentInstruction, iter);
+            NEXT_OPCODE(op_lshift);
+        }
+        case op_rshift: {
+            emitSlow_op_rshift(currentInstruction, iter);
+            NEXT_OPCODE(op_rshift);
+        }
+        case op_jnless: {
+            emitSlow_op_jnless(currentInstruction, iter);
+            NEXT_OPCODE(op_jnless);
+        }
+        case op_jnlesseq: {
+            emitSlow_op_jnlesseq(currentInstruction, iter);
+            NEXT_OPCODE(op_jnlesseq);
+        }
+        case op_pre_dec: {
+            emitSlow_op_pre_dec(currentInstruction, iter);
+            NEXT_OPCODE(op_pre_dec);
+        }
+        case op_pre_inc: {
+            emitSlow_op_pre_inc(currentInstruction, iter);
+            NEXT_OPCODE(op_pre_inc);
+        }
+        case op_post_inc: {
+            emitSlow_op_post_inc(currentInstruction, iter);
+            NEXT_OPCODE(op_post_inc);
+        }
+        case op_post_dec: {
+            emitSlow_op_post_dec(currentInstruction, iter);
+            NEXT_OPCODE(op_post_dec);
+        }
         case op_convert_this: {
             linkSlowCase(iter);
             linkSlowCase(iter);
@@ -1167,10 +676,6 @@ void JIT::privateCompileSlowCases()
             stubCall.call(currentInstruction[1].u.operand);
             NEXT_OPCODE(op_convert_this);
         }
-        case op_add: {
-            compileFastArithSlow_op_add(currentInstruction, iter);
-            NEXT_OPCODE(op_add);
-        }
         case op_construct_verify: {
             linkSlowCase(iter);
             linkSlowCase(iter);
@@ -1218,18 +723,6 @@ void JIT::privateCompileSlowCases()
 
             NEXT_OPCODE(op_get_by_val);
         }
-        case op_sub: {
-            compileFastArithSlow_op_sub(currentInstruction, iter);
-            NEXT_OPCODE(op_sub);
-        }
-        case op_rshift: {
-            compileFastArithSlow_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_rshift);
-        }
-        case op_lshift: {
-            compileFastArithSlow_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_lshift);
-        }
         case op_loop_if_less: {
             unsigned op1 = currentInstruction[1].u.operand;
             unsigned op2 = currentInstruction[2].u.operand;
@@ -1260,11 +753,11 @@ void JIT::privateCompileSlowCases()
             NEXT_OPCODE(op_loop_if_less);
         }
         case op_put_by_id: {
-            compilePutByIdSlowCase(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, iter, propertyAccessInstructionIndex++);
+            compilePutByIdSlowCase(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, iter, m_propertyAccessInstructionIndex++);
             NEXT_OPCODE(op_put_by_id);
         }
         case op_get_by_id: {
-            compileGetByIdSlowCase(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), iter, propertyAccessInstructionIndex++);
+            compileGetByIdSlowCase(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), iter, m_propertyAccessInstructionIndex++);
             NEXT_OPCODE(op_get_by_id);
         }
         case op_loop_if_lesseq: {
@@ -1288,10 +781,6 @@ void JIT::privateCompileSlowCases()
             }
             NEXT_OPCODE(op_loop_if_lesseq);
         }
-        case op_pre_inc: {
-            compileFastArithSlow_op_pre_inc(currentInstruction[1].u.operand, iter);
-            NEXT_OPCODE(op_pre_inc);
-        }
         case op_put_by_val: {
             // Normal slow cases - either is not an immediate imm, or is an array.
             Jump notImm = getSlowCase(iter);
@@ -1327,18 +816,6 @@ void JIT::privateCompileSlowCases()
             emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
             NEXT_OPCODE(op_loop_if_true);
         }
-        case op_pre_dec: {
-            compileFastArithSlow_op_pre_dec(currentInstruction[1].u.operand, iter);
-            NEXT_OPCODE(op_pre_dec);
-        }
-        case op_jnless: {
-            compileFastArithSlow_op_jnless(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_jnless);
-        }
-        case op_jnlesseq: {
-            compileFastArithSlow_op_jnlesseq(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_jnlesseq);
-        }
         case op_not: {
             linkSlowCase(iter);
             xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
@@ -1355,10 +832,6 @@ void JIT::privateCompileSlowCases()
             emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
             NEXT_OPCODE(op_jfalse);
         }
-        case op_post_inc: {
-            compileFastArithSlow_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
-            NEXT_OPCODE(op_post_inc);
-        }
         case op_bitnot: {
             linkSlowCase(iter);
             JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
@@ -1366,10 +839,6 @@ void JIT::privateCompileSlowCases()
             stubCall.call(currentInstruction[1].u.operand);
             NEXT_OPCODE(op_bitnot);
         }
-        case op_bitand: {
-            compileFastArithSlow_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_bitand);
-        }
         case op_jtrue: {
             linkSlowCase(iter);
             JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
@@ -1378,10 +847,6 @@ void JIT::privateCompileSlowCases()
             emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
             NEXT_OPCODE(op_jtrue);
         }
-        case op_post_dec: {
-            compileFastArithSlow_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
-            NEXT_OPCODE(op_post_dec);
-        }
         case op_bitxor: {
             linkSlowCase(iter);
             JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
@@ -1443,21 +908,12 @@ void JIT::privateCompileSlowCases()
             stubCall.call(currentInstruction[1].u.operand);
             NEXT_OPCODE(op_instanceof);
         }
-        case op_mod: {
-            compileFastArithSlow_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
-            NEXT_OPCODE(op_mod);
-        }
-        case op_mul: {
-            compileFastArithSlow_op_mul(currentInstruction, iter);
-            NEXT_OPCODE(op_mul);
-        }
-
         case op_call: {
-            compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+            compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, opcodeID);
             NEXT_OPCODE(op_call);
         }
         case op_call_eval: {
-            compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+            compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, opcodeID);
             NEXT_OPCODE(op_call_eval);
         }
         case op_call_varargs: {
@@ -1465,7 +921,7 @@ void JIT::privateCompileSlowCases()
             NEXT_OPCODE(op_call_varargs);
         }
         case op_construct: {
-            compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+            compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, opcodeID);
             NEXT_OPCODE(op_construct);
         }
         case op_to_jsnumber: {
@@ -1489,9 +945,9 @@ void JIT::privateCompileSlowCases()
     }
 
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
 #endif
-    ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
 
 #ifndef NDEBUG
     // Reset this, in order to guard its use with ASSERTs.
index 64000d0..aa0f377 100644 (file)
@@ -377,32 +377,116 @@ namespace JSC {
         void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
         void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
 
-        void compileFastArith_op_add(Instruction*);
-        void compileFastArith_op_sub(Instruction*);
-        void compileFastArith_op_mul(Instruction*);
-        void compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2);
-        void compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2);
-        void compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2);
-        void compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2);
-        void compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target);
-        void compileFastArith_op_jnlesseq(unsigned op1, unsigned op2, unsigned target);
-        void compileFastArith_op_pre_inc(unsigned srcDst);
-        void compileFastArith_op_pre_dec(unsigned srcDst);
-        void compileFastArith_op_post_inc(unsigned result, unsigned srcDst);
-        void compileFastArith_op_post_dec(unsigned result, unsigned srcDst);
-        void compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_mod(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_jnlesseq(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
-        void compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+        // Arithmetic Ops
+
+        void emit_op_add(Instruction*);
+        void emit_op_sub(Instruction*);
+        void emit_op_mul(Instruction*);
+        void emit_op_mod(Instruction*);
+        void emit_op_bitand(Instruction*);
+        void emit_op_lshift(Instruction*);
+        void emit_op_rshift(Instruction*);
+        void emit_op_jnless(Instruction*);
+        void emit_op_jnlesseq(Instruction*);
+        void emit_op_pre_inc(Instruction*);
+        void emit_op_pre_dec(Instruction*);
+        void emit_op_post_inc(Instruction*);
+        void emit_op_post_dec(Instruction*);
+        void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+        void emit_op_get_by_val(Instruction*);
+        void emit_op_put_by_val(Instruction*);
+        void emit_op_put_by_index(Instruction*);
+        void emit_op_put_getter(Instruction*);
+        void emit_op_put_setter(Instruction*);
+        void emit_op_del_by_id(Instruction*);
+
+        void emit_op_mov(Instruction*);
+        void emit_op_end(Instruction*);
+        void emit_op_jmp(Instruction*);
+        void emit_op_loop(Instruction*);
+        void emit_op_loop_if_less(Instruction*);
+        void emit_op_loop_if_lesseq(Instruction*);
+        void emit_op_new_object(Instruction*);
+        void emit_op_put_by_id(Instruction*);
+        void emit_op_get_by_id(Instruction*);
+        void emit_op_instanceof(Instruction*);
+        void emit_op_new_func(Instruction*);
+        void emit_op_call(Instruction*);
+        void emit_op_call_eval(Instruction*);
+        void emit_op_load_varargs(Instruction*);
+        void emit_op_call_varargs(Instruction*);
+        void emit_op_construct(Instruction*);
+        void emit_op_get_global_var(Instruction*);
+        void emit_op_put_global_var(Instruction*);
+        void emit_op_get_scoped_var(Instruction*);
+        void emit_op_put_scoped_var(Instruction*);
+        void emit_op_tear_off_activation(Instruction*);
+        void emit_op_tear_off_arguments(Instruction*);
+        void emit_op_ret(Instruction*);
+        void emit_op_new_array(Instruction*);
+        void emit_op_resolve(Instruction*);
+        void emit_op_construct_verify(Instruction*);
+        void emit_op_to_primitive(Instruction*);
+        void emit_op_strcat(Instruction*);
+        void emit_op_resolve_func(Instruction*);
+        void emit_op_loop_if_true(Instruction*);
+        void emit_op_resolve_base(Instruction*);
+        void emit_op_resolve_skip(Instruction*);
+        void emit_op_resolve_global(Instruction*);
+        void emit_op_not(Instruction*);
+        void emit_op_jfalse(Instruction*);
+        void emit_op_jeq_null(Instruction*);
+        void emit_op_jneq_null(Instruction*);
+        void emit_op_jneq_ptr(Instruction*);
+        void emit_op_unexpected_load(Instruction*);
+        void emit_op_jsr(Instruction*);
+        void emit_op_sret(Instruction*);
+        void emit_op_eq(Instruction*);
+        void emit_op_bitnot(Instruction*);
+        void emit_op_resolve_with_base(Instruction*);
+        void emit_op_new_func_exp(Instruction*);
+        void emit_op_jtrue(Instruction*);
+        void emit_op_neq(Instruction*);
+        void emit_op_bitxor(Instruction*);
+        void emit_op_new_regexp(Instruction*);
+        void emit_op_bitor(Instruction*);
+        void emit_op_throw(Instruction*);
+        void emit_op_next_pname(Instruction*);
+        void emit_op_push_scope(Instruction*);
+        void emit_op_pop_scope(Instruction*);
+        void emit_op_stricteq(Instruction*);
+        void emit_op_nstricteq(Instruction*);
+        void emit_op_to_jsnumber(Instruction*);
+        void emit_op_push_new_scope(Instruction*);
+        void emit_op_catch(Instruction*);
+        void emit_op_jmp_scopes(Instruction*);
+        void emit_op_switch_imm(Instruction*);
+        void emit_op_switch_char(Instruction*);
+        void emit_op_switch_string(Instruction*);
+        void emit_op_new_error(Instruction*);
+        void emit_op_debug(Instruction*);
+        void emit_op_eq_null(Instruction*);
+        void emit_op_neq_null(Instruction*);
+        void emit_op_enter(Instruction*);
+        void emit_op_enter_with_activation(Instruction*);
+        void emit_op_create_arguments(Instruction*);
+        void emit_op_convert_this(Instruction*);
+        void emit_op_profile_will_call(Instruction*);
+        void emit_op_profile_did_call(Instruction*);
+
 #if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
         void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
         void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
@@ -543,6 +627,10 @@ namespace JSC {
 
         int m_lastResultBytecodeRegister;
         unsigned m_jumpTargetsPosition;
+
+        unsigned m_propertyAccessInstructionIndex;
+        unsigned m_globalResolveInfoIndex;
+        unsigned m_callLinkInfoIndex;
     };
 
     class JITStubCall {
index dbfe2e8..e8ff0ee 100644 (file)
@@ -93,8 +93,12 @@ static bool isSSE2Present()
 
 namespace JSC {
 
-void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_lshift(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     emitGetVirtualRegisters(op1, regT0, op2, regT2);
     // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -114,8 +118,12 @@ void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2
     emitFastArithReTagImmediate(regT0, regT0);
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
 #if USE(ALTERNATE_JSIMMEDIATE)
     UNUSED_PARAM(op1);
     UNUSED_PARAM(op2);
@@ -136,8 +144,12 @@ void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned
     stubCall.call(result);
 }
 
-void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_rshift(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     if (isOperandConstantImmediateInt(op2)) {
         emitGetVirtualRegister(op1, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -189,8 +201,12 @@ void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2
     emitPutVirtualRegister(result);
 }
 
-void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     linkSlowCase(iter);
     JITStubCall stubCall(this, JITStubs::cti_op_rshift);
 
@@ -223,8 +239,12 @@ void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned
     stubCall.call(result);
 }
 
-void JIT::compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target)
+void JIT::emit_op_jnless(Instruction* currentInstruction)
 {
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+
     // We generate inline code for the following cases in the fast path:
     // - int immediate to constant int immediate
     // - constant int immediate to int immediate
@@ -256,8 +276,12 @@ void JIT::compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target
         addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
     }
 }
-void JIT::compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+
     // We generate inline code for the following cases in the slow path:
     // - floating-point number to constant int immediate
     // - constant int immediate to floating-point number
@@ -402,8 +426,12 @@ void JIT::compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned ta
     }
 }
 
-void JIT::compileFastArith_op_jnlesseq(unsigned op1, unsigned op2, unsigned target)
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
 {
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+
     // We generate inline code for the following cases in the fast path:
     // - int immediate to constant int immediate
     // - constant int immediate to int immediate
@@ -435,8 +463,12 @@ void JIT::compileFastArith_op_jnlesseq(unsigned op1, unsigned op2, unsigned targ
         addJump(branch32(GreaterThan, regT0, regT1), target + 3);
     }
 }
-void JIT::compileFastArithSlow_op_jnlesseq(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+
     // We generate inline code for the following cases in the slow path:
     // - floating-point number to constant int immediate
     // - constant int immediate to floating-point number
@@ -581,8 +613,12 @@ void JIT::compileFastArithSlow_op_jnlesseq(unsigned op1, unsigned op2, unsigned
     }
 }
 
-void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_bitand(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     if (isOperandConstantImmediateInt(op1)) {
         emitGetVirtualRegister(op2, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -612,8 +648,12 @@ void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2
     }
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     linkSlowCase(iter);
     if (isOperandConstantImmediateInt(op1)) {
         JITStubCall stubCall(this, JITStubs::cti_op_bitand);
@@ -634,8 +674,12 @@ void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned
 }
 
 #if PLATFORM(X86) || PLATFORM(X86_64)
-void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_mod(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
     emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
     emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
@@ -653,8 +697,10 @@ void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
     emitFastArithReTagImmediate(X86::edx, X86::eax);
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+
 #if USE(ALTERNATE_JSIMMEDIATE)
     linkSlowCase(iter);
     linkSlowCase(iter);
@@ -674,21 +720,28 @@ void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vecto
     stubCall.call(result);
 }
 #else
-void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_mod(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned op1 = currentInstruction[2].u.operand;
+    unsigned op2 = currentInstruction[3].u.operand;
+
     JITStubCall stubCall(this, JITStubs::cti_op_mod);
     stubCall.addArgument(op1, regT2);
     stubCall.addArgument(op2, regT2);
     stubCall.call(result);
 }
-void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
     ASSERT_NOT_REACHED();
 }
 #endif
 
-void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned srcDst = currentInstruction[2].u.operand;
+
     emitGetVirtualRegister(srcDst, regT0);
     move(regT0, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -703,8 +756,11 @@ void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
     emitPutVirtualRegister(result);
 }
 
-void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned srcDst = currentInstruction[2].u.operand;
+
     linkSlowCase(iter);
     linkSlowCase(iter);
     JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
@@ -713,8 +769,11 @@ void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vec
     stubCall.call(result);
 }
 
-void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned srcDst = currentInstruction[2].u.operand;
+
     emitGetVirtualRegister(srcDst, regT0);
     move(regT0, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -728,8 +787,11 @@ void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
     emitPutVirtualRegister(srcDst, regT1);
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned result = currentInstruction[1].u.operand;
+    unsigned srcDst = currentInstruction[2].u.operand;
+
     linkSlowCase(iter);
     linkSlowCase(iter);
     JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
@@ -738,8 +800,10 @@ void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vec
     stubCall.call(result);
 }
 
-void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
 {
+    unsigned srcDst = currentInstruction[1].u.operand;
+
     emitGetVirtualRegister(srcDst, regT0);
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
 #if USE(ALTERNATE_JSIMMEDIATE)
@@ -751,8 +815,10 @@ void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
 #endif
     emitPutVirtualRegister(srcDst);
 }
-void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned srcDst = currentInstruction[1].u.operand;
+
     Jump notImm = getSlowCase(iter);
     linkSlowCase(iter);
     emitGetVirtualRegister(srcDst, regT0);
@@ -762,8 +828,10 @@ void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>
     stubCall.call(srcDst);
 }
 
-void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
 {
+    unsigned srcDst = currentInstruction[1].u.operand;
+
     emitGetVirtualRegister(srcDst, regT0);
     emitJumpSlowCaseIfNotImmediateInteger(regT0);
 #if USE(ALTERNATE_JSIMMEDIATE)
@@ -775,8 +843,10 @@ void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
 #endif
     emitPutVirtualRegister(srcDst);
 }
-void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    unsigned srcDst = currentInstruction[1].u.operand;
+
     Jump notImm = getSlowCase(iter);
     linkSlowCase(iter);
     emitGetVirtualRegister(srcDst, regT0);
@@ -789,7 +859,7 @@ void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>
 
 #if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
 
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -801,12 +871,12 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
     stubCall.call(result);
 }
 
-void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
+void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
     ASSERT_NOT_REACHED();
 }
 
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -818,12 +888,12 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
     stubCall.call(result);
 }
 
-void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
+void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
     ASSERT_NOT_REACHED();
 }
 
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_sub(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -835,7 +905,7 @@ void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
     stubCall.call(result);
 }
 
-void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
+void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
     ASSERT_NOT_REACHED();
 }
@@ -916,7 +986,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
     end.link(this);
 }
 
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -946,7 +1016,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -963,7 +1033,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
         compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
 
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -987,7 +1057,7 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1007,7 +1077,7 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
         compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
 }
 
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_sub(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1018,7 +1088,7 @@ void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
 
     emitPutVirtualRegister(result);
 }
-void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1221,7 +1291,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
     stubCall.call(dst);
 }
 
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1252,7 +1322,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
     }
 }
 
-void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1283,7 +1353,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
     }
 }
 
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1310,7 +1380,8 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
     } else
         compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
-void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
@@ -1329,11 +1400,11 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
         compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
 
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_sub(Instruction* currentInstruction)
 {
     compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
-void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
 }
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644 (file)
index 0000000..776b40b
--- /dev/null
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "JITInlineMethods.h"
+#include "JSCell.h"
+
+namespace JSC {
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+   do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+
void JIT::emit_op_mov(Instruction* currentInstruction)
{
    // op_mov dst(r1) src(r2): copy one virtual register to another.
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        // Constant source: store the encoded constant straight into dst's slot.
        storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
        // The cached last-result register no longer mirrors dst's contents.
        if (dst == m_lastResultBytecodeRegister)
            killLastResultRegister();
    } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
        // If either the src or dst is the cached register go though
        // get/put registers to make sure we track this correctly.
        emitGetVirtualRegister(src, regT0);
        emitPutVirtualRegister(dst);
    } else {
        // Perform the copy via regT1; do not disturb any mapping in regT0.
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
        storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
    }
}
+
void JIT::emit_op_end(Instruction* currentInstruction)
{
    // op_end result(r1): terminate execution of this code block, returning result.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, JITStubs::cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
    // Push the saved return address back onto the machine stack and return through it.
    push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}
+
void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    // op_jmp target(offset1): unconditional jump, relative to this instruction.
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
    RECORD_JUMP_TARGET(target + 1);
}
+
void JIT::emit_op_loop(Instruction* currentInstruction)
{
    // op_loop target(offset1): like op_jmp, plus a timeout check because this
    // instruction closes a loop.
    // NOTE(review): no RECORD_JUMP_TARGET here, unlike emit_op_jmp — presumably
    // backward loop targets are already recorded; confirm.
    emitTimeoutCheck();

    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target + 1);
}
+
void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
{
    // op_loop_if_less src1(r1) src2(r2) target(offset3): backward branch taken when
    // src1 < src2 for immediate integers; non-integer operands go to the slow case.
    emitTimeoutCheck();

    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        // Constant rhs: compare the register directly against an immediate.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        // Constant lhs: invert the comparison (op2 > op1  <=>  op1 < op2).
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target + 3);
    } else {
        // General case: both operands must be immediate integers.
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(LessThan, regT0, regT1), target + 3);
    }
}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+    if (isOperandConstantImmediateInt(op2)) {
+        emitGetVirtualRegister(op1, regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+        int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+        addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+    } else {
+        emitGetVirtualRegisters(op1, regT0, op2, regT1);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT1);
+        addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
+    }
+}
+
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    // op_new_object dst(r1): allocate an empty object via the C++ stub.
    JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    // op_instanceof dst(r1) value(r2) baseVal(r3) proto(r4): inline prototype-chain
    // walk; type-check failures and unusual hasInstance behavior go to the slow case.
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); // value
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); // baseVal
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // proto

    // check if any are immediates
    move(regT0, regT3);
    orPtr(regT2, regT3);
    orPtr(regT1, regT3);
    emitJumpSlowCaseIfNotJSCell(regT3);

    // check that all are object type - this is a bit of a bithack to avoid excess branching;
    // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
    // this works because NumberType and StringType are smaller
    move(Imm32(3 * ObjectType), regT3);
    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
    loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT1);
    sub32(Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
    sub32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
    addSlowCase(branch32(NotEqual, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3));

    // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
    load32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), regT2);
    and32(Imm32(ImplementsHasInstance | OverridesHasInstance), regT2);
    addSlowCase(branch32(NotEqual, regT2, Imm32(ImplementsHasInstance)));

    // The type checks above clobbered regT2/regT1 with Structure pointers.
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); // reload value
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // reload proto

    // optimistically load true result
    move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);

    Label loop(this);

    // load value's prototype
    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);

    // Found proto on the chain: result stays true.
    Jump exit = branchPtr(Equal, regT2, regT1);

    // Keep walking until the chain terminates in null.
    branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);

    // Fell off the end of the chain: result is false.
    move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);

    exit.link(this);

    emitPutVirtualRegister(currentInstruction[1].u.operand);

}
+
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    // op_new_func dst(r1) func(f2): instantiate a function declaration via stub.
    JITStubCall stubCall(this, JITStubs::cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_call(Instruction* currentInstruction)
{
    // op_call: delegate to the shared call compiler, consuming one call-link slot.
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
+
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    // op_call_eval: same shared call compiler, flagged as an eval call.
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}
+
void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    // op_load_varargs dst(r1) args(r2): marshal an arguments list via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    // op_call_varargs: delegate to the dedicated varargs call compiler.
    compileOpCallVarargs(currentInstruction);
}
+
void JIT::emit_op_construct(Instruction* currentInstruction)
{
    // op_construct: shared call compiler in construct mode, one call-link slot.
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
+
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    // op_get_global_var dst(r1) globalObject(c2) index(n3): read a register slot
    // straight out of the (compile-time-known) global variable object.
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    // op_put_global_var globalObject(c1) index(n2) value(r3): store a value into a
    // register slot of the (compile-time-known) global variable object.
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
}
+
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    // op_get_scoped_var dst(r1) index(n2) skip(n3): walk `skip` links down the scope
    // chain (one extra if this code block carries a full scope chain), then read the
    // variable out of that scope's variable object.
    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    while (skip--)
        loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    // op_put_scoped_var index(n1) skip(n2) value(r3): walk `skip` links down the
    // scope chain (one extra for a full scope chain), then store the value into
    // that scope's variable object.
    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    while (skip--)
        loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);

    loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
    emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
}
+
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    // op_tear_off_activation activation(r1): detach the activation object from the
    // (dying) stack frame via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
}
+
void JIT::emit_op_tear_off_arguments(Instruction*)
{
    // op_tear_off_arguments: detach the arguments object from the stack frame.
    JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
}
+
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    // op_ret result(r1): return from a JS function — load the result, restore the
    // caller's frame, and jump through the saved return address.
    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();

    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueRegister);
    ASSERT(returnValueRegister != callFrameRegister);

    // Return the result in %eax.
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    push(regT1);
    ret();

}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    // op_resolve dst(r1) property(id2): resolve an identifier through the scope
    // chain via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_construct_verify(Instruction* currentInstruction)
{
    // op_construct_verify dst(r1): check that the value produced by a construct is
    // a cell of ObjectType; anything else is handled on the slow path.
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0);
    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
    addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));

}
+
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    // op_to_primitive dst(r1) src(r2): immediates and strings pass through
    // unchanged (strings are recognized by their vtable pointer); any other cell
    // takes the slow case.
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump isImm = emitJumpIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    // Only write dst when it differs from src; the value is unchanged.
    if (dst != src)
        emitPutVirtualRegister(dst);

}
+
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    // op_strcat dst(r1) src(r2) count(n3): concatenate a run of registers into a
    // string via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}
+
+void JIT::emit_op_resolve_func(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned target = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+    addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+    isZero.link(this);
+};
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    // op_resolve_base dst(r1) property(id2): find the base object on which an
    // identifier would be resolved, via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    // op_resolve_skip dst(r1) property(id2) skip(n3): resolve an identifier,
    // skipping the innermost `skip` scopes (plus one for a full scope chain).
    JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_resolve_global(Instruction* currentInstruction)
{
    // op_resolve_global dst(r1) globalObject(c2) property(id3): inline-cached read
    // of a global property. The cache (Structure + offset) lives in the code
    // block's GlobalResolveInfo and is filled in by the slow-path stub.
    // Fast case
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Check Structure of global object
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match

    // Load cached property
    // Assume that the global object always uses external storage.
    loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
    load32(offsetAddr, regT1);
    loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
    Jump end = jump();

    // Slow case
    noMatch.link(this);
    JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(currentInstruction[1].u.operand);
    end.link(this);
}
+
void JIT::emit_op_not(Instruction* currentInstruction)
{
    // op_not dst(r1) src(r2): logical negation of a boolean immediate.
    // Toggle the bool tag, verify nothing but the payload bit remains (i.e. the
    // value really was a boolean — otherwise slow case), then toggle tag and
    // payload together to produce the negated boolean.
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+    unsigned target = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
+    Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
+    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
+
+    isNonZero.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+};
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+    unsigned src = currentInstruction[1].u.operand;
+    unsigned target = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+    Jump wasNotImmediate = jump();
+
+    // Now handle the immediate cases - undefined & null
+    isImmediate.link(this);
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);            
+
+    wasNotImmediate.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+};
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    // op_jneq_null src(r1) target(offset2): branch when src is NOT null/undefined
    // and not an object masquerading as undefined (inverse of op_jeq_null).
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
    addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    // Masking off the undefined tag bit maps undefined onto null, so one compare covers both.
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);            

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target + 2);
}
+
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    // op_jneq_ptr src(r1) ptr(c2) target(offset3): branch when src is not exactly
    // the given cell (pointer identity, no type checks needed).
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitGetVirtualRegister(src, regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);            

    RECORD_JUMP_TARGET(target + 3);
}
+
+void JIT::emit_op_unexpected_load(Instruction* currentInstruction)
+{
+    JSValue v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
+    move(ImmPtr(JSValue::encode(v)), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    // op_jsr retAddrDst(r1) target(offset2): jump to a subroutine, first storing
    // the (patched-in-later) return address into retAddrDst. The store location
    // and the label after the jump are recorded so the address can be patched.
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target + 2);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
    // Control re-enters here from op_sret, so the cached result register is stale.
    killLastResultRegister();
    RECORD_JUMP_TARGET(target + 2);
}
+
void JIT::emit_op_sret(Instruction* currentInstruction)
{
    // op_sret retAddrSrc(r1): return from a subroutine by jumping through the
    // address op_jsr stored in the given register.
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
    killLastResultRegister();
}
+
void JIT::emit_op_eq(Instruction* currentInstruction)
{
    // op_eq dst(r1) src1(r2) src2(r3): fast equality for two immediate integers;
    // other operand types are slow cases.
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(Equal, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    // op_bitnot dst(r1) src(r2): bitwise NOT of an immediate integer; anything
    // else goes to the slow case.
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    // Invert the raw value, then re-tag the 32-bit result as an immediate.
    not32(regT0);
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // xor with ~TagTypeNumber flips the payload bits while leaving the tag intact.
    xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    // op_resolve_with_base baseDst(r1) propDst(r2) property(id3): resolve an
    // identifier and its base object via the C++ stub.
    JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}
+
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    // op_new_func_exp dst(r1) func(f2): instantiate a function expression via stub.
    JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    // op_jtrue cond(r1) target(offset2): branch when the condition is truthy.
    // Same fast paths as op_loop_if_true, minus the loop timeout check.
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    // Zero is the only falsy immediate integer; test it before the integer check.
    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
    addJump(emitJumpIfImmediateInteger(regT0), target + 2);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));

    isZero.link(this);
    RECORD_JUMP_TARGET(target + 2);
}
+
void JIT::emit_op_neq(Instruction* currentInstruction)
{
    // op_neq dst(r1) src1(r2) src2(r3): fast inequality for two immediate
    // integers; other operand types are slow cases.
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(currentInstruction[1].u.operand);

}
+
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    // op_bitxor dst(r1) src1(r2) src2(r3): bitwise XOR of two immediate integers.
    // XOR cancels the matching tag bits, so the result must be re-tagged.
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    xorPtr(regT1, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    // op_new_regexp dst(r1) regexp(re2): instantiate a RegExp object via stub.
    JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    // op_bitor dst(r1) src1(r2) src2(r3): bitwise OR of two immediate integers.
    // Unlike xor, OR preserves the tag bits both operands share, so no re-tag.
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    orPtr(regT1, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_throw(Instruction* currentInstruction)
{
    // op_throw exception(r1): raise an exception via the C++ stub. If the stub
    // returns (no JS handler), unwind the JIT trampoline's machine frame and
    // return to the trampoline's caller.
    JITStubCall stubCall(this, JITStubs::cti_op_throw);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
    ASSERT(regT0 == returnValueRegister);
#if PLATFORM(X86_64)
    // NOTE(review): the 0x48 stack adjustment and pop sequence must mirror the
    // trampoline prologue exactly — keep in sync if that prologue changes.
    addPtr(Imm32(0x48), X86::esp);
    pop(X86::ebx);
    pop(X86::r15);
    pop(X86::r14);
    pop(X86::r13);
    pop(X86::r12);
    pop(X86::ebp);
    ret();
#else
    // NOTE(review): likewise, 0x1c and these pops must match the 32-bit prologue.
    addPtr(Imm32(0x1c), X86::esp);
    pop(X86::ebx);
    pop(X86::edi);
    pop(X86::esi);
    pop(X86::ebp);
    ret();
#endif
}
+
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    // op_next_pname dst(r1) iter(r2) target(offset3): fetch the next property name
    // from the for-in iterator; on success store it and jump back to the loop top,
    // otherwise (stub returned null) fall through out of the loop.
    JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.call();
    Jump endOfIter = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
    addJump(jump(), currentInstruction[3].u.operand + 3);
    endOfIter.link(this);
}
+
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    // op_push_scope scope(r1): push an object onto the scope chain via the stub;
    // the stub's result (the scope object) is written back into the same register.
    JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}
+
void JIT::emit_op_pop_scope(Instruction*)
{
    // op_pop_scope: pop the innermost scope off the scope chain via the C++ stub.
    JITStubCall(this, JITStubs::cti_op_pop_scope).call();
}
+
void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    // op_stricteq: delegate to the shared strict-equality compiler.
    compileOpStrictEq(currentInstruction, OpStrictEq);
}
+
+// op_nstricteq — same generator as op_stricteq, with the result inverted.
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+    compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+// op_to_jsnumber dst(1), src(2).
+// Fast path succeeds when src is already a number: an immediate integer
+// skips all checks; a cell must have type NumberType (checked via its
+// Structure). Anything else — a non-cell non-integer immediate, or a
+// non-number cell — takes the slow case, which performs the real conversion.
+// On the fast path the value passes through unchanged into dst.
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+    int srcVReg = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(srcVReg, regT0);
+    
+    Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+    // Not an immediate int: must be a JSCell whose structure says NumberType.
+    emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+    
+    wasImmediate.link(this);
+
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+// op_push_new_scope dst(1), property(2), value(3).
+// Stub call: passes the identifier (by pointer into the CodeBlock's
+// identifier table) and the value; the stub's result lands in dst.
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+// op_catch exception(1) — entry point of a catch block.
+// Control arrives here from the throw machinery, so the callFrame register
+// must be reloaded from the JIT stack frame before anything else. The value
+// stored into operand 1 comes from regT0, which holds the caught exception
+// at this point (NOTE(review): set by the throw path — confirm).
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+    emitGetCTIParam(offsetof(struct JITStackFrame, callFrame) / sizeof (void*), callFrameRegister);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+// op_jmp_scopes count(1), target(2).
+// Pops 'count' scopes via the stub, then performs an unconditional jump to
+// the bytecode target ("+ 2" adjusts the branch operand past the instruction
+// header, matching the other jump opcodes). The target is recorded so the
+// linker knows it is a jump destination.
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.call();
+    addJump(jump(), currentInstruction[2].u.operand + 2);
+    RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+}
+
+// op_switch_imm tableIndex(1), defaultOffset(2), scrutinee(3).
+// Sizes the table's CTI-offset array to match its bytecode branch offsets and
+// records a SwitchRecord so the offsets are filled in with machine-code
+// addresses at link time. At run time the stub resolves the scrutinee against
+// the table and returns the destination address, which we jump to indirectly.
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // create jump table for switch destinations, track this switch statement.
+    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
+
+// op_switch_char tableIndex(1), defaultOffset(2), scrutinee(3).
+// Identical structure to op_switch_imm, but keyed off the character switch
+// jump table: record a SwitchRecord for link-time patching, then let the stub
+// compute the destination address and jump to it indirectly via regT0.
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // create jump table for switch destinations, track this switch statement.
+    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
+
+// op_switch_string tableIndex(1), defaultOffset(2), scrutinee(3).
+// String variant of the switch opcodes. Uses a StringJumpTable, which manages
+// its own offset storage (no ctiOffsets.grow() here, unlike the
+// SimpleJumpTable cases). The stub returns the destination address in regT0.
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // create jump table for switch destinations, track this switch statement.
+    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
+
+// op_new_error dst(1), type(2), message(3).
+// Passes the error type, the (pre-encoded) constant message value, and the
+// current bytecode index — so the stub can attach source location info — and
+// stores the constructed error object into dst.
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand))));
+    stubCall.addArgument(Imm32(m_bytecodeIndex));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+// op_debug — forwards all three operands to the debugger hook stub; no result.
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_debug);
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.call();
+}
+
+// op_eq_null dst(1), src(2) — loose equality against null/undefined.
+// Cell path: a cell compares equal to null only if its structure has the
+// MasqueradesAsUndefined flag (e.g. document.all-style objects).
+// Immediate path: masking off ExtendedTagBitUndefined folds undefined onto
+// null, so a single comparison against the null tag covers both values.
+// The 0/1 result is then tagged as a boolean and stored into dst.
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned src1 = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src1, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+    Jump wasNotImmediate = jump();
+
+    isImmediate.link(this);
+
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+    wasNotImmediate.link(this);
+
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+
+}
+
+// op_neq_null dst(1), src(2) — exact mirror of op_eq_null with the
+// comparison senses inverted (Zero instead of NonZero on the masquerade
+// test, NotEqual instead of Equal on the immediate tag compare).
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned src1 = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src1, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    // Cell: not-equal-to-null unless the structure masquerades as undefined.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+    Jump wasNotImmediate = jump();
+
+    isImmediate.link(this);
+
+    // Immediate: fold undefined onto null, then test for inequality.
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+    wasNotImmediate.link(this);
+
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+
+}
+
+// op_enter — function prologue: zero-initialize all local variable and
+// constant registers (see the comment below for why this matters to the GC).
+void JIT::emit_op_enter(Instruction*)
+{
+    // Even though CTI doesn't use them, we initialize our constant
+    // registers to zap stale pointers, to avoid unnecessarily prolonging
+    // object lifetime and increasing GC pressure.
+    size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+    for (size_t j = 0; j < count; ++j)
+        emitInitRegister(j);
+
+}
+
+// op_enter_with_activation dst(1) — like op_enter, but additionally creates
+// the activation object (for functions whose locals can be captured) and
+// stores it into dst.
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+    // Even though CTI doesn't use them, we initialize our constant
+    // registers to zap stale pointers, to avoid unnecessarily prolonging
+    // object lifetime and increasing GC pressure.
+    size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+    for (size_t j = 0; j < count; ++j)
+        emitInitRegister(j);
+
+    JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+// op_create_arguments — materializes the 'arguments' object via a stub.
+// m_numParameters == 1 means the function declares no formal parameters
+// (the count includes 'this'), so the cheaper no-params stub is used.
+void JIT::emit_op_create_arguments(Instruction*)
+{
+    if (m_codeBlock->m_numParameters == 1)
+        JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+    else
+        JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+}
+
+// op_convert_this this(1).
+// Fast path is a pure check: 'this' must be a cell whose structure does NOT
+// carry the NeedsThisConversion flag; either failure defers the actual
+// conversion to the slow case. The fast path leaves the value untouched.
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
+    addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+}
+
+// op_profile_will_call callee(1).
+// Loads the enabled-profiler reference from the JIT stack frame and skips
+// the stub call entirely when no profiler is attached, keeping the
+// no-profiler path to a load and a branch.
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+    emitGetCTIParam(FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*), regT0);
+    Jump noProfiler = branchTestPtr(Zero, Address(regT0));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT0);
+    stubCall.call();
+    noProfiler.link(this);
+
+}
+
+// op_profile_did_call callee(1) — mirror of op_profile_will_call: same
+// cheap profiler-enabled guard, different notification stub.
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+    emitGetCTIParam(FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*), regT0);
+    Jump noProfiler = branchTestPtr(Zero, Address(regT0));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT0);
+    stubCall.call();
+    noProfiler.link(this);
+
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
index 3520d67..79ed25e 100644 (file)
@@ -44,6 +44,107 @@ using namespace std;
 
 namespace JSC {
 
+// op_put_by_id base(1), property(2), value(3).
+// Thin dispatcher into the shared hot-path generator; bumps the
+// property-access index so each access site gets its own patchable slot.
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+    compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, m_propertyAccessInstructionIndex++);
+}
+
+// op_get_by_id dst(1), base(2), property(3).
+// Thin dispatcher into the shared hot-path generator; bumps the
+// property-access index so each access site gets its own patchable slot.
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+    compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), m_propertyAccessInstructionIndex++);
+}
+
+// op_get_by_val dst(1), base(2), subscript(3).
+// Fast path handles only a JSArray base (vptr check) with an immediate-int
+// subscript below the array's fast-access cutoff; everything else bails to
+// the slow case. On success the element is read straight out of the vector.
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+    // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
+    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
+    // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
+    // size is always less than 4Gb).  As such zero extending will have been correct (and extending the value
+    // to 64-bits is necessary since it's used in the address calculation).  We zero extend rather than sign
+    // extending since it makes it easier to re-tag the value in the slow case.
+    zeroExtend32ToPtr(regT1, regT1);
+#else
+    emitFastArithImmToInt(regT1);
+#endif
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
+
+    // Get the value from the vector
+    loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+// op_put_by_val base(1), subscript(2), value(3).
+// Fast path requires a JSArray base (vptr check) and an immediate-int
+// subscript. An index below the fast-access cutoff writes directly; an index
+// within the allocated vector is also written directly, but only if that slot
+// is already occupied (a first write must go through the slow case so the
+// array's bookkeeping — length, cutoff — can be updated).
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+    // See comment in op_get_by_val.
+    zeroExtend32ToPtr(regT1, regT1);
+#else
+    emitFastArithImmToInt(regT1);
+#endif
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+    Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
+    // No; oh well, check if the access is within the vector - if so, we may still be okay.
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
+
+    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff. 
+    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
+
+    // All good - put the value into the array.
+    inFastVector.link(this);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
+}
+
+// op_put_by_index base(1), index(2), value(3) — always a stub call; the
+// index is passed as a raw Imm32, not as a tagged JS value.
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call();
+}
+
+// op_put_getter base(1), property(2), function(3) — always a stub call;
+// the property identifier is passed by pointer into the CodeBlock's table.
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call();
+}
+
+// op_put_setter base(1), property(2), function(3) — mirror of op_put_getter
+// with the setter stub.
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call();
+}
+
+// op_del_by_id dst(1), base(2), property(3) — always a stub call; the
+// boolean result of the delete lands in dst.
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
+    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
 
 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)