2008-12-20 Gavin Barraclough <barraclough@apple.com>
author      barraclough@apple.com <barraclough@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
            Sat, 20 Dec 2008 10:11:31 +0000 (10:11 +0000)
committer   barraclough@apple.com <barraclough@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
            Sat, 20 Dec 2008 10:11:31 +0000 (10:11 +0000)

        Reviewed by Oliver Hunt.

        Port optimized property access generation to the MacroAssembler.

        * assembler/MacroAssembler.h:
        (JSC::MacroAssembler::AbsoluteAddress::AbsoluteAddress):
        (JSC::MacroAssembler::DataLabelPtr::repatch):
        (JSC::MacroAssembler::DataLabel32::DataLabel32):
        (JSC::MacroAssembler::DataLabel32::repatch):
        (JSC::MacroAssembler::Label::operator X86Assembler::JmpDst):
        (JSC::MacroAssembler::Jump::repatch):
        (JSC::MacroAssembler::JumpList::empty):
        (JSC::MacroAssembler::RepatchBuffer::link):
        (JSC::MacroAssembler::add32):
        (JSC::MacroAssembler::and32):
        (JSC::MacroAssembler::sub32):
        (JSC::MacroAssembler::loadPtrWithAddressRepatch):
        (JSC::MacroAssembler::storePtrWithAddressRepatch):
        (JSC::MacroAssembler::push):
        (JSC::MacroAssembler::ja32):
        (JSC::MacroAssembler::jePtr):
        (JSC::MacroAssembler::jnePtr):
        (JSC::MacroAssembler::jnePtrWithRepatch):
        (JSC::MacroAssembler::align):
        (JSC::MacroAssembler::differenceBetween):
        * assembler/X86Assembler.h:
        (JSC::X86Assembler::movl_rm_disp32):
        (JSC::X86Assembler::movl_mr_disp32):
        (JSC::X86Assembler::X86InstructionFormatter::oneByteOp_disp32):
        (JSC::X86Assembler::X86InstructionFormatter::memoryModRM):
        * jit/JIT.cpp:
        (JSC::ctiRepatchCallByReturnAddress):
        (JSC::JIT::privateCompileMainPass):
        (JSC::JIT::privateCompile):
        (JSC::JIT::privateCompileCTIMachineTrampolines):
        * jit/JIT.h:
        * jit/JITPropertyAccess.cpp:
        (JSC::JIT::compileGetByIdHotPath):
        (JSC::JIT::compileGetByIdSlowCase):
        (JSC::JIT::compilePutByIdHotPath):
        (JSC::JIT::compilePutByIdSlowCase):
        (JSC::resizePropertyStorage):
        (JSC::JIT::privateCompilePutByIdTransition):
        (JSC::JIT::patchGetByIdSelf):
        (JSC::JIT::patchPutByIdReplace):
        (JSC::JIT::privateCompilePatchGetArrayLength):
        (JSC::JIT::privateCompileGetByIdSelf):
        (JSC::JIT::privateCompileGetByIdProto):
        (JSC::JIT::privateCompileGetByIdSelfList):
        (JSC::JIT::privateCompileGetByIdProtoList):
        (JSC::JIT::privateCompileGetByIdChainList):
        (JSC::JIT::privateCompileGetByIdChain):
        (JSC::JIT::privateCompilePutByIdReplace):
        * wtf/RefCounted.h:
        (WTF::RefCountedBase::addressOfCount):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@39422 268f45cc-cd09-0410-ab3c-d52691b4dbfc

JavaScriptCore/ChangeLog
JavaScriptCore/assembler/MacroAssembler.h
JavaScriptCore/assembler/X86Assembler.h
JavaScriptCore/jit/JIT.cpp
JavaScriptCore/jit/JIT.h
JavaScriptCore/jit/JITPropertyAccess.cpp
JavaScriptCore/runtime/Structure.h
JavaScriptCore/wtf/RefCounted.h
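
The substance of the change is visible in the JIT.cpp and JITPropertyAccess.cpp diffs below: stub generation that previously emitted x86 instructions directly through the file-local __ macro (#define __ m_assembler.) now goes through the portable MacroAssembler interface, with jumps and data labels linked through a RepatchBuffer. A condensed before/after sketch of the pattern, abbreviated from the privateCompileGetByIdSelf hunk below (not a complete function):

    // Before: raw X86Assembler calls, x86-specific operands and JmpSrc jump handles.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    __ ret();

    // After: MacroAssembler operations on Address/Imm32 operands, returning Jump objects
    // that are later linked to their targets through a RepatchBuffer.
    Jump failureCases1 = jnz32(X86::eax, Imm32(JSImmediate::TagMask));
    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
    loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValue*)), X86::eax);
    ret();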

index b585c30..41b8e5e 100644 (file)
@@ -1,3 +1,61 @@
+2008-12-20  Gavin Barraclough  <barraclough@apple.com>
+
+        Reviewed by Oliver Hunt.
+
+        Port optimized property access generation to the MacroAssembler.
+
+        * assembler/MacroAssembler.h:
+        (JSC::MacroAssembler::AbsoluteAddress::AbsoluteAddress):
+        (JSC::MacroAssembler::DataLabelPtr::repatch):
+        (JSC::MacroAssembler::DataLabel32::DataLabel32):
+        (JSC::MacroAssembler::DataLabel32::repatch):
+        (JSC::MacroAssembler::Label::operator X86Assembler::JmpDst):
+        (JSC::MacroAssembler::Jump::repatch):
+        (JSC::MacroAssembler::JumpList::empty):
+        (JSC::MacroAssembler::RepatchBuffer::link):
+        (JSC::MacroAssembler::add32):
+        (JSC::MacroAssembler::and32):
+        (JSC::MacroAssembler::sub32):
+        (JSC::MacroAssembler::loadPtrWithAddressRepatch):
+        (JSC::MacroAssembler::storePtrWithAddressRepatch):
+        (JSC::MacroAssembler::push):
+        (JSC::MacroAssembler::ja32):
+        (JSC::MacroAssembler::jePtr):
+        (JSC::MacroAssembler::jnePtr):
+        (JSC::MacroAssembler::jnePtrWithRepatch):
+        (JSC::MacroAssembler::align):
+        (JSC::MacroAssembler::differenceBetween):
+        * assembler/X86Assembler.h:
+        (JSC::X86Assembler::movl_rm_disp32):
+        (JSC::X86Assembler::movl_mr_disp32):
+        (JSC::X86Assembler::X86InstructionFormatter::oneByteOp_disp32):
+        (JSC::X86Assembler::X86InstructionFormatter::memoryModRM):
+        * jit/JIT.cpp:
+        (JSC::ctiRepatchCallByReturnAddress):
+        (JSC::JIT::privateCompileMainPass):
+        (JSC::JIT::privateCompile):
+        (JSC::JIT::privateCompileCTIMachineTrampolines):
+        * jit/JIT.h:
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::compileGetByIdHotPath):
+        (JSC::JIT::compileGetByIdSlowCase):
+        (JSC::JIT::compilePutByIdHotPath):
+        (JSC::JIT::compilePutByIdSlowCase):
+        (JSC::resizePropertyStorage):
+        (JSC::JIT::privateCompilePutByIdTransition):
+        (JSC::JIT::patchGetByIdSelf):
+        (JSC::JIT::patchPutByIdReplace):
+        (JSC::JIT::privateCompilePatchGetArrayLength):
+        (JSC::JIT::privateCompileGetByIdSelf):
+        (JSC::JIT::privateCompileGetByIdProto):
+        (JSC::JIT::privateCompileGetByIdSelfList):
+        (JSC::JIT::privateCompileGetByIdProtoList):
+        (JSC::JIT::privateCompileGetByIdChainList):
+        (JSC::JIT::privateCompileGetByIdChain):
+        (JSC::JIT::privateCompilePutByIdReplace):
+        * wtf/RefCounted.h:
+        (WTF::RefCountedBase::addressOfCount):
+
 2008-12-19  Gustavo Noronha Silva  <gns@gnome.org>
 
         Reviewed by Holger Freyther.
index 5bc5406..6cfea5b 100644 (file)
@@ -119,14 +119,6 @@ public:
     //
     // Describes a complex addressing mode.
     struct BaseIndex {
-        BaseIndex(RegisterID base, RegisterID index, int32_t offset = 0)
-            : base(base)
-            , index(index)
-            , scale(TimesOne)
-            , offset(offset)
-        {
-        }
-
         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
             : base(base)
             , index(index)
@@ -141,6 +133,19 @@ public:
         int32_t offset;
     };
 
+    // AbsoluteAddress:
+    //
+    // Describes a memory operand given by a pointer.  For regular load & store
+    // operations an unwrapped void* will be used, rather than using this.
+    struct AbsoluteAddress {
+        explicit AbsoluteAddress(void* ptr)
+            : m_ptr(ptr)
+        {
+        }
+
+        void* m_ptr;
+    };
+
 
     class Jump;
     class RepatchBuffer;
@@ -163,6 +168,42 @@ public:
         {
         }
 
+#if !PLATFORM(X86_64)
+        static void repatch(void* address, void* value)
+        {
+            X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(address), reinterpret_cast<uint32_t>(value));
+        }
+#endif
+
+    private:
+        X86Assembler::JmpDst m_label;
+    };
+
+    // DataLabel32:
+    //
+    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+    // repatched after the code has been generated.
+    class DataLabel32 {
+        friend class MacroAssembler;
+        friend class RepatchBuffer;
+
+    public:
+        DataLabel32()
+        {
+        }
+
+        DataLabel32(MacroAssembler* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+#if !PLATFORM(X86_64)
+        static void repatch(void* address, int32_t value)
+        {
+            X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(address), value);
+        }
+#endif
+
     private:
         X86Assembler::JmpDst m_label;
     };
@@ -185,6 +226,12 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
+        
+        // FIXME: transitional method, while we replace JmpSrcs with Jumps.
+        operator X86Assembler::JmpDst()
+        {
+            return m_label;
+        }
 
     private:
         X86Assembler::JmpDst m_label;
@@ -237,6 +284,11 @@ public:
             return m_jmp;
         }
 
+        static void repatch(void* address, void* destination)
+        {
+            X86Assembler::repatchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
+        }
+
     private:
         X86Assembler::JmpSrc m_jmp;
     };
@@ -246,6 +298,8 @@ public:
     // A JumpList is a set of Jump objects.
     // All jumps in the set will be linked to the same destination.
     class JumpList {
+        friend class RepatchBuffer;
+
     public:
         void link(MacroAssembler* masm)
         {
@@ -273,8 +327,13 @@ public:
             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
         }
 
+        bool empty()
+        {
+            return !m_jumps.size();
+        }
+
     private:
-        Vector<Jump> m_jumps;
+        Vector<Jump, 16> m_jumps;
     };
 
 
@@ -306,12 +365,18 @@ public:
         {
             X86Assembler::link(m_code, jump.m_jmp, target);
         }
-        
+
+        void link(JumpList list, void* target)
+        {
+            for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+                X86Assembler::link(m_code, list.m_jumps[i], target);
+        }
+
         void* addressOf(Jump jump)
         {
             return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
         }
-        
+
         void* addressOf(Label label)
         {
             return X86Assembler::getRelocatedAddress(m_code, label.m_label);
@@ -388,21 +453,18 @@ public:
         m_assembler.addl_ir(imm.m_value, dest);
     }
     
-    void add32(Address src, RegisterID dest)
+#if !PLATFORM(X86_64)
+    void add32(Imm32 imm, AbsoluteAddress address)
     {
-        m_assembler.addl_mr(src.offset, src.base, dest);
+        m_assembler.addl_im(imm.m_value, address.m_ptr);
     }
+#endif
     
-    void and32(RegisterID src, RegisterID dest)
-    {
-        m_assembler.andl_rr(src, dest);
-    }
-
-    void and32(Imm32 imm, RegisterID dest)
+    void add32(Address src, RegisterID dest)
     {
-        m_assembler.andl_ir(imm.m_value, dest);
+        m_assembler.addl_mr(src.offset, src.base, dest);
     }
-
+    
     void andPtr(RegisterID src, RegisterID dest)
     {
 #if PLATFORM(X86_64)
@@ -421,6 +483,16 @@ public:
 #endif
     }
 
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andl_rr(src, dest);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.andl_ir(imm.m_value, dest);
+    }
+
     void lshift32(Imm32 imm, RegisterID dest)
     {
         m_assembler.shll_i8r(imm.m_value, dest);
@@ -523,6 +595,13 @@ public:
         m_assembler.subl_ir(imm.m_value, dest);
     }
     
+#if !PLATFORM(X86_64)
+    void sub32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.subl_im(imm.m_value, address.m_ptr);
+    }
+#endif
+    
     void sub32(Address src, RegisterID dest)
     {
         m_assembler.subl_mr(src.offset, src.base, dest);
@@ -564,6 +643,14 @@ public:
 #endif
     }
 
+#if !PLATFORM(X86_64)
+    DataLabel32 loadPtrWithAddressRepatch(Address address, RegisterID dest)
+    {
+        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+        return DataLabel32(this);
+    }
+#endif
+
     void loadPtr(BaseIndex address, RegisterID dest)
     {
 #if PLATFORM(X86_64)
@@ -627,6 +714,14 @@ public:
 #endif
     }
 
+#if !PLATFORM(X86_64)
+    DataLabel32 storePtrWithAddressRepatch(RegisterID src, Address address)
+    {
+        m_assembler.movl_rm_disp32(src, address.offset, address.base);
+        return DataLabel32(this);
+    }
+#endif
+
     void storePtr(RegisterID src, BaseIndex address)
     {
 #if PLATFORM(X86_64)
@@ -705,6 +800,16 @@ public:
         m_assembler.push_r(src);
     }
 
+    void push(Address address)
+    {
+        m_assembler.push_m(address.offset, address.base);
+    }
+
+    void push(Imm32 imm)
+    {
+        m_assembler.push_i32(imm.m_value);
+    }
+
     void pop()
     {
         addPtr(Imm32(sizeof(void*)), X86::esp);
@@ -879,6 +984,12 @@ private:
 #endif
 
 public:
+    Jump ja32(RegisterID left, Imm32 right)
+    {
+        compareImm32ForBranch(left, right.m_value);
+        return Jump(m_assembler.ja());
+    }
+    
     Jump jae32(RegisterID left, Imm32 right)
     {
         compareImm32ForBranch(left, right.m_value);
@@ -913,6 +1024,16 @@ public:
 #endif
     }
 
+    Jump jePtr(RegisterID reg, ImmPtr imm)
+    {
+#if PLATFORM(X86_64)
+        move(imm, scratchRegister);
+        return jePtr(scratchRegister, reg);
+#else
+        return je32(reg, Imm32(reinterpret_cast<int32_t>(imm.m_value)));
+#endif
+    }
+
     Jump je32(RegisterID op1, RegisterID op2)
     {
         m_assembler.cmpl_rr(op1, op2);
@@ -1031,6 +1152,23 @@ public:
 #endif
     }
 
+#if !PLATFORM(X86_64)
+    Jump jnePtr(AbsoluteAddress address, ImmPtr imm)
+    {
+        m_assembler.cmpl_im(reinterpret_cast<uint32_t>(imm.m_value), address.m_ptr);
+        return Jump(m_assembler.jne());
+    }
+#endif
+
+#if !PLATFORM(X86_64)
+    Jump jnePtrWithRepatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
+    {
+        m_assembler.cmpl_im_force32(reinterpret_cast<int32_t>(initialValue.m_value), address.offset, address.base);
+        dataLabel = DataLabelPtr(this);
+        return Jump(m_assembler.jne());
+    }
+#endif
+
     Jump jne32(RegisterID op1, RegisterID op2)
     {
         m_assembler.cmpl_rr(op1, op2);
@@ -1296,6 +1434,32 @@ public:
     {
         return Label(this);
     }
+    
+    Label align()
+    {
+        m_assembler.align(16);
+        return Label(this);
+    }
+
+    ptrdiff_t differenceBetween(Label from, Jump to)
+    {
+        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+    }
+
+    ptrdiff_t differenceBetween(Label from, Label to)
+    {
+        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+    {
+        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+    {
+        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
 
     void ret()
     {
index b6ee611..d1f10af 100644 (file)
@@ -696,6 +696,11 @@ public:
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
     }
 
+    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+    }
+
     void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
@@ -716,6 +721,11 @@ public:
         m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
     }
 
+    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+    }
+
     void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
     {
         m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
@@ -1115,7 +1125,9 @@ public:
     
     static void repatchBranchOffset(intptr_t where, void* destination)
     {
-        reinterpret_cast<intptr_t*>(where)[-1] = (reinterpret_cast<intptr_t>(destination) - where);
+        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
+        ASSERT(offset == static_cast<int32_t>(offset));
+        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
     }
     
     void* executableCopy(ExecutablePool* allocator)
@@ -1185,6 +1197,14 @@ private:
             memoryModRM(reg, base, offset);
         }
 
+        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIfNeeded(reg, 0, base);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM_disp32(reg, base, offset);
+        }
+
         void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
             m_buffer.ensureSpace(maxInstructionSize);
@@ -1485,6 +1505,22 @@ private:
             }
         }
     
+        void memoryModRM_disp32(int reg, RegisterID base, int offset)
+        {
+            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if PLATFORM(X86_64)
+            if ((base == hasSib) || (base == hasSib2)) {
+#else
+            if (base == hasSib) {
+#endif
+                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+                m_buffer.putIntUnchecked(offset);
+            } else {
+                putModRm(ModRmMemoryDisp32, reg, base);
+                m_buffer.putIntUnchecked(offset);
+            }
+        }
+    
         void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
         {
             ASSERT(index != noIndex);
index cce6157..05a99fd 100644 (file)
@@ -40,8 +40,6 @@
 #include <stdio.h>
 #endif
 
-#define __ m_assembler.
-
 using namespace std;
 
 namespace JSC {
@@ -198,7 +196,7 @@ void ctiSetReturnAddress(void** where, void* what)
 
 void ctiRepatchCallByReturnAddress(void* where, void* what)
 {
-    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
+    MacroAssembler::Jump::repatch(where, what);
 }
 
 JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
@@ -347,8 +345,8 @@ void JIT::privateCompileMainPass()
             if (m_codeBlock->needsFullScopeChain())
                 emitCTICall(Interpreter::cti_op_end);
             emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
-            __ push_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), callFrameRegister);
-            __ ret();
+            push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+            ret();
             NEXT_OPCODE(op_end);
         }
         case op_jmp: {
@@ -587,8 +585,8 @@ void JIT::privateCompileMainPass()
             emitGetFromCallFrameHeader(RegisterFile::CallerFrame, callFrameRegister);
 
             // Return.
-            __ push_r(X86::edx);
-            __ ret();
+            push(X86::edx);
+            ret();
 
             NEXT_OPCODE(op_ret);
         }
@@ -1806,7 +1804,7 @@ void JIT::privateCompile()
 #endif
 
     // Could use a pop_m, but would need to offset the following instruction if so.
-    __ pop_r(X86::ecx);
+    pop(X86::ecx);
     emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
 
     Jump slowRegisterFileCheck;
@@ -1921,50 +1919,47 @@ void JIT::privateCompileCTIMachineTrampolines()
 {
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     // (1) The first function provides fast property access for array length
-    X86Assembler::JmpDst arrayLengthBegin = __ align(16);
-    
+    Label arrayLengthBegin = align();
+
     // Check eax is an array
-    X86Assembler::JmpSrc array_failureCases1 = emitJumpIfNotJSCell(X86::eax);
-    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), 0, X86::eax);
-    X86Assembler::JmpSrc array_failureCases2 = __ jne();
+    Jump array_failureCases1 = emitJumpIfNotJSCell(X86::eax);
+    Jump array_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
 
     // Checks out okay! - get the length from the storage
-    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
-    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::eax);
+    load32(Address(X86::eax, FIELD_OFFSET(ArrayStorage, m_length)), X86::eax);
 
-    __ cmpl_ir(JSImmediate::maxImmediateInt, X86::eax);
-    X86Assembler::JmpSrc array_failureCases3 = __ ja();
+    Jump array_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
 
-    __ addl_rr(X86::eax, X86::eax);
-    __ addl_ir(1, X86::eax);
+    add32(X86::eax, X86::eax);
+    add32(Imm32(1), X86::eax);
     
-    __ ret();
+    ret();
 
     // (2) The second function provides fast property access for string length
-    X86Assembler::JmpDst stringLengthBegin = __ align(16);
+    Label stringLengthBegin = align();
 
     // Check eax is a string
-    X86Assembler::JmpSrc string_failureCases1 = emitJumpIfNotJSCell(X86::eax);
-    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsStringVptr), 0, X86::eax);
-    X86Assembler::JmpSrc string_failureCases2 = __ jne();
+    Jump string_failureCases1 = emitJumpIfNotJSCell(X86::eax);
+    Jump string_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsStringVptr));
 
     // Checks out okay! - get the length from the Ustring.
-    __ movl_mr(FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep), X86::eax, X86::eax);
-    __ movl_mr(FIELD_OFFSET(UString::Rep, len), X86::eax, X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), X86::eax);
+    load32(Address(X86::eax, FIELD_OFFSET(UString::Rep, len)), X86::eax);
 
-    __ cmpl_ir(JSImmediate::maxImmediateInt, X86::eax);
-    X86Assembler::JmpSrc string_failureCases3 = __ ja();
+    Jump string_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
 
-    __ addl_rr(X86::eax, X86::eax);
-    __ addl_ir(1, X86::eax);
+    add32(X86::eax, X86::eax);
+    add32(Imm32(1), X86::eax);
     
-    __ ret();
+    ret();
 #endif
 
     // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
     
-    X86Assembler::JmpDst virtualCallPreLinkBegin = __ align(16);
+    Label virtualCallPreLinkBegin = align();
 
+#define __ m_assembler.
     // Load the callee CodeBlock* into eax
     __ movl_mr(FIELD_OFFSET(JSFunction, m_body), X86::ecx, X86::eax);
     __ movl_mr(FIELD_OFFSET(FunctionBodyNode, m_code), X86::eax, X86::eax);
@@ -2002,7 +1997,7 @@ void JIT::privateCompileCTIMachineTrampolines()
 
     __ jmp_r(X86::eax);
 
-    X86Assembler::JmpDst virtualCallLinkBegin = __ align(16);
+    Label virtualCallLinkBegin = align();
 
     // Load the callee CodeBlock* into eax
     __ movl_mr(FIELD_OFFSET(JSFunction, m_body), X86::ecx, X86::eax);
@@ -2041,7 +2036,7 @@ void JIT::privateCompileCTIMachineTrampolines()
 
     __ jmp_r(X86::eax);
 
-    X86Assembler::JmpDst virtualCallBegin = __ align(16);
+    Label virtualCallBegin = align();
 
     // Load the callee CodeBlock* into eax
     loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
@@ -2076,21 +2071,21 @@ void JIT::privateCompileCTIMachineTrampolines()
     jump(X86::eax);
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    m_interpreter->m_executablePool = m_globalData->poolForSize(__ size());
-    void* code = __ executableCopy(m_interpreter->m_executablePool.get());
+    m_interpreter->m_executablePool = m_globalData->poolForSize(m_assembler.size());
+    void* code = m_assembler.executableCopy(m_interpreter->m_executablePool.get());
+    RepatchBuffer repatchBuffer(code);
 
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    X86Assembler::link(code, array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
-    X86Assembler::link(code, array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
-    X86Assembler::link(code, array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
-    X86Assembler::link(code, string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
-    X86Assembler::link(code, string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
-    X86Assembler::link(code, string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
-
-    m_interpreter->m_ctiArrayLengthTrampoline = X86Assembler::getRelocatedAddress(code, arrayLengthBegin);
-    m_interpreter->m_ctiStringLengthTrampoline = X86Assembler::getRelocatedAddress(code, stringLengthBegin);
-#endif    
-
+    repatchBuffer.link(array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    repatchBuffer.link(array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    repatchBuffer.link(array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    repatchBuffer.link(string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+    repatchBuffer.link(string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+    repatchBuffer.link(string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+
+    m_interpreter->m_ctiArrayLengthTrampoline = repatchBuffer.addressOf(arrayLengthBegin);
+    m_interpreter->m_ctiStringLengthTrampoline = repatchBuffer.addressOf(stringLengthBegin);
+#endif
     X86Assembler::link(code, callArityCheck1, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
     X86Assembler::link(code, callArityCheck2, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
     X86Assembler::link(code, callArityCheck3, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
@@ -2100,9 +2095,9 @@ void JIT::privateCompileCTIMachineTrampolines()
     X86Assembler::link(code, callDontLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall));
     X86Assembler::link(code, callLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall));
 
-    m_interpreter->m_ctiVirtualCallPreLink = X86Assembler::getRelocatedAddress(code, virtualCallPreLinkBegin);
-    m_interpreter->m_ctiVirtualCallLink = X86Assembler::getRelocatedAddress(code, virtualCallLinkBegin);
-    m_interpreter->m_ctiVirtualCall = X86Assembler::getRelocatedAddress(code, virtualCallBegin);
+    m_interpreter->m_ctiVirtualCallPreLink = repatchBuffer.addressOf(virtualCallPreLinkBegin);
+    m_interpreter->m_ctiVirtualCallLink = repatchBuffer.addressOf(virtualCallLinkBegin);
+    m_interpreter->m_ctiVirtualCall = repatchBuffer.addressOf(virtualCallBegin);
 }
 
 void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
index 7d81f41..b100013 100644 (file)
@@ -238,6 +238,7 @@ namespace JSC {
         static const int repatchOffsetGetByIdStructure = 7;
         static const int repatchOffsetGetByIdBranchToSlowCase = 13;
         static const int repatchOffsetGetByIdPropertyMapOffset = 22;
+        static const int repatchOffsetGetByIdPutResult = 22;
 #if ENABLE(OPCODE_SAMPLING)
         static const int repatchOffsetGetByIdSlowCaseCall = 27 + 4 + ctiArgumentInitSize;
 #else
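
The repatch offset constants above record where, relative to the start of the hot path, the planted structure pointer, the branch to the slow case, the property displacement, and the result store sit; generation code asserts these offsets and repatching code reuses them. A condensed sketch, taken from the compileGetByIdHotPath and patchGetByIdSelf hunks below (not complete functions):

    // Generation time: plant a repatchable structure check and a displacement load,
    // then assert that the labels fall at the agreed offsets from hotPathBegin.
    DataLabelPtr structureToCompare;
    Jump structureCheck = jnePtrWithRepatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(repatchGetByIdDefaultStructure)));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == repatchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == repatchOffsetGetByIdBranchToSlowCase);

    // Patch time: recompute the same addresses from hotPathBegin and overwrite the
    // planted immediate and displacement in place.
    void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdStructure);
    void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset);
    DataLabelPtr::repatch(structureAddress, structure);
    DataLabel32::repatch(displacementAddress, cachedOffset * sizeof(JSValue*));
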
index 269ebc6..2182a8b 100644 (file)
 #include <stdio.h>
 #endif
 
-#define __ m_assembler.
-
 using namespace std;
 
 namespace JSC {
 
-typedef X86Assembler::JmpSrc JmpSrc;
-typedef X86Assembler::JmpDst JmpDst;
-
 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
 
 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
@@ -104,17 +99,21 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsig
 
     emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
 
-    JmpDst hotPathBegin = __ label();
+    Label hotPathBegin(this);
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
 
-    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
-    addSlowCase(__ jne());
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);
+    DataLabelPtr structureToCompare;
+    Jump structureCheck = jnePtrWithRepatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(repatchGetByIdDefaultStructure)));
+    addSlowCase(structureCheck);
+    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == repatchOffsetGetByIdStructure);
+    ASSERT(differenceBetween(hotPathBegin, structureCheck) == repatchOffsetGetByIdBranchToSlowCase);
 
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    DataLabel32 displacementLabel = loadPtrWithAddressRepatch(Address(X86::eax, repatchGetByIdDefaultOffset), X86::eax);
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == repatchOffsetGetByIdPropertyMapOffset);
+
+    Label putResult(this);
+    ASSERT(differenceBetween(hotPathBegin, putResult) == repatchOffsetGetByIdPutResult);
     emitPutVirtualRegister(resultVReg);
 }
 
@@ -131,14 +130,15 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
     linkSlowCase(iter);
 
 #ifndef NDEBUG
-    JmpDst coldPathBegin = __ label();
+    Label coldPathBegin(this);
 #endif
     emitPutJITStubArg(X86::eax, 1);
     emitPutJITStubArgConstant(reinterpret_cast<unsigned>(ident), 2);
-    JmpSrc call = emitCTICall(Interpreter::cti_op_get_by_id);
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
+    Jump call = emitCTICall(Interpreter::cti_op_get_by_id);
     emitPutVirtualRegister(resultVReg);
 
+    ASSERT(differenceBetween(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
+
     // Track the location of the call; this will be used to recover repatch information.
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
 }
@@ -154,18 +154,18 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsign
     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
     emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
 
-    JmpDst hotPathBegin = __ label();
+    Label hotPathBegin(this);
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
 
     // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
-    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
-    addSlowCase(__ jne());
+    DataLabelPtr structureToCompare;
+    addSlowCase(jnePtrWithRepatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(repatchGetByIdDefaultStructure))));
+    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == repatchOffsetPutByIdStructure);
 
     // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
-    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    DataLabel32 displacementLabel = storePtrWithAddressRepatch(X86::edx, Address(X86::eax, repatchGetByIdDefaultOffset));
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == repatchOffsetPutByIdPropertyMapOffset);
 }
 
 void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
@@ -176,16 +176,21 @@ void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<Sl
     emitPutJITStubArgConstant(reinterpret_cast<unsigned>(ident), 2);
     emitPutJITStubArg(X86::eax, 1);
     emitPutJITStubArg(X86::edx, 3);
-    JmpSrc call = emitCTICall(Interpreter::cti_op_put_by_id);
+    Jump call = emitCTICall(Interpreter::cti_op_put_by_id);
 
     // Track the location of the call; this will be used to recover repatch information.
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
 }
 
-static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
+struct JsObjectJSValue {
+    JSObject* obj;
+    JSValue* val;
+};
+static JsObjectJSValue resizePropertyStorage(JSObject* baseObject, JSValue* valueBeingPut, size_t oldSize, size_t newSize)
 {
     baseObject->allocatePropertyStorageInline(oldSize, newSize);
-    return baseObject;
+    JsObjectJSValue objVal = { baseObject, valueBeingPut };
+    return objVal;
 }
 
 static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
@@ -195,107 +200,105 @@ static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Str
 
 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
 {
-    Vector<JmpSrc, 16> failureCases;
+    JumpList failureCases;
     // Check eax is an object of the right Structure.
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    failureCases.append(__ jne());
-    __ cmpl_im(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
-    failureCases.append(__ jne());
-    Vector<JmpSrc> successCases;
+    failureCases.append(jnz32(X86::eax, Imm32(JSImmediate::TagMask)));
+    failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
+    JumpList successCases;
 
     //  ecx = baseObject
-    __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
     // proto(ecx) = baseObject->structure()->prototype()
-    __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
-    failureCases.append(__ jne());
-    __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
+    failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
     
     // ecx = baseObject->m_structure
     for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
         // null check the prototype
-        __ cmpl_ir(asInteger(jsNull()), X86::ecx);
-        successCases.append(__ je());
+        successCases.append(jePtr(X86::ecx, ImmPtr(jsNull())));
 
         // Check the structure id
-        __ cmpl_im(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
-        failureCases.append(__ jne());
+        failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
         
-        __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
-        __ cmpl_im(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
-        failureCases.append(__ jne());
-        __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
+        loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+        failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+        loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
     }
 
-    failureCases.append(__ jne());
-    for (unsigned i = 0; i < successCases.size(); ++i)
-        __ link(successCases[i], __ label());
+    successCases.link(this);
 
-    JmpSrc callTarget;
+    Jump callTarget;
 
     // emit a call only if storage realloc is needed
     if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
-        __ push_r(X86::edx);
-        __ push_i32(newStructure->propertyStorageCapacity());
-        __ push_i32(oldStructure->propertyStorageCapacity());
-        __ push_r(X86::eax);
-        callTarget = __ call();
-        __ addl_ir(3 * sizeof(void*), X86::esp);
-        __ pop_r(X86::edx);
+        push(Imm32(newStructure->propertyStorageCapacity()));
+        push(Imm32(oldStructure->propertyStorageCapacity()));
+        push(X86::edx);
+        push(X86::eax);
+        callTarget = call();
+        addPtr(Imm32(4 * sizeof(void*)), X86::esp);
     }
 
     // Assumes m_refCount can be decremented easily, refcount decrement is safe as 
     // codeblock should ensure oldStructure->m_refCount > 0
-    __ subl_im(1, reinterpret_cast<void*>(oldStructure));
-    __ addl_im(1, reinterpret_cast<void*>(newStructure));
-    __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
+    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+    storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)));
 
     // write the value
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValue*)));
 
-    __ ret();
+    ret();
     
-    JmpSrc failureJump;
-    if (failureCases.size()) {
-        for (unsigned i = 0; i < failureCases.size(); ++i)
-            __ link(failureCases[i], __ label());
+    Jump failureJump;
+    bool plantedFailureJump = false;
+    if (!failureCases.empty()) {
+        failureCases.link(this);
         restoreArgumentReferenceForTrampoline();
-        failureJump = __ jmp();
+        failureJump = jump();
+        plantedFailureJump = true;
     }
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
-    if (failureCases.size())
-        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+    if (plantedFailureJump)
+        repatchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
 
     if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
-        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
+        repatchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage));
     
     stubInfo->stubRoutine = code;
     
-    ctiRepatchCallByReturnAddress(returnAddress, code);
+    Jump::repatch(returnAddress, code);
 }
 
 void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
 {
     // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
     // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
-    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
 
     // Repatch the offset into the property map to load from, then repatch the Structure to look for.
-    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
-    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdStructure, reinterpret_cast<uint32_t>(structure));
+    void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdStructure);
+    void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset);
+    DataLabelPtr::repatch(structureAddress, structure);
+    DataLabel32::repatch(displacementAddress, cachedOffset * sizeof(JSValue*));
 }
 
 void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
 {
     // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
     // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
-    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
+    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
 
     // Repatch the offset into the property map to load from, then repatch the Structure to look for.
-    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
-    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetPutByIdStructure, reinterpret_cast<uint32_t>(structure));
+    void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetPutByIdStructure;
+    void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset;
+    DataLabelPtr::repatch(structureAddress, structure);
+    DataLabel32::repatch(displacementAddress, cachedOffset * sizeof(JSValue*));
 }
 
 void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
@@ -303,169 +306,168 @@ void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
 
     // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
-    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
 
     // Check eax is an array
-    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), 0, X86::eax);
-    JmpSrc failureCases1 = __ jne();
+    Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
 
     // Checks out okay! - get the length from the storage
-    __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
-    __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx);
 
-    __ cmpl_ir(JSImmediate::maxImmediateInt, X86::ecx);
-    JmpSrc failureCases2 = __ ja();
+    Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt));
 
-    __ addl_rr(X86::ecx, X86::ecx);
-    __ addl_ir(1, X86::ecx);
-    __ movl_rr(X86::ecx, X86::eax);
-    JmpSrc success = __ jmp();
+    add32(X86::ecx, X86::ecx);
+    add32(Imm32(1), X86::ecx);
+    signExtend32ToPtr(X86::ecx, X86::eax);
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
-    X86Assembler::link(code, failureCases1, slowCaseBegin);
-    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    repatchBuffer.link(failureCases1, slowCaseBegin);
+    repatchBuffer.link(failureCases2, slowCaseBegin);
 
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, hotPathPutResult);
 
     // Track the stub we have created so that it will be deleted later.
     stubInfo->stubRoutine = code;
 
     // Finally repatch the jump to slow case back in the hot path to jump here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 }
 
 void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
 {
     // Check eax is an object of the right Structure.
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    JmpSrc failureCases1 = __ jne();
-    JmpSrc failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = jnz32(X86::eax, Imm32(JSImmediate::TagMask));
+    Jump failureCases2 = checkStructure(X86::eax, structure);
 
     // Checks out okay! - getDirectOffset
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
-    __ ret();
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValue*)), X86::eax);
+    ret();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
-    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
-    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+    repatchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+    repatchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
 
     stubInfo->stubRoutine = code;
 
-    ctiRepatchCallByReturnAddress(returnAddress, code);
+    Jump::repatch(returnAddress, code);
 }
 
 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
 {
 #if USE(CTI_REPATCH_PIC)
     // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
-    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
 
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load its table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
+    loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx);
 
     // Check eax is an object of the right Structure.
-    JmpSrc failureCases1 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = checkStructure(X86::eax, structure);
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
-    JmpSrc failureCases2 = __ jne();
+    Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
 
     // Checks out okay! - getDirectOffset
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
 
-    JmpSrc success = __ jmp();
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
-    X86Assembler::link(code, failureCases1, slowCaseBegin);
-    X86Assembler::link(code, failureCases2, slowCaseBegin);
+    repatchBuffer.link(failureCases1, slowCaseBegin);
+    repatchBuffer.link(failureCases2, slowCaseBegin);
 
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, reinterpret_cast<void*>(successDest));
 
     // Track the stub we have created so that it will be deleted later.
     stubInfo->stubRoutine = code;
 
     // Finally repatch the jump to slow case back in the hot path to jump here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 #else
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load its table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
+    loadPtr(protoPropertyStorage, X86::edx);
 
     // Check eax is an object of the right Structure.
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    JmpSrc failureCases1 = __ jne();
-    JmpSrc failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = jne32(X86::eax, Imm32(JSImmediate::TagMask));
+    Jump failureCases2 = checkStructure(X86::eax, structure);
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
-    JmpSrc failureCases3 = __ jne();
+    Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
 
     // Checks out okay! - getDirectOffset
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
 
-    __ ret();
+    ret();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
-    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
-    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
-    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    repatchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    repatchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    repatchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
 
     stubInfo->stubRoutine = code;
 
-    ctiRepatchCallByReturnAddress(returnAddress, code);
+    Jump::repatch(returnAddress, code);
 #endif
 }
 
 #if USE(CTI_REPATCH_PIC)
 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
 {
-    JmpSrc failureCase = checkStructure(X86::eax, structure);
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
-    JmpSrc success = __ jmp();
+    Jump failureCase = checkStructure(X86::eax, structure);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValue*)), X86::eax);
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     ASSERT(code);
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
     if (!lastProtoBegin)
         lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
 
-    X86Assembler::link(code, failureCase, lastProtoBegin);
+    repatchBuffer.link(failureCase, lastProtoBegin);
 
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, reinterpret_cast<void*>(successDest));
 
     structure->ref();
     polymorphicStructures->list[currentIndex].set(code, structure);
 
     // Finally repatch the jump to slow case back in the hot path to jump here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 }
 
 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
@@ -474,49 +476,49 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     // referencing the prototype object - let's speculatively load its table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
+    loadPtr(protoPropertyStorage, X86::edx);
 
     // Check eax is an object of the right Structure.
-    JmpSrc failureCases1 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = checkStructure(X86::eax, structure);
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-    __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
-    JmpSrc failureCases2 = __ jne();
+    Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
 
     // Checks out okay! - getDirectOffset
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
 
-    JmpSrc success = __ jmp();
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-    X86Assembler::link(code, failureCases1, lastProtoBegin);
-    X86Assembler::link(code, failureCases2, lastProtoBegin);
+    repatchBuffer.link(failureCases1, lastProtoBegin);
+    repatchBuffer.link(failureCases2, lastProtoBegin);
 
     // On success, return to the hot path code, at the point where it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, reinterpret_cast<void*>(successDest));
 
     structure->ref();
     prototypeStructure->ref();
     prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);
 
     // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 }
 
 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
 {
     ASSERT(count);
     
-    Vector<JmpSrc> bucketsOfFail;
+    JumpList bucketsOfFail;
 
     // Check eax is an object of the right Structure.
-    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
+    Jump baseObjectCheck = checkStructure(X86::eax, structure);
     bucketsOfFail.append(baseObjectCheck);
 
     Structure* currStructure = structure;
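
The recurring change in these hunks is that a raw 32-bit compare of a Structure* held at a fixed address, followed by an explicit conditional jump, collapses into one portable MacroAssembler branch - pointer-width, presumably with an eye toward targets where a Structure* no longer fits in 32 bits. Roughly, the mapping is:

    // Before (raw X86Assembler, removed above):
    //     __ cmpl_im(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    //     JmpSrc failureCases2 = __ jne();
    // After: AbsoluteAddress wraps the fixed address, ImmPtr the expected Structure*.
    Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
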
@@ -528,27 +530,26 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
 
         // Check that the prototype object's Structure has not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
-        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
-        bucketsOfFail.append(__ jne());
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
     }
     ASSERT(protoObject);
 
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
-    JmpSrc success = __ jmp();
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
 
-    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
-        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);
+    repatchBuffer.link(bucketsOfFail, lastProtoBegin);
 
     // On success, return to the hot path code, at the point where it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, reinterpret_cast<void*>(successDest));
 
     // Track the stub we have created so that it will be deleted later.
     structure->ref();
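
The chain-walking stubs also change how failure branches are collected: a Vector<JmpSrc> that had to be linked one element at a time becomes a JumpList, which RepatchBuffer::link() retargets in a single call. The resulting idiom, using only names that appear in this patch:

    JumpList bucketsOfFail;
    bucketsOfFail.append(checkStructure(X86::eax, structure));
    // ... one append per prototype visited while walking the StructureChain ...
    bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));

    // After executableCopy(), one call replaces the old per-element X86Assembler::link() loop.
    repatchBuffer.link(bucketsOfFail, lastProtoBegin);
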
@@ -556,8 +557,8 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
     prototypeStructures->list[currentIndex].set(code, structure, chain);
 
     // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 }
 #endif
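
All of the PIC stubs above finish by wiring up the same three addresses, computed either from the existing stub list or as byte offsets from stubInfo->hotPathBegin: the failure exit chains to the previous stub (or to the slow-case call on a first miss), the success exit re-enters the inline code at the instruction that stores the result to dest (presumably the reason repatchOffsetGetByIdPropertyMapOffset becomes repatchOffsetGetByIdPutResult), and the inline slow-case branch is repointed at the new stub. Condensed into an illustrative fragment of the linking code above:

    RepatchBuffer repatchBuffer(code);
    repatchBuffer.link(failureCase, lastProtoBegin);      // miss: jump to the previous stub / slow case
    repatchBuffer.link(success, reinterpret_cast<void*>(  // hit: store the result back in the hot path
        reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult));
    Jump::repatch(reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase,
        code);                                            // future lookups branch straight to this stub
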
 
@@ -565,15 +566,14 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 {
 #if USE(CTI_REPATCH_PIC)
     // We don't want to repatch more than once - in future, misses go to cti_op_get_by_id_proto_list.
-    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
 
     ASSERT(count);
     
-    Vector<JmpSrc> bucketsOfFail;
+    JumpList bucketsOfFail;
 
     // Check eax is an object of the right Structure.
-    JmpSrc baseObjectCheck = checkStructure(X86::eax, structure);
-    bucketsOfFail.append(baseObjectCheck);
+    bucketsOfFail.append(checkStructure(X86::eax, structure));
 
     Structure* currStructure = structure;
     RefPtr<Structure>* chainEntries = chain->head();
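
The calls to ctiRepatchCallByReturnAddress(), which re-pointed the CTI slow-case call that returns to the given address, are replaced at these sites by the same Jump::repatch() primitive - workable, presumably, because on x86 both a near call and a near jump are relinked by rewriting the rel32 that ends at the given location. As used just above:

    // First miss for this get_by_id: stop repatching from this path; later misses
    // should instead build and extend the polymorphic proto list.
    Jump::repatch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
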
@@ -584,42 +584,40 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 
         // Check that the prototype object's Structure has not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
-        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
-        bucketsOfFail.append(__ jne());
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
     }
     ASSERT(protoObject);
 
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
-    JmpSrc success = __ jmp();
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
+    Jump success = jump();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
     // Use the repatch information to link the failure cases back to the original slow case routine.
     void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
 
-    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
-        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);
+    repatchBuffer.link(bucketsOfFail, slowCaseBegin);
 
     // On success, return to the hot path code, at the point where it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
-    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPutResult;
+    repatchBuffer.link(success, reinterpret_cast<void*>(successDest));
 
     // Track the stub we have created so that it will be deleted later.
     stubInfo->stubRoutine = code;
 
     // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
-    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
-    X86Assembler::repatchBranchOffset(jmpLocation, code);
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
+    Jump::repatch(jumpLocation, code);
 #else
     ASSERT(count);
     
-    Vector<JmpSrc> bucketsOfFail;
+    JumpList bucketsOfFail;
 
     // Check eax is an object of the right Structure.
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    bucketsOfFail.append(__ jne());
+    bucketsOfFail.append(jne32(X86::eax, Imm32(JSImmediate::TagMask)));
     bucketsOfFail.append(checkStructure(X86::eax, structure));
 
     Structure* currStructure = structure;
@@ -631,47 +629,46 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 
         // Check that the prototype object's Structure has not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
-        __ cmpl_im(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
-        bucketsOfFail.append(__ jne());
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
     }
     ASSERT(protoObject);
 
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
-    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
-    __ ret();
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValue*)), X86::eax);
+    ret();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
 
-    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
-        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    repatchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
 
     stubInfo->stubRoutine = code;
 
-    ctiRepatchCallByReturnAddress(returnAddress, code);
+    Jump::repatch(returnAddress, code);
 #endif
 }
 
 void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
 {
     // Check eax is an object of the right Structure.
-    __ testl_i32r(JSImmediate::TagMask, X86::eax);
-    JmpSrc failureCases1 = __ jne();
-    JmpSrc failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = jne32(X86::eax, Imm32(JSImmediate::TagMask));
+    Jump failureCases2 = checkStructure(X86::eax, structure);
 
     // Checks out okay! - putDirectOffset
-    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
-    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
-    __ ret();
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValue*)));
+    ret();
 
-    void* code = __ executableCopy(m_codeBlock->executablePool());
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    RepatchBuffer repatchBuffer(code);
     
-    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
-    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+    repatchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+    repatchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
 
     stubInfo->stubRoutine = code;
     
-    ctiRepatchCallByReturnAddress(returnAddress, code);
+    Jump::repatch(returnAddress, code);
 }
 
 #endif
index 6ef73a9..d002b22 100644 (file)
@@ -140,6 +140,11 @@ namespace JSC {
 
         void clearEnumerationCache();
 
+        void* addressOfCount()
+        {
+            return &m_refCount;
+        }
+
         static const unsigned emptyEntryIndex = 0;
     
         static const size_t s_maxTransitionLength = 64;
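
The hunk above (Structure) and the hunk below (the ref-counting base class) work as a pair: addressOfCount() hands out the raw address of the inherited reference count, and m_refCount is relaxed from private to protected so the derived class may take that address. The evident intent - an inference here, not something these hunks state - is to let JIT-generated code adjust a Structure's refcount in place rather than calling back into C++; a hypothetical MacroAssembler-style use (the exact add32/sub32 overloads are assumed):

    // Hypothetical: a generated stub retiring oldStructure in favour of newStructure
    // could ref/deref the two structures without leaving machine code.
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
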
index 2dd5b2a..ac8e167 100644 (file)
@@ -75,7 +75,7 @@ protected:
         return false;
     }
 
-private:
+protected:
     int m_refCount;
 #ifndef NDEBUG
     bool m_deletionHasBegun;