Remove excessive headers from JavaScriptCore
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h
index 2557b44..71facd2 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.h
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  */
 
-#ifndef AssemblyHelpers_h
-#define AssemblyHelpers_h
+#pragma once
 
 #if ENABLE(JIT)
 
 #include "CodeBlock.h"
 #include "FPRInfo.h"
 #include "GPRInfo.h"
+#include "Heap.h"
 #include "InlineCallFrame.h"
 #include "JITCode.h"
 #include "MacroAssembler.h"
+#include "MarkedSpace.h"
+#include "RegisterAtOffsetList.h"
+#include "RegisterSet.h"
+#include "TagRegistersMode.h"
 #include "TypeofType.h"
 #include "VM.h"
 
@@ -43,9 +47,8 @@ typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
 
 class AssemblyHelpers : public MacroAssembler {
 public:
-    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
-        : m_vm(vm)
-        , m_codeBlock(codeBlock)
+    AssemblyHelpers(CodeBlock* codeBlock)
+        : m_codeBlock(codeBlock)
         , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
     {
         if (m_codeBlock) {
@@ -54,9 +57,8 @@ public:
             ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
         }
     }
-    
+
     CodeBlock* codeBlock() { return m_codeBlock; }
-    VM* vm() { return m_vm; }
     AssemblerType_T& assembler() { return m_assembler; }
 
     void checkStackPointerAlignment()
@@ -80,6 +82,15 @@ public:
 #endif
     }
     
+    void loadCell(Address address, GPRReg gpr)
+    {
+#if USE(JSVALUE64)
+        load64(address, gpr);
+#else
+        load32(address.withOffset(PayloadOffset), gpr);
+#endif
+    }
+    
     void storeValue(JSValueRegs regs, Address address)
     {
 #if USE(JSVALUE64)
@@ -144,7 +155,49 @@ public:
         }
 #endif
     }
+
+    void loadValue(void* address, JSValueRegs regs)
+    {
+#if USE(JSVALUE64)
+        load64(address, regs.gpr());
+#else
+        load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset), regs.payloadGPR());
+        load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset), regs.tagGPR());
+#endif
+    }
     
+    // Note that this clobbers offset.
+    void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result);
+
+    void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
+    {
+#if USE(JSVALUE32_64)
+        if (destRegs.tagGPR() == srcRegs.payloadGPR()) {
+            if (destRegs.payloadGPR() == srcRegs.tagGPR()) {
+                swap(srcRegs.payloadGPR(), srcRegs.tagGPR());
+                return;
+            }
+            move(srcRegs.payloadGPR(), destRegs.payloadGPR());
+            move(srcRegs.tagGPR(), destRegs.tagGPR());
+            return;
+        }
+        move(srcRegs.tagGPR(), destRegs.tagGPR());
+        move(srcRegs.payloadGPR(), destRegs.payloadGPR());
+#else
+        move(srcRegs.gpr(), destRegs.gpr());
+#endif
+    }
+
+    void moveValue(JSValue value, JSValueRegs regs)
+    {
+#if USE(JSVALUE64)
+        move(Imm64(JSValue::encode(value)), regs.gpr());
+#else
+        move(Imm32(value.tag()), regs.tagGPR());
+        move(Imm32(value.payload()), regs.payloadGPR());
+#endif
+    }
+
     void moveTrustedValue(JSValue value, JSValueRegs regs)
     {
 #if USE(JSVALUE64)
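The 32-bit path of moveValueRegs() above has to order its moves around possible overlap between the source and destination register pairs. A minimal standalone model of that ordering logic, with references to plain ints standing in for GPRs (names invented for illustration, not JSC source):

    #include <utility>

    // Model of moveValueRegs' JSVALUE32_64 overlap handling: if dest.tag aliases
    // src.payload we must either swap (fully crossed pairs) or copy the payload
    // before the tag; otherwise the first move would clobber a value still needed.
    void moveValueRegsModel(int& srcTag, int& srcPayload, int& destTag, int& destPayload)
    {
        if (&destTag == &srcPayload) {
            if (&destPayload == &srcTag) {
                std::swap(srcPayload, srcTag); // fully crossed: swap is the only safe order
                return;
            }
            destPayload = srcPayload;          // payload first, then tag
            destTag = srcTag;
            return;
        }
        destTag = srcTag;                      // no hazardous overlap: tag first is fine
        destPayload = srcPayload;
    }
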
@@ -174,6 +227,222 @@ public:
         store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
 #endif
     }
+    
+    Address addressFor(const RegisterAtOffset& entry)
+    {
+        return Address(GPRInfo::callFrameRegister, entry.offset());
+    }
+    
+    void emitSave(const RegisterAtOffsetList& list)
+    {
+        for (const RegisterAtOffset& entry : list) {
+            if (entry.reg().isGPR())
+                storePtr(entry.reg().gpr(), addressFor(entry));
+            else
+                storeDouble(entry.reg().fpr(), addressFor(entry));
+        }
+    }
+    
+    void emitRestore(const RegisterAtOffsetList& list)
+    {
+        for (const RegisterAtOffset& entry : list) {
+            if (entry.reg().isGPR())
+                loadPtr(addressFor(entry), entry.reg().gpr());
+            else
+                loadDouble(addressFor(entry), entry.reg().fpr());
+        }
+    }
+
+    void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
+    {
+        ASSERT(codeBlock);
+
+        RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+        RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+        unsigned registerCount = calleeSaves->size();
+
+        for (unsigned i = 0; i < registerCount; i++) {
+            RegisterAtOffset entry = calleeSaves->at(i);
+            if (dontSaveRegisters.get(entry.reg()))
+                continue;
+            storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset()));
+        }
+    }
+    
+    enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame };
+
+    void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp)
+    {
+        ASSERT(codeBlock);
+        
+        RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+        RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+        unsigned registerCount = calleeSaves->size();
+
+#if USE(JSVALUE64)
+        RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
+#endif
+        
+        for (unsigned i = 0; i < registerCount; i++) {
+            RegisterAtOffset entry = calleeSaves->at(i);
+            if (dontSaveRegisters.get(entry.reg()))
+                continue;
+            
+            GPRReg registerToWrite;
+            
+#if USE(JSVALUE32_64)
+            UNUSED_PARAM(tagRegisterMode);
+            UNUSED_PARAM(temp);
+#else
+            if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) {
+                registerToWrite = temp;
+                loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite);
+            } else
+#endif
+                registerToWrite = entry.reg().gpr();
+
+            storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset()));
+        }
+    }
+    
+    void emitRestoreCalleeSavesFor(CodeBlock* codeBlock)
+    {
+        ASSERT(codeBlock);
+
+        RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+        RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+        unsigned registerCount = calleeSaves->size();
+        
+        for (unsigned i = 0; i < registerCount; i++) {
+            RegisterAtOffset entry = calleeSaves->at(i);
+            if (dontRestoreRegisters.get(entry.reg()))
+                continue;
+            loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr());
+        }
+    }
+
+    void emitSaveCalleeSaves()
+    {
+        emitSaveCalleeSavesFor(codeBlock());
+    }
+
+    void emitSaveThenMaterializeTagRegisters()
+    {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+        pushPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+        push(GPRInfo::tagTypeNumberRegister);
+        push(GPRInfo::tagMaskRegister);
+#endif
+        emitMaterializeTagCheckRegisters();
+#endif
+    }
+    
+    void emitRestoreCalleeSaves()
+    {
+        emitRestoreCalleeSavesFor(codeBlock());
+    }
+
+    void emitRestoreSavedTagRegisters()
+    {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+        popPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+        pop(GPRInfo::tagMaskRegister);
+        pop(GPRInfo::tagTypeNumberRegister);
+#endif
+#endif
+    }
+
+    // If you use this, be aware that vmGPR will get trashed.
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(GPRReg vmGPR)
+    {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+        loadPtr(Address(vmGPR, VM::topVMEntryFrameOffset()), vmGPR);
+        copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(vmGPR);
+#else
+        UNUSED_PARAM(vmGPR);
+#endif
+    }
+
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+        GPRReg temp1 = usedRegisters.getFreeGPR(0);
+        loadPtr(&vm.topVMEntryFrame, temp1);
+        copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(temp1);
+#else
+        UNUSED_PARAM(vm);
+        UNUSED_PARAM(usedRegisters);
+#endif
+    }
+
+    void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(VM&);
+
+    void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+        GPRReg temp1 = usedRegisters.getFreeGPR(0);
+        GPRReg temp2 = usedRegisters.getFreeGPR(1);
+        FPRReg fpTemp = usedRegisters.getFreeFPR();
+        ASSERT(temp2 != InvalidGPRReg);
+
+        ASSERT(codeBlock());
+
+        // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
+        loadPtr(&vm.topVMEntryFrame, temp1);
+        addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
+
+        RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
+        RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
+        RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+        unsigned registerCount = allCalleeSaves->size();
+
+        for (unsigned i = 0; i < registerCount; i++) {
+            RegisterAtOffset vmEntry = allCalleeSaves->at(i);
+            if (dontCopyRegisters.get(vmEntry.reg()))
+                continue;
+            RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg());
+
+            if (vmEntry.reg().isGPR()) {
+                GPRReg regToStore;
+                if (currentFrameEntry) {
+                    // Load calleeSave from stack into temp register
+                    regToStore = temp2;
+                    loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore);
+                } else
+                    // Just store callee save directly
+                    regToStore = vmEntry.reg().gpr();
+
+                storePtr(regToStore, Address(temp1, vmEntry.offset()));
+            } else {
+                FPRReg fpRegToStore;
+                if (currentFrameEntry) {
+                    // Load calleeSave from stack into temp register
+                    fpRegToStore = fpTemp;
+                    loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore);
+                } else
+                    // Just store callee save directly
+                    fpRegToStore = vmEntry.reg().fpr();
+
+                storeDouble(fpRegToStore, Address(temp1, vmEntry.offset()));
+            }
+        }
+#else
+        UNUSED_PARAM(vm);
+        UNUSED_PARAM(usedRegisters);
+#endif
+    }
+
+    void emitMaterializeTagCheckRegisters()
+    {
+#if USE(JSVALUE64)
+        move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+        orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#endif
+    }
 
 #if CPU(X86_64) || CPU(X86)
     static size_t prologueStackPointerDelta()
@@ -188,6 +457,11 @@ public:
         move(stackPointerRegister, framePointerRegister);
     }
 
+    void emitFunctionEpilogueWithEmptyFrame()
+    {
+        pop(framePointerRegister);
+    }
+
     void emitFunctionEpilogue()
     {
         move(framePointerRegister, stackPointerRegister);
@@ -223,10 +497,15 @@ public:
         move(stackPointerRegister, framePointerRegister);
     }
 
+    void emitFunctionEpilogueWithEmptyFrame()
+    {
+        popPair(framePointerRegister, linkRegister);
+    }
+
     void emitFunctionEpilogue()
     {
         move(framePointerRegister, stackPointerRegister);
-        popPair(framePointerRegister, linkRegister);
+        emitFunctionEpilogueWithEmptyFrame();
     }
 
     ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
@@ -252,79 +531,59 @@ public:
         return 2 * sizeof(void*);
     }
 
-    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
-    {
-        move(returnAddressRegister, reg);
-    }
-
-    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
-    {
-        move(reg, returnAddressRegister);
-    }
-
-    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+    void emitFunctionPrologue()
     {
-        loadPtr(address, returnAddressRegister);
+        pushPair(framePointerRegister, returnAddressRegister);
+        move(stackPointerRegister, framePointerRegister);
     }
-#endif
 
-#if CPU(SH4)
-    static size_t prologueStackPointerDelta()
+    void emitFunctionEpilogueWithEmptyFrame()
     {
-        // Prologue saves the framePointerRegister and link register
-        return 2 * sizeof(void*);
-    }
-
-    void emitFunctionPrologue()
-    {
-        push(linkRegister);
-        push(framePointerRegister);
-        move(stackPointerRegister, framePointerRegister);
+        popPair(framePointerRegister, returnAddressRegister);
     }
 
     void emitFunctionEpilogue()
     {
         move(framePointerRegister, stackPointerRegister);
-        pop(framePointerRegister);
-        pop(linkRegister);
+        emitFunctionEpilogueWithEmptyFrame();
     }
 
     ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
     {
-        m_assembler.stspr(reg);
+        move(returnAddressRegister, reg);
     }
 
     ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
     {
-        m_assembler.ldspr(reg);
+        move(reg, returnAddressRegister);
     }
 
     ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
     {
-        loadPtrLinkReg(address);
+        loadPtr(address, returnAddressRegister);
     }
 #endif
 
-    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+    void emitGetFromCallFrameHeaderPtr(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
     {
         loadPtr(Address(from, entry * sizeof(Register)), to);
     }
-    void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+    void emitGetFromCallFrameHeader32(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
     {
         load32(Address(from, entry * sizeof(Register)), to);
     }
 #if USE(JSVALUE64)
-    void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+    void emitGetFromCallFrameHeader64(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
     {
         load64(Address(from, entry * sizeof(Register)), to);
     }
 #endif // USE(JSVALUE64)
-    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+    void emitPutToCallFrameHeader(GPRReg from, int entry)
     {
         storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
     }
 
-    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+    void emitPutToCallFrameHeader(void* value, int entry)
     {
         storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
     }
@@ -354,17 +613,17 @@ public:
     // caller's frame pointer. On some platforms, the callee is responsible for pushing the
     // "link register" containing the return address in the function prologue.
 #if USE(JSVALUE64)
-    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
     {
         storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
     }
 #else
-    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+    void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
     {
         storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
     }
 
-    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
+    void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, int entry)
     {
         storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
     }
@@ -400,37 +659,43 @@ public:
 #endif
     }
 
-    Jump branchIfNotCell(GPRReg reg)
+    Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+        if (mode == HaveTagRegisters)
+            return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister);
+        return branchTest64(NonZero, reg, TrustedImm64(TagMask));
 #else
+        UNUSED_PARAM(mode);
         return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
 #endif
     }
-    Jump branchIfNotCell(JSValueRegs regs)
+    Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branchIfNotCell(regs.gpr());
+        return branchIfNotCell(regs.gpr(), mode);
 #else
-        return branchIfNotCell(regs.tagGPR());
+        return branchIfNotCell(regs.tagGPR(), mode);
 #endif
     }
     
-    Jump branchIfCell(GPRReg reg)
+    Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branchTest64(MacroAssembler::Zero, reg, GPRInfo::tagMaskRegister);
+        if (mode == HaveTagRegisters)
+            return branchTest64(Zero, reg, GPRInfo::tagMaskRegister);
+        return branchTest64(Zero, reg, TrustedImm64(TagMask));
 #else
+        UNUSED_PARAM(mode);
         return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
 #endif
     }
-    Jump branchIfCell(JSValueRegs regs)
+    Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branchIfCell(regs.gpr());
+        return branchIfCell(regs.gpr(), mode);
 #else
-        return branchIfCell(regs.tagGPR());
+        return branchIfCell(regs.tagGPR(), mode);
 #endif
     }
     
@@ -458,48 +723,93 @@ public:
 #endif
     }
     
-    Jump branchIfInt32(JSValueRegs regs)
+    Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        if (mode == HaveTagRegisters)
+            return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber));
 #else
+        UNUSED_PARAM(mode);
         return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
 #endif
     }
-    
-    Jump branchIfNotInt32(JSValueRegs regs)
+
+#if USE(JSVALUE64)
+    Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
+    {
+        if (mode == HaveTagRegisters)
+            return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister);
+        return branch64(Below, gpr, TrustedImm64(TagTypeNumber));
+    }
+#endif
+
+    Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
-        return branch64(Below, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        return branchIfNotInt32(regs.gpr(), mode);
 #else
+        UNUSED_PARAM(mode);
         return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
 #endif
     }
 
     // Note that the tempGPR is not used in 64-bit mode.
-    Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR)
+    Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
         UNUSED_PARAM(tempGPR);
-        return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        return branchIfNumber(regs.gpr(), mode);
 #else
+        UNUSED_PARAM(mode);
         add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
         return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
 #endif
     }
+
+#if USE(JSVALUE64)
+    Jump branchIfNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+    {
+        if (mode == HaveTagRegisters)
+            return branchTest64(NonZero, reg, GPRInfo::tagTypeNumberRegister);
+        return branchTest64(NonZero, reg, TrustedImm64(TagTypeNumber));
+    }
+#endif
     
     // Note that the tempGPR is not used in 64-bit mode.
-    Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR)
+    Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
     {
 #if USE(JSVALUE64)
         UNUSED_PARAM(tempGPR);
-        return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        return branchIfNotNumber(regs.gpr(), mode);
 #else
+        UNUSED_PARAM(mode);
         add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
         return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
 #endif
     }
 
+#if USE(JSVALUE64)
+    Jump branchIfNotNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+    {
+        if (mode == HaveTagRegisters)
+            return branchTest64(Zero, reg, GPRInfo::tagTypeNumberRegister);
+        return branchTest64(Zero, reg, TrustedImm64(TagTypeNumber));
+    }
+#endif
+
+    Jump branchIfNotDoubleKnownNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+    {
+#if USE(JSVALUE64)
+        if (mode == HaveTagRegisters)
+            return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+        return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+        UNUSED_PARAM(mode);
+        return branch32(AboveOrEqual, regs.tagGPR(), TrustedImm32(JSValue::LowestTag));
+#endif
+    }
+
     // Note that the tempGPR is not used in 32-bit mode.
     Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
     {
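The two hunks above thread a TagRegistersMode parameter through these checks: with HaveTagRegisters the constants already sit in tagTypeNumberRegister/tagMaskRegister, and with DoNotHaveTagRegisters the same constants are emitted as immediates. As a rough standalone illustration (not JSC source; the numeric values are taken from JSC's 64-bit NaN-boxing encoding and are an assumption, though TagMask = TagTypeNumber | TagBitTypeOther matches emitMaterializeTagCheckRegisters above; helper names are invented), here is what the 64-bit branches compute on raw JSValue bits:

    #include <cstdint>

    constexpr uint64_t TagBitTypeOther = 0x2;                   // set on non-cell, non-number values
    constexpr uint64_t TagTypeNumber   = 0xffff000000000000ull; // set on all boxed int32s
    constexpr uint64_t TagMask         = TagTypeNumber | TagBitTypeOther;

    bool isCell(uint64_t bits)   { return !(bits & TagMask); }     // branchIfCell: test Zero against TagMask
    bool isInt32(uint64_t bits)  { return bits >= TagTypeNumber; } // branchIfInt32: AboveOrEqual TagTypeNumber
    bool isNumber(uint64_t bits) { return bits & TagTypeNumber; }  // branchIfNumber: test NonZero against TagTypeNumber
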
@@ -563,7 +873,24 @@ public:
         return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
 #endif
     }
-    
+
+    JumpList branchIfNotType(
+        JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&,
+        TagRegistersMode = HaveTagRegisters);
+
+    template<typename T>
+    Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure)
+    {
+#if USE(JSVALUE64)
+        return branch32(condition, leftHandSide, TrustedImm32(structure->id()));
+#else
+        return branchPtr(condition, leftHandSide, TrustedImmPtr(structure));
+#endif
+    }
+
+    Jump branchIfFastTypedArray(GPRReg baseGPR);
+    Jump branchIfNotFastTypedArray(GPRReg baseGPR);
+
     static Address addressForByteOffset(ptrdiff_t byteOffset)
     {
         return Address(GPRInfo::callFrameRegister, byteOffset);
@@ -610,8 +937,8 @@ public:
     // Access to our fixed callee CallFrame.
     static Address calleeFrameSlot(int slot)
     {
-        ASSERT(slot >= JSStack::CallerFrameAndPCSize);
-        return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize));
+        ASSERT(slot >= CallerFrameAndPC::sizeInRegisters);
+        return Address(stackPointerRegister, sizeof(Register) * (slot - CallerFrameAndPC::sizeInRegisters));
     }
 
     // Access to our fixed callee CallFrame.
@@ -666,60 +993,7 @@ public:
     }
 
     // Add a debug call. This call has no effect on JIT code execution state.
-    void debugCall(V_DebugOperation_EPP function, void* argument)
-    {
-        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
-        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
-        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
-
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            store64(GPRInfo::toRegister(i), buffer + i);
-#else
-            store32(GPRInfo::toRegister(i), buffer + i);
-#endif
-        }
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
-        }
-
-        // Tell GC mark phase how much of the scratch buffer is active during call.
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
-
-#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
-        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
-        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
-#elif CPU(X86)
-        poke(GPRInfo::callFrameRegister, 0);
-        poke(TrustedImmPtr(argument), 1);
-        poke(TrustedImmPtr(buffer), 2);
-        GPRReg scratch = GPRInfo::regT0;
-#else
-#error "JIT not supported on this platform."
-#endif
-        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
-        call(scratch);
-
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(0), GPRInfo::regT0);
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
-        }
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            load64(buffer + i, GPRInfo::toRegister(i));
-#else
-            load32(buffer + i, GPRInfo::toRegister(i));
-#endif
-        }
-    }
+    void debugCall(VM&, V_DebugOperation_EPP function, void* argument);
 
     // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
 #if !ASSERT_DISABLED
@@ -743,6 +1017,11 @@ public:
     void jitAssertTagsInPlace() { }
     void jitAssertArgumentCountSane() { }
 #endif
+
+    void jitReleaseAssertNoException(VM&);
+
+    void incrementSuperSamplerCount();
+    void decrementSuperSamplerCount();
     
     void purifyNaN(FPRReg);
 
@@ -755,23 +1034,28 @@ public:
         jitAssertIsJSDouble(gpr);
         return gpr;
     }
-    FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, FPRReg fpr)
+    FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
     {
-        add64(GPRInfo::tagTypeNumberRegister, gpr);
-        move64ToDouble(gpr, fpr);
+        add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR);
+        move64ToDouble(resultGPR, fpr);
         return fpr;
     }
-    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
+    FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
     {
         jitAssertIsJSDouble(gpr);
-        return unboxDoubleWithoutAssertions(gpr, fpr);
+        return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr);
     }
     
     void boxDouble(FPRReg fpr, JSValueRegs regs)
     {
         boxDouble(fpr, regs.gpr());
     }
-    
+
+    void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg)
+    {
+        unboxDouble(regs.payloadGPR(), resultGPR, destFPR);
+    }
+
     // Here are possible arrangements of source, target, scratch:
     // - source, target, scratch can all be separate registers.
     // - source and target can be the same but scratch is separate.
@@ -809,6 +1093,15 @@ public:
     {
         boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
     }
+    void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR)
+    {
+        unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR);
+    }
+
+    void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR)
+    {
+        unboxDouble(regs, destFPR, scratchFPR);
+    }
 #endif
     
     void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
@@ -820,6 +1113,15 @@ public:
 #endif
     }
 
+    void boxBooleanPayload(bool value, GPRReg payloadGPR)
+    {
+#if USE(JSVALUE64)
+        move(TrustedImm32(ValueFalse + value), payloadGPR);
+#else
+        move(TrustedImm32(value), payloadGPR);
+#endif
+    }
+
     void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
     {
         boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
@@ -827,13 +1129,40 @@ public:
         move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
 #endif
     }
+
+    void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters)
+    {
+#if USE(JSVALUE64)
+        if (mode == DoNotHaveTagRegisters) {
+            move(intGPR, boxedRegs.gpr());
+            or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr());
+        } else
+            or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr());
+#else
+        UNUSED_PARAM(mode);
+        move(intGPR, boxedRegs.payloadGPR());
+        move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR());
+#endif
+    }
+
+    void boxCell(GPRReg cellGPR, JSValueRegs boxedRegs)
+    {
+#if USE(JSVALUE64)
+        move(cellGPR, boxedRegs.gpr());
+#else
+        move(cellGPR, boxedRegs.payloadGPR());
+        move(TrustedImm32(JSValue::CellTag), boxedRegs.tagGPR());
+#endif
+    }
     
-    void callExceptionFuzz();
+    void callExceptionFuzz(VM&);
     
     enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
     enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
-    Jump emitExceptionCheck(
-        ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+    JS_EXPORT_PRIVATE Jump emitExceptionCheck(
+        VM&, ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+    JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(VM&);
+    Jump emitJumpIfException(VM&);
 
 #if ENABLE(SAMPLING_COUNTERS)
     static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
@@ -860,7 +1189,7 @@ public:
     {
         if (!codeOrigin.inlineCallFrame)
             return codeBlock()->isStrictMode();
-        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
+        return codeOrigin.inlineCallFrame->isStrictMode();
     }
     
     ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
@@ -902,30 +1231,21 @@ public:
     {
         return argumentsStart(codeOrigin.inlineCallFrame);
     }
-    
-    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
+
+    static VirtualRegister argumentCount(InlineCallFrame* inlineCallFrame)
     {
-#if USE(JSVALUE64)
-        load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
-        loadPtr(vm()->heap.structureIDTable().base(), scratch);
-        loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
-#else
-        UNUSED_PARAM(scratch);
-        loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
-#endif
+        ASSERT(!inlineCallFrame || inlineCallFrame->isVarargs());
+        if (!inlineCallFrame)
+            return VirtualRegister(CallFrameSlot::argumentCount);
+        return inlineCallFrame->argumentCountRegister;
     }
 
-    static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
+    static VirtualRegister argumentCount(const CodeOrigin& codeOrigin)
     {
-#if USE(JSVALUE64)
-        jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
-        jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
-        jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
-#else
-        UNUSED_PARAM(scratch);
-        jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
-#endif
+        return argumentCount(codeOrigin.inlineCallFrame);
     }
+    
+    void emitLoadStructure(VM&, RegisterID source, RegisterID dest, RegisterID scratch);
 
     void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
     {
@@ -939,8 +1259,8 @@ public:
         store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
 #else
         // Store all the info flags using a single 32-bit wide load and store.
-        load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
-        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
+        load32(MacroAssembler::Address(structure, Structure::indexingTypeIncludingHistoryOffset()), scratch);
+        store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
 
         // Store the StructureID
         storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
@@ -949,15 +1269,87 @@ public:
 
     static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
 
-    Jump jumpIfIsRememberedOrInEden(GPRReg cell)
+    Jump barrierBranchWithoutFence(GPRReg cell)
+    {
+        return branch8(Above, Address(cell, JSCell::cellStateOffset()), TrustedImm32(blackThreshold));
+    }
+
+    Jump barrierBranchWithoutFence(JSCell* cell)
+    {
+        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
+        return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold));
+    }
+    
+    Jump barrierBranch(VM& vm, GPRReg cell, GPRReg scratchGPR)
     {
-        return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::gcDataOffset()));
+        load8(Address(cell, JSCell::cellStateOffset()), scratchGPR);
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
     }
 
-    Jump jumpIfIsRememberedOrInEden(JSCell* cell)
+    Jump barrierBranch(VM& vm, JSCell* cell, GPRReg scratchGPR)
+    {
+        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
+        load8(address, scratchGPR);
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
+    }
+    
+    void barrierStoreLoadFence(VM& vm)
+    {
+        if (!Options::useConcurrentBarriers())
+            return;
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
+        memoryFence();
+        ok.link(this);
+    }
+    
+    void mutatorFence(VM& vm)
+    {
+        if (isX86())
+            return;
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
+        storeFence();
+        ok.link(this);
+    }
+    
+    void storeButterfly(VM& vm, GPRReg butterfly, GPRReg object)
+    {
+        if (isX86()) {
+            storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+            return;
+        }
+        
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
+        storeFence();
+        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+        storeFence();
+        Jump done = jump();
+        ok.link(this);
+        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+        done.link(this);
+    }
+    
+    void nukeStructureAndStoreButterfly(VM& vm, GPRReg butterfly, GPRReg object)
     {
-        uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::gcDataOffset();
-        return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
+        if (isX86()) {
+            or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
+            storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+            return;
+        }
+        
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
+        or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
+        storeFence();
+        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+        storeFence();
+        Jump done = jump();
+        ok.link(this);
+        storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+        done.link(this);
+    }
+    
+    Jump jumpIfMutatorFenceNotNeeded(VM& vm)
+    {
+        return branchTest8(Zero, AbsoluteAddress(vm.heap.addressOfMutatorShouldBeFenced()));
     }
     
     // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
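storeButterfly() and nukeStructureAndStoreButterfly() above only bracket the butterfly store with fences while a concurrent collector may be running, and never on x86. A rough standalone model of that pattern (illustrative names; std::atomic_thread_fence(release) stands in for the JIT's storeFence() as an assumption about comparable ordering, not a claim about the exact instruction emitted):

    #include <atomic>

    // On x86 (TSO) a plain store suffices. Elsewhere, while the mutator must be
    // fenced (concurrent GC active), the store is bracketed by store fences so the
    // collector never observes the new butterfly before the data it points at.
    void storeButterflyModel(void** butterflySlot, void* newButterfly, bool isX86, bool mutatorShouldBeFenced)
    {
        if (isX86 || !mutatorShouldBeFenced) {
            *butterflySlot = newButterfly;
            return;
        }
        std::atomic_thread_fence(std::memory_order_release);
        *butterflySlot = newButterfly;
        std::atomic_thread_fence(std::memory_order_release);
    }
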
@@ -1035,11 +1427,197 @@ public:
         
         functor(TypeofType::Undefined, true);
     }
-
+    
+    void emitDumbVirtualCall(VM&, CallLinkInfo*);
+    
     Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
+    void makeSpaceOnStackForCCall();
+    void reclaimSpaceOnStackForCCall();
+
+#if USE(JSVALUE64)
+    void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
+    void emitRandomThunk(VM&, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+#endif
+
+    // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
+    // that allocator is non-null; allocator can be null as a signal that we don't know what the
+    // value of allocatorGPR is.
+    void emitAllocateWithNonNullAllocator(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        // NOTE: This is carefully written so that we can call it while we disallow scratch
+        // register usage.
+        
+        if (Options::forceGCSlowPaths()) {
+            slowPath.append(jump());
+            return;
+        }
+        
+        Jump popPath;
+        Jump done;
+        
+        load32(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
+        popPath = branchTest32(Zero, resultGPR);
+        if (allocator)
+            add32(TrustedImm32(-allocator->cellSize()), resultGPR, scratchGPR);
+        else {
+            if (isX86()) {
+                move(resultGPR, scratchGPR);
+                sub32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR);
+            } else {
+                load32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR);
+                sub32(resultGPR, scratchGPR, scratchGPR);
+            }
+        }
+        negPtr(resultGPR);
+        store32(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
+        Address payloadEndAddr = Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
+        if (isX86())
+            addPtr(payloadEndAddr, resultGPR);
+        else {
+            loadPtr(payloadEndAddr, scratchGPR);
+            addPtr(scratchGPR, resultGPR);
+        }
+        
+        done = jump();
+        
+        popPath.link(this);
+        
+        loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
+        if (isX86())
+            xorPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
+        else {
+            loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), scratchGPR);
+            xorPtr(scratchGPR, resultGPR);
+        }
+        slowPath.append(branchTestPtr(Zero, resultGPR));
+        
+        // The object is half-allocated: we have what we know is a fresh object, but
+        // it's still on the GC's free list.
+        loadPtr(Address(resultGPR), scratchGPR);
+        storePtr(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));
+        
+        done.link(this);
+    }
+    
+    void emitAllocate(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        if (!allocator)
+            slowPath.append(branchTestPtr(Zero, allocatorGPR));
+        emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
+    }
+    
+    template<typename StructureType>
+    void emitAllocateJSCell(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
+        emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
+    }
     
+    template<typename StructureType, typename StorageType>
+    void emitAllocateJSObject(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, GPRReg scratchGPR, JumpList& slowPath)
+    {
+        emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
+        storePtr(storage, Address(resultGPR, JSObject::butterflyOffset()));
+    }
+    
+    template<typename ClassType, typename StructureType, typename StorageType>
+    void emitAllocateJSObjectWithKnownSize(
+        VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
+        GPRReg scratchGPR2, JumpList& slowPath, size_t size)
+    {
+        MarkedAllocator* allocator = subspaceFor<ClassType>(vm)->allocatorFor(size);
+        if (!allocator) {
+            slowPath.append(jump());
+            return;
+        }
+        move(TrustedImmPtr(allocator), scratchGPR1);
+        emitAllocateJSObject(resultGPR, allocator, scratchGPR1, structure, storage, scratchGPR2, slowPath);
+    }
+    
+    template<typename ClassType, typename StructureType, typename StorageType>
+    void emitAllocateJSObject(VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        emitAllocateJSObjectWithKnownSize<ClassType>(vm, resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
+    }
+    
+    // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
+    // won't be clobbered.
+    void emitAllocateVariableSized(GPRReg resultGPR, Subspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");
+        
+        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
+        
+        add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
+        urshift32(TrustedImm32(stepShift), scratchGPR1);
+        slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
+        move(TrustedImmPtr(subspace.allocatorForSizeStep() - 1), scratchGPR2);
+        loadPtr(BaseIndex(scratchGPR2, scratchGPR1, timesPtr()), scratchGPR1);
+        
+        emitAllocate(resultGPR, nullptr, scratchGPR1, scratchGPR2, slowPath);
+    }
+    
+    template<typename ClassType, typename StructureType>
+    void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        Subspace& subspace = *subspaceFor<ClassType>(vm);
+        emitAllocateVariableSized(resultGPR, subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+        emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2);
+    }
+
+    template<typename ClassType, typename StructureType>
+    void emitAllocateVariableSizedJSObject(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        emitAllocateVariableSizedCell<ClassType>(vm, resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+        storePtr(TrustedImmPtr(0), Address(resultGPR, JSObject::butterflyOffset()));
+    }
+
+    void emitConvertValueToBoolean(VM&, JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
+    
+    template<typename ClassType>
+    void emitAllocateDestructibleObject(VM& vm, GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    {
+        emitAllocateJSObject<ClassType>(vm, resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+        storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset()));
+    }
+    
+    void emitInitializeInlineStorage(GPRReg baseGPR, unsigned inlineCapacity)
+    {
+        for (unsigned i = 0; i < inlineCapacity; ++i)
+            storeTrustedValue(JSValue(), Address(baseGPR, JSObject::offsetOfInlineStorage() + i * sizeof(EncodedJSValue)));
+    }
+
+    void emitInitializeInlineStorage(GPRReg baseGPR, GPRReg inlineCapacity)
+    {
+        Jump empty = branchTest32(Zero, inlineCapacity);
+        Label loop = label();
+        sub32(TrustedImm32(1), inlineCapacity);
+        storeTrustedValue(JSValue(), BaseIndex(baseGPR, inlineCapacity, TimesEight, JSObject::offsetOfInlineStorage()));
+        branchTest32(NonZero, inlineCapacity).linkTo(loop, this);
+        empty.link(this);
+    }
+
+    void emitInitializeOutOfLineStorage(GPRReg butterflyGPR, unsigned outOfLineCapacity)
+    {
+        for (unsigned i = 0; i < outOfLineCapacity; ++i)
+            storeTrustedValue(JSValue(), Address(butterflyGPR, -sizeof(IndexingHeader) - (i + 1) * sizeof(EncodedJSValue)));
+    }
+    
+#if USE(JSVALUE64)
+    void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch);
+#endif
+
+#if ENABLE(WEBASSEMBLY)
+    void loadWasmContext(GPRReg dst);
+    void storeWasmContext(GPRReg src);
+    static bool loadWasmContextNeedsMacroScratchRegister();
+    static bool storeWasmContextNeedsMacroScratchRegister();
+#endif
+
 protected:
-    VM* m_vm;
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer);
+
     CodeBlock* m_codeBlock;
     CodeBlock* m_baselineCodeBlock;
 
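The emitAllocateWithNonNullAllocator() fast path added in this hunk is easier to follow as ordinary code. A rough standalone model of the control flow the emitted assembly implements (invented struct layout; JSC's real FreeList/MarkedAllocator classes differ): bump allocation against a remaining-bytes counter, falling back to popping an XOR-scrambled free list, with nullptr meaning the caller's slow path.

    #include <cstdint>

    struct FreeCell { uintptr_t scrambledNext; };  // free cells hold an already-scrambled next pointer
    struct FreeList {
        uintptr_t scrambledHead { 0 };             // head pointer XORed with secret
        uintptr_t secret { 0 };
        char* payloadEnd { nullptr };
        uint32_t remaining { 0 };                  // bytes left for bump allocation
    };

    void* allocateFastPath(FreeList& freeList, uint32_t cellSize)
    {
        if (uint32_t remaining = freeList.remaining) {
            freeList.remaining = remaining - cellSize;
            return freeList.payloadEnd - remaining;  // bump upward toward payloadEnd
        }
        uintptr_t head = freeList.scrambledHead ^ freeList.secret;
        if (!head)
            return nullptr;                          // free list empty: take the slow path
        freeList.scrambledHead = reinterpret_cast<FreeCell*>(head)->scrambledNext;
        return reinterpret_cast<void*>(head);
    }
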
@@ -1049,6 +1627,3 @@ protected:
 } // namespace JSC
 
 #endif // ENABLE(JIT)
-
-#endif // AssemblyHelpers_h
-