Remove excessive headers from JavaScriptCore
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h
index 49c7f9c..71facd2 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.h
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h
 #include "JITCode.h"
 #include "MacroAssembler.h"
 #include "MarkedSpace.h"
-#include "MaxFrameExtentForSlowPathCall.h"
 #include "RegisterAtOffsetList.h"
 #include "RegisterSet.h"
-#include "SuperSampler.h"
+#include "TagRegistersMode.h"
 #include "TypeofType.h"
 #include "VM.h"
 
@@ -48,9 +47,8 @@ typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
 
 class AssemblyHelpers : public MacroAssembler {
 public:
-    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
-        : m_vm(vm)
-        , m_codeBlock(codeBlock)
+    AssemblyHelpers(CodeBlock* codeBlock)
+        : m_codeBlock(codeBlock)
         , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
     {
         if (m_codeBlock) {
@@ -61,7 +59,6 @@ public:
     }
 
     CodeBlock* codeBlock() { return m_codeBlock; }
-    VM* vm() { return m_vm; }
     AssemblerType_T& assembler() { return m_assembler; }
 
     void checkStackPointerAlignment()
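
With the VM* member gone, constructing an AssemblyHelpers takes only the CodeBlock; the VM is instead passed explicitly to each emitter that needs it. A minimal before/after sketch (jit and vm are illustrative names):

    // Before: the helper captured the VM at construction time.
    AssemblyHelpers jit(&vm, codeBlock);

    // After: no VM at construction; pass VM& per call where required.
    AssemblyHelpers jit(codeBlock);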
@@ -230,6 +227,31 @@ public:
         store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
 #endif
     }
+    
+    Address addressFor(const RegisterAtOffset& entry)
+    {
+        return Address(GPRInfo::callFrameRegister, entry.offset());
+    }
+    
+    void emitSave(const RegisterAtOffsetList& list)
+    {
+        for (const RegisterAtOffset& entry : list) {
+            if (entry.reg().isGPR())
+                storePtr(entry.reg().gpr(), addressFor(entry));
+            else
+                storeDouble(entry.reg().fpr(), addressFor(entry));
+        }
+    }
+    
+    void emitRestore(const RegisterAtOffsetList& list)
+    {
+        for (const RegisterAtOffset& entry : list) {
+            if (entry.reg().isGPR())
+                loadPtr(addressFor(entry), entry.reg().gpr());
+            else
+                loadDouble(addressFor(entry), entry.reg().fpr());
+        }
+    }
 
     void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
     {
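
The new emitSave()/emitRestore() helpers factor the callee-save spill/fill loops out of their callers; emitSaveCalleeSavesFor(), continuing below, is the typical consumer. A bare sketch of direct use (assuming a jit in scope):

    // Spill this code block's callee-saves on entry, refill on exit.
    // The list's contents are platform- and tier-dependent.
    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    jit.emitSave(*calleeSaves);
    // ... emit the function body ...
    jit.emitRestore(*calleeSaves);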
@@ -334,35 +356,32 @@ public:
 #endif
     }
 
-    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    // If you use this, be aware that vmGPR will get trashed.
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(GPRReg vmGPR)
     {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
-        GPRReg temp1 = usedRegisters.getFreeGPR(0);
-
-        loadPtr(&m_vm->topVMEntryFrame, temp1);
-        addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
+        loadPtr(Address(vmGPR, VM::topVMEntryFrameOffset()), vmGPR);
+        copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(vmGPR);
+#else
+        UNUSED_PARAM(vmGPR);
+#endif
+    }
 
-        RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
-        RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
-        unsigned registerCount = allCalleeSaves->size();
-        
-        for (unsigned i = 0; i < registerCount; i++) {
-            RegisterAtOffset entry = allCalleeSaves->at(i);
-            if (dontCopyRegisters.get(entry.reg()))
-                continue;
-            if (entry.reg().isGPR())
-                storePtr(entry.reg().gpr(), Address(temp1, entry.offset()));
-            else
-                storeDouble(entry.reg().fpr(), Address(temp1, entry.offset()));
-        }
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+        GPRReg temp1 = usedRegisters.getFreeGPR(0);
+        loadPtr(&vm.topVMEntryFrame, temp1);
+        copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(temp1);
 #else
+        UNUSED_PARAM(vm);
         UNUSED_PARAM(usedRegisters);
 #endif
     }
 
-    void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+    void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(VM&);
 
-    void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
     {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
         GPRReg temp1 = usedRegisters.getFreeGPR(0);
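
The copy-to-buffer entry point is now split in two: a register-based overload for callers that already hold the VM pointer in a GPR (and can afford to clobber it), and a VM&-based overload that loads the pointer itself into a free temp. A sketch of when each applies:

    // In a thunk where the VM pointer is already live in a GPR;
    // vmGPR is clobbered, per the comment above.
    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vmGPR);

    // In JIT code where the VM is known at code-emission time.
    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);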
@@ -373,10 +392,10 @@ public:
         ASSERT(codeBlock());
 
         // Copy callee-saves that were saved to the stack, or that are still unsaved in registers, into the VM's callee-save buffer.
-        loadPtr(&m_vm->topVMEntryFrame, temp1);
+        loadPtr(&vm.topVMEntryFrame, temp1);
         addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
 
-        RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+        RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
         RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
         RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
         unsigned registerCount = allCalleeSaves->size();
@@ -412,6 +431,7 @@ public:
             }
         }
 #else
+        UNUSED_PARAM(vm);
         UNUSED_PARAM(usedRegisters);
 #endif
     }
@@ -973,60 +993,7 @@ public:
     }
 
     // Add a debug call. This call has no effect on JIT code execution state.
-    void debugCall(V_DebugOperation_EPP function, void* argument)
-    {
-        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
-        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
-        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
-
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            store64(GPRInfo::toRegister(i), buffer + i);
-#else
-            store32(GPRInfo::toRegister(i), buffer + i);
-#endif
-        }
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
-        }
-
-        // Tell GC mark phase how much of the scratch buffer is active during call.
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
-
-#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
-        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
-        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
-#elif CPU(X86)
-        poke(GPRInfo::callFrameRegister, 0);
-        poke(TrustedImmPtr(argument), 1);
-        poke(TrustedImmPtr(buffer), 2);
-        GPRReg scratch = GPRInfo::regT0;
-#else
-#error "JIT not supported on this platform."
-#endif
-        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
-        call(scratch);
-
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(0), GPRInfo::regT0);
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
-        }
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            load64(buffer + i, GPRInfo::toRegister(i));
-#else
-            load32(buffer + i, GPRInfo::toRegister(i));
-#endif
-        }
-    }
+    void debugCall(VM&, V_DebugOperation_EPP function, void* argument);
 
     // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
 #if !ASSERT_DISABLED
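
debugCall()'s body moves out of line (presumably into AssemblyHelpers.cpp) and takes the VM explicitly rather than reading m_vm. A hypothetical call site, where dumpState is some function of type V_DebugOperation_EPP:

    // Saves and restores all JIT register state around the call, as before.
    jit.debugCall(vm, dumpState, nullptr);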
@@ -1051,7 +1018,7 @@ public:
     void jitAssertArgumentCountSane() { }
 #endif
 
-    void jitReleaseAssertNoException();
+    void jitReleaseAssertNoException(VM&);
 
     void incrementSuperSamplerCount();
     void decrementSuperSamplerCount();
@@ -1188,14 +1155,14 @@ public:
 #endif
     }
     
-    void callExceptionFuzz();
+    void callExceptionFuzz(VM&);
     
     enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
     enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
     JS_EXPORT_PRIVATE Jump emitExceptionCheck(
-        ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
-    JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck();
-    Jump emitJumpIfException();
+        VM&, ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+    JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(VM&);
+    Jump emitJumpIfException(VM&);
 
 #if ENABLE(SAMPLING_COUNTERS)
     static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
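
The exception-check emitters gain the same leading VM& parameter. A sketch of typical call sites (jumplist and label names are illustrative):

    // Branch out of line if the VM has a pending exception.
    exceptionJumps.append(jit.emitExceptionCheck(vm));

    // Inverted form: the jump is taken only when no exception is pending.
    Jump noException = jit.emitExceptionCheck(vm, AssemblyHelpers::InvertedExceptionCheck);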
@@ -1278,7 +1245,7 @@ public:
         return argumentCount(codeOrigin.inlineCallFrame);
     }
     
-    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch);
+    void emitLoadStructure(VM&, RegisterID source, RegisterID dest, RegisterID scratch);
 
     void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
     {
@@ -1313,45 +1280,45 @@ public:
         return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold));
     }
     
-    Jump barrierBranch(GPRReg cell, GPRReg scratchGPR)
+    Jump barrierBranch(VM& vm, GPRReg cell, GPRReg scratchGPR)
     {
         load8(Address(cell, JSCell::cellStateOffset()), scratchGPR);
-        return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
     }
 
-    Jump barrierBranch(JSCell* cell, GPRReg scratchGPR)
+    Jump barrierBranch(VM& vm, JSCell* cell, GPRReg scratchGPR)
     {
         uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
         load8(address, scratchGPR);
-        return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
     }
     
-    void barrierStoreLoadFence()
+    void barrierStoreLoadFence(VM& vm)
     {
         if (!Options::useConcurrentBarriers())
             return;
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         memoryFence();
         ok.link(this);
     }
     
-    void mutatorFence()
+    void mutatorFence(VM& vm)
     {
         if (isX86())
             return;
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         storeFence();
         ok.link(this);
     }
     
-    void storeButterfly(GPRReg butterfly, GPRReg object)
+    void storeButterfly(VM& vm, GPRReg butterfly, GPRReg object)
     {
         if (isX86()) {
             storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
             return;
         }
         
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         storeFence();
         storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
         storeFence();
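
A typical write-barrier emission against the reworked API; the slow-path wiring is a sketch (register names illustrative):

    // The returned jump is taken when the cell's state is above the VM's
    // barrier threshold, i.e. when the store actually needs barrier work.
    Jump needsBarrier = jit.barrierBranch(vm, objectGPR, scratchGPR);
    // ... fast path: no barrier ...
    // Out of line: link needsBarrier to a call to the write-barrier
    // slow-path operation, then jump back.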
@@ -1361,7 +1328,7 @@ public:
         done.link(this);
     }
     
-    void nukeStructureAndStoreButterfly(GPRReg butterfly, GPRReg object)
+    void nukeStructureAndStoreButterfly(VM& vm, GPRReg butterfly, GPRReg object)
     {
         if (isX86()) {
             or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
@@ -1369,7 +1336,7 @@ public:
             return;
         }
         
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
         storeFence();
         storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
@@ -1380,9 +1347,9 @@ public:
         done.link(this);
     }
     
-    Jump jumpIfMutatorFenceNotNeeded()
+    Jump jumpIfMutatorFenceNotNeeded(VM& vm)
     {
-        return branchTest8(Zero, AbsoluteAddress(vm()->heap.addressOfMutatorShouldBeFenced()));
+        return branchTest8(Zero, AbsoluteAddress(vm.heap.addressOfMutatorShouldBeFenced()));
     }
     
     // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
@@ -1461,7 +1428,7 @@ public:
         functor(TypeofType::Undefined, true);
     }
     
-    void emitDumbVirtualCall(CallLinkInfo*);
+    void emitDumbVirtualCall(VM&, CallLinkInfo*);
     
     Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
 
@@ -1470,7 +1437,7 @@ public:
 
 #if USE(JSVALUE64)
     void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
-    void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+    void emitRandomThunk(VM&, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
 #endif
 
     // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
@@ -1489,7 +1456,7 @@ public:
         Jump popPath;
         Jump done;
         
-        load32(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining)), resultGPR);
+        load32(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
         popPath = branchTest32(Zero, resultGPR);
         if (allocator)
             add32(TrustedImm32(-allocator->cellSize()), resultGPR, scratchGPR);
@@ -1503,8 +1470,8 @@ public:
             }
         }
         negPtr(resultGPR);
-        store32(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining)));
-        Address payloadEndAddr = Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, payloadEnd));
+        store32(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
+        Address payloadEndAddr = Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
         if (isX86())
             addPtr(payloadEndAddr, resultGPR);
         else {
@@ -1516,13 +1483,19 @@ public:
         
         popPath.link(this);
         
-        loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head)), resultGPR);
+        loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
+        if (isX86())
+            xorPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
+        else {
+            loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), scratchGPR);
+            xorPtr(scratchGPR, resultGPR);
+        }
         slowPath.append(branchTestPtr(Zero, resultGPR));
         
         // The object is half-allocated: we have what we know is a fresh object, but
         // it's still on the GC's free list.
         loadPtr(Address(resultGPR), scratchGPR);
-        storePtr(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head)));
+        storePtr(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));
         
         done.link(this);
     }
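
The free-list head is now stored scrambled (XORed with a per-free-list secret), so a stale or attacker-supplied value no longer decodes to a usable cell pointer; the xorPtr sequence emitted above undoes the scrambling. In C++ terms, assuming plain XOR scrambling as the emitted code implies (field names illustrative):

    // head = scrambledHead ^ secret, matching the JIT sequence above.
    uintptr_t scrambledHead = freeList.scrambledHead;
    uintptr_t secret = freeList.secret;
    char* head = reinterpret_cast<char*>(scrambledHead ^ secret);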
@@ -1550,10 +1523,10 @@ public:
     
     template<typename ClassType, typename StructureType, typename StorageType>
     void emitAllocateJSObjectWithKnownSize(
-        GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
+        VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
         GPRReg scratchGPR2, JumpList& slowPath, size_t size)
     {
-        MarkedAllocator* allocator = subspaceFor<ClassType>(*vm())->allocatorFor(size);
+        MarkedAllocator* allocator = subspaceFor<ClassType>(vm)->allocatorFor(size);
         if (!allocator) {
             slowPath.append(jump());
             return;
@@ -1563,9 +1536,9 @@ public:
     }
     
     template<typename ClassType, typename StructureType, typename StorageType>
-    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateJSObject(VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
+        emitAllocateJSObjectWithKnownSize<ClassType>(vm, resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
     }
     
     // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
@@ -1586,26 +1559,26 @@ public:
     }
     
     template<typename ClassType, typename StructureType>
-    void emitAllocateVariableSizedCell(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        Subspace& subspace = *subspaceFor<ClassType>(*vm());
+        Subspace& subspace = *subspaceFor<ClassType>(vm);
         emitAllocateVariableSized(resultGPR, subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath);
         emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2);
     }
 
     template<typename ClassType, typename StructureType>
-    void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateVariableSizedJSObject(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateVariableSizedCell<ClassType>(resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+        emitAllocateVariableSizedCell<ClassType>(vm, resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
         storePtr(TrustedImmPtr(0), Address(resultGPR, JSObject::butterflyOffset()));
     }
 
-    void emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
+    void emitConvertValueToBoolean(VM&, JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
     
     template<typename ClassType>
-    void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateDestructibleObject(VM& vm, GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateJSObject<ClassType>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+        emitAllocateJSObject<ClassType>(vm, resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
         storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset()));
     }
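
The allocation helpers all gain a leading VM&. A hypothetical fast-path allocation of a JSFinalObject under the new signatures (register names illustrative):

    JumpList slowPath;
    jit.emitAllocateJSObject<JSFinalObject>(
        vm, resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0),
        scratchGPR1, scratchGPR2, slowPath);
    // slowPath is taken when no free cell is available; link it to an
    // out-of-line call into the allocator.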
     
@@ -1634,9 +1607,17 @@ public:
 #if USE(JSVALUE64)
     void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch);
 #endif
-    
+
+#if ENABLE(WEBASSEMBLY)
+    void loadWasmContext(GPRReg dst);
+    void storeWasmContext(GPRReg src);
+    static bool loadWasmContextNeedsMacroScratchRegister();
+    static bool storeWasmContextNeedsMacroScratchRegister();
+#endif
+
 protected:
-    VM* m_vm;
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer);
+
     CodeBlock* m_codeBlock;
     CodeBlock* m_baselineCodeBlock;
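
The shared copy loop now lives behind the protected Impl helper. Its presumed shape, reconstructed from the inline loop this patch deletes above (the real definition is out of line):

    void AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer)
    {
        // calleeSavesBuffer holds the VMEntryFrame on entry; advance it to
        // the callee-save buffer within the frame.
        addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), calleeSavesBuffer);
        RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
        RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
        for (unsigned i = 0; i < allCalleeSaves->size(); ++i) {
            RegisterAtOffset entry = allCalleeSaves->at(i);
            if (dontCopyRegisters.get(entry.reg()))
                continue;
            if (entry.reg().isGPR())
                storePtr(entry.reg().gpr(), Address(calleeSavesBuffer, entry.offset()));
            else
                storeDouble(entry.reg().fpr(), Address(calleeSavesBuffer, entry.offset()));
        }
    }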