AssemblyHelpers should not have a VM field
author	sbarati@apple.com <sbarati@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 29 Mar 2017 06:15:23 +0000 (06:15 +0000)
committer	sbarati@apple.com <sbarati@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 29 Mar 2017 06:15:23 +0000 (06:15 +0000)
https://bugs.webkit.org/show_bug.cgi?id=170207

Reviewed by Yusuke Suzuki.

APIs that need a VM should take one as a parameter. When generating
position-independent code for Wasm, we can't tie code generation to a VM.
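
Roughly, the shape of the change is the following sketch (simplified,
hypothetical declarations, not the real JSC headers; the actual
AssemblyHelpers API is much larger):

    // Sketch only: "VM" is a stand-in for JSC's real VM class.
    struct VM { /* exception state, heap, scratch buffers, ... */ };

    // Before: the assembler carried a VM field, so everything it
    // generated was implicitly tied to the VM it was constructed with.
    class AssemblyHelpersBefore {
    public:
        explicit AssemblyHelpersBefore(VM* vm) : m_vm(vm) { }
        void emitExceptionCheck() { (void)m_vm; /* consults m_vm */ }
    private:
        VM* m_vm;
    };

    // After: no VM field. Only the operations that genuinely need a
    // VM take one as an argument, so code generation itself stays
    // VM-agnostic, which is what position-independent Wasm code needs.
    class AssemblyHelpersAfter {
    public:
        AssemblyHelpersAfter() = default;
        void emitExceptionCheck(VM& vm) { (void)vm; /* uses argument */ }
    };

Call sites then pass the VM explicitly, e.g. jit.emitExceptionCheck()
becomes jit.emitExceptionCheck(vm), as the hunks below show.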

* b3/B3Compile.cpp:
(JSC::B3::compile):
* b3/air/testair.cpp:
* b3/testb3.cpp:
(JSC::B3::testEntrySwitchSimple):
(JSC::B3::testEntrySwitchNoEntrySwitch):
(JSC::B3::testEntrySwitchWithCommonPaths):
(JSC::B3::testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint):
(JSC::B3::testEntrySwitchLoop):
* bytecode/AccessCase.cpp:
(JSC::AccessCase::generateWithGuard):
(JSC::AccessCase::generateImpl):
* bytecode/DOMJITAccessCasePatchpointParams.cpp:
(JSC::SlowPathCallGeneratorWithArguments::generateImpl):
* bytecode/InlineAccess.cpp:
(JSC::InlineAccess::dumpCacheSizesAndCrash):
(JSC::InlineAccess::generateSelfPropertyAccess):
(JSC::InlineAccess::generateSelfPropertyReplace):
(JSC::InlineAccess::generateArrayLength):
(JSC::InlineAccess::rewireStubAsJump):
* bytecode/InlineAccess.h:
* bytecode/PolymorphicAccess.cpp:
(JSC::AccessGenerationState::emitExplicitExceptionHandler):
(JSC::PolymorphicAccess::regenerate):
* bytecode/PolymorphicAccess.h:
(JSC::AccessGenerationState::AccessGenerationState):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::JITCompiler):
(JSC::DFG::JITCompiler::compileExceptionHandlers):
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
(JSC::DFG::JITCompiler::exceptionCheck):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::exceptionCheckWithCallFrameRollback):
(JSC::DFG::JITCompiler::fastExceptionCheck):
(JSC::DFG::JITCompiler::vm):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGOSRExitCompiler.h:
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::adjustAndJumpToTarget):
* dfg/DFGOSRExitCompilerCommon.h:
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::emitAllocateRawObject):
(JSC::DFG::SpeculativeJIT::checkArray):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::compileInstanceOfForObject):
(JSC::DFG::SpeculativeJIT::compileMakeRope):
(JSC::DFG::SpeculativeJIT::compileGetGlobalObject):
(JSC::DFG::SpeculativeJIT::compileNewFunctionCommon):
(JSC::DFG::SpeculativeJIT::compileCreateActivation):
(JSC::DFG::SpeculativeJIT::compileCreateDirectArguments):
(JSC::DFG::SpeculativeJIT::compileSpread):
(JSC::DFG::SpeculativeJIT::compileArraySlice):
(JSC::DFG::SpeculativeJIT::compileNukeStructureAndSetButterfly):
(JSC::DFG::SpeculativeJIT::compileNewStringObject):
(JSC::DFG::SpeculativeJIT::compileNewTypedArray):
(JSC::DFG::SpeculativeJIT::compileStoreBarrier):
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::emitAllocateJSObjectWithKnownSize):
(JSC::DFG::SpeculativeJIT::emitAllocateJSObject):
(JSC::DFG::SpeculativeJIT::emitAllocateVariableSizedJSObject):
(JSC::DFG::SpeculativeJIT::emitAllocateDestructibleObject):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::compileLogicalNot):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::compileObjectOrOtherLogicalNot):
(JSC::DFG::SpeculativeJIT::compileLogicalNot):
(JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
* dfg/DFGThunks.cpp:
(JSC::DFG::osrEntryThunkGenerator):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLJITFinalizer.h:
* ftl/FTLLazySlowPath.cpp:
(JSC::FTL::LazySlowPath::generate):
* ftl/FTLLazySlowPathCall.h:
(JSC::FTL::createLazyCallGenerator):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::lower):
(JSC::FTL::DFG::LowerDFGToB3::compileCreateActivation):
(JSC::FTL::DFG::LowerDFGToB3::compileNewFunction):
(JSC::FTL::DFG::LowerDFGToB3::compileCreateDirectArguments):
(JSC::FTL::DFG::LowerDFGToB3::compileNewTypedArray):
(JSC::FTL::DFG::LowerDFGToB3::compileMakeRope):
(JSC::FTL::DFG::LowerDFGToB3::compileNotifyWrite):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
(JSC::FTL::DFG::LowerDFGToB3::compileCallEval):
(JSC::FTL::DFG::LowerDFGToB3::compileIsObjectOrNull):
(JSC::FTL::DFG::LowerDFGToB3::compileIsFunction):
(JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
(JSC::FTL::DFG::LowerDFGToB3::compileMaterializeCreateActivation):
(JSC::FTL::DFG::LowerDFGToB3::compileCheckTraps):
(JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorageWithSizeImpl):
(JSC::FTL::DFG::LowerDFGToB3::allocateObject):
(JSC::FTL::DFG::LowerDFGToB3::allocateJSArray):
(JSC::FTL::DFG::LowerDFGToB3::buildTypeOf):
* ftl/FTLOSRExitCompiler.cpp:
(JSC::FTL::compileStub):
* ftl/FTLSlowPathCall.h:
(JSC::FTL::callOperation):
* ftl/FTLState.h:
(JSC::FTL::State::vm):
* ftl/FTLThunks.cpp:
(JSC::FTL::genericGenerationThunkGenerator):
(JSC::FTL::slowPathCallThunkGenerator):
* jit/AssemblyHelpers.cpp:
(JSC::AssemblyHelpers::jitReleaseAssertNoException):
(JSC::AssemblyHelpers::callExceptionFuzz):
(JSC::AssemblyHelpers::emitJumpIfException):
(JSC::AssemblyHelpers::emitExceptionCheck):
(JSC::AssemblyHelpers::emitNonPatchableExceptionCheck):
(JSC::AssemblyHelpers::emitLoadStructure):
(JSC::AssemblyHelpers::emitRandomThunk):
(JSC::AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer):
(JSC::AssemblyHelpers::emitConvertValueToBoolean):
(JSC::AssemblyHelpers::debugCall):
* jit/AssemblyHelpers.h:
(JSC::AssemblyHelpers::AssemblyHelpers):
(JSC::AssemblyHelpers::codeBlock):
(JSC::AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer):
(JSC::AssemblyHelpers::copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer):
(JSC::AssemblyHelpers::barrierBranch):
(JSC::AssemblyHelpers::barrierStoreLoadFence):
(JSC::AssemblyHelpers::mutatorFence):
(JSC::AssemblyHelpers::storeButterfly):
(JSC::AssemblyHelpers::nukeStructureAndStoreButterfly):
(JSC::AssemblyHelpers::jumpIfMutatorFenceNotNeeded):
(JSC::AssemblyHelpers::emitAllocateJSObjectWithKnownSize):
(JSC::AssemblyHelpers::emitAllocateJSObject):
(JSC::AssemblyHelpers::emitAllocateVariableSizedCell):
(JSC::AssemblyHelpers::emitAllocateVariableSizedJSObject):
(JSC::AssemblyHelpers::emitAllocateDestructibleObject):
(JSC::AssemblyHelpers::vm): Deleted.
(JSC::AssemblyHelpers::debugCall): Deleted.
* jit/CCallHelpers.cpp:
(JSC::CCallHelpers::ensureShadowChickenPacket):
* jit/CCallHelpers.h:
(JSC::CCallHelpers::CCallHelpers):
(JSC::CCallHelpers::jumpToExceptionHandler):
* jit/JIT.cpp:
(JSC::JIT::emitEnterOptimizationCheck):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JIT.h:
(JSC::JIT::exceptionCheck):
(JSC::JIT::exceptionCheckWithCallFrameRollback):
* jit/JITMathIC.h:
(JSC::JITMathIC::generateOutOfLine):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_instanceof):
(JSC::JIT::emit_op_is_undefined):
(JSC::JIT::emit_op_jfalse):
(JSC::JIT::emit_op_jeq_null):
(JSC::JIT::emit_op_jneq_null):
(JSC::JIT::emit_op_jtrue):
(JSC::JIT::emit_op_throw):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_eq_null):
(JSC::JIT::emit_op_neq_null):
(JSC::JIT::emitSlow_op_loop_hint):
(JSC::JIT::emit_op_log_shadow_chicken_prologue):
(JSC::JIT::emit_op_log_shadow_chicken_tail):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::privateCompileCTINativeCall):
(JSC::JIT::emit_op_new_object):
(JSC::JIT::emit_op_jfalse):
(JSC::JIT::emit_op_jtrue):
(JSC::JIT::emit_op_throw):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_create_this):
(JSC::JIT::emit_op_log_shadow_chicken_prologue):
(JSC::JIT::emit_op_log_shadow_chicken_tail):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitWriteBarrier):
* jit/JSInterfaceJIT.h:
(JSC::JSInterfaceJIT::JSInterfaceJIT):
(JSC::JSInterfaceJIT::vm):
* jit/Repatch.cpp:
(JSC::tryCacheGetByID):
(JSC::tryCachePutByID):
(JSC::linkPolymorphicCall):
(JSC::resetGetByID):
(JSC::resetPutByID):
* jit/SetupVarargsFrame.cpp:
(JSC::emitSetupVarargsFrameFastCase):
* jit/SetupVarargsFrame.h:
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::loadArgumentWithSpecificClass):
* jit/ThunkGenerators.cpp:
(JSC::throwExceptionFromCallSlowPathGenerator):
(JSC::linkCallThunkGenerator):
(JSC::linkPolymorphicCallThunkGenerator):
(JSC::virtualThunkFor):
(JSC::nativeForGenerator):
(JSC::randomThunkGenerator):
(JSC::boundThisNoArgsFunctionCallGenerator):
(JSC::throwExceptionFromWasmThunkGenerator):
* wasm/WasmB3IRGenerator.cpp:
(JSC::Wasm::parseAndCompile):
* wasm/WasmBinding.cpp:
(JSC::Wasm::wasmToJs):
(JSC::Wasm::wasmToWasm):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@214531 268f45cc-cd09-0410-ab3c-d52691b4dbfc

51 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/b3/B3Compile.cpp
Source/JavaScriptCore/b3/air/testair.cpp
Source/JavaScriptCore/b3/testb3.cpp
Source/JavaScriptCore/bytecode/AccessCase.cpp
Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
Source/JavaScriptCore/bytecode/InlineAccess.cpp
Source/JavaScriptCore/bytecode/InlineAccess.h
Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
Source/JavaScriptCore/bytecode/PolymorphicAccess.h
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.h
Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
Source/JavaScriptCore/dfg/DFGThunks.cpp
Source/JavaScriptCore/ftl/FTLCompile.cpp
Source/JavaScriptCore/ftl/FTLJITFinalizer.h
Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp
Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h
Source/JavaScriptCore/ftl/FTLLink.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
Source/JavaScriptCore/ftl/FTLSlowPathCall.h
Source/JavaScriptCore/ftl/FTLState.h
Source/JavaScriptCore/ftl/FTLThunks.cpp
Source/JavaScriptCore/jit/AssemblyHelpers.cpp
Source/JavaScriptCore/jit/AssemblyHelpers.h
Source/JavaScriptCore/jit/CCallHelpers.cpp
Source/JavaScriptCore/jit/CCallHelpers.h
Source/JavaScriptCore/jit/JIT.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITMathIC.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JSInterfaceJIT.h
Source/JavaScriptCore/jit/Repatch.cpp
Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
Source/JavaScriptCore/jit/SetupVarargsFrame.h
Source/JavaScriptCore/jit/SpecializedThunkJIT.h
Source/JavaScriptCore/jit/ThunkGenerators.cpp
Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
Source/JavaScriptCore/wasm/WasmBinding.cpp

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 5a04fc4..130cdae 100644
@@ -1,3 +1,232 @@
+2017-03-28  Saam Barati  <sbarati@apple.com>
+
+        AssemblyHelpers should not have a VM field
+        https://bugs.webkit.org/show_bug.cgi?id=170207
+
+        Reviewed by Yusuke Suzuki.
+
+        APIs that need a VM should take one as a parameter. When generating
+        position-independent code for Wasm, we can't tie code generation to a VM.
+
+        * b3/B3Compile.cpp:
+        (JSC::B3::compile):
+        * b3/air/testair.cpp:
+        * b3/testb3.cpp:
+        (JSC::B3::testEntrySwitchSimple):
+        (JSC::B3::testEntrySwitchNoEntrySwitch):
+        (JSC::B3::testEntrySwitchWithCommonPaths):
+        (JSC::B3::testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint):
+        (JSC::B3::testEntrySwitchLoop):
+        * bytecode/AccessCase.cpp:
+        (JSC::AccessCase::generateWithGuard):
+        (JSC::AccessCase::generateImpl):
+        * bytecode/DOMJITAccessCasePatchpointParams.cpp:
+        (JSC::SlowPathCallGeneratorWithArguments::generateImpl):
+        * bytecode/InlineAccess.cpp:
+        (JSC::InlineAccess::dumpCacheSizesAndCrash):
+        (JSC::InlineAccess::generateSelfPropertyAccess):
+        (JSC::InlineAccess::generateSelfPropertyReplace):
+        (JSC::InlineAccess::generateArrayLength):
+        (JSC::InlineAccess::rewireStubAsJump):
+        * bytecode/InlineAccess.h:
+        * bytecode/PolymorphicAccess.cpp:
+        (JSC::AccessGenerationState::emitExplicitExceptionHandler):
+        (JSC::PolymorphicAccess::regenerate):
+        * bytecode/PolymorphicAccess.h:
+        (JSC::AccessGenerationState::AccessGenerationState):
+        * dfg/DFGJITCompiler.cpp:
+        (JSC::DFG::JITCompiler::JITCompiler):
+        (JSC::DFG::JITCompiler::compileExceptionHandlers):
+        (JSC::DFG::JITCompiler::link):
+        (JSC::DFG::JITCompiler::compile):
+        (JSC::DFG::JITCompiler::compileFunction):
+        (JSC::DFG::JITCompiler::exceptionCheck):
+        * dfg/DFGJITCompiler.h:
+        (JSC::DFG::JITCompiler::exceptionCheckWithCallFrameRollback):
+        (JSC::DFG::JITCompiler::fastExceptionCheck):
+        (JSC::DFG::JITCompiler::vm):
+        * dfg/DFGOSRExitCompiler.cpp:
+        * dfg/DFGOSRExitCompiler.h:
+        * dfg/DFGOSRExitCompiler32_64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGOSRExitCompiler64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGOSRExitCompilerCommon.cpp:
+        (JSC::DFG::adjustAndJumpToTarget):
+        * dfg/DFGOSRExitCompilerCommon.h:
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::emitAllocateRawObject):
+        (JSC::DFG::SpeculativeJIT::checkArray):
+        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
+        (JSC::DFG::SpeculativeJIT::compileInstanceOfForObject):
+        (JSC::DFG::SpeculativeJIT::compileMakeRope):
+        (JSC::DFG::SpeculativeJIT::compileGetGlobalObject):
+        (JSC::DFG::SpeculativeJIT::compileNewFunctionCommon):
+        (JSC::DFG::SpeculativeJIT::compileCreateActivation):
+        (JSC::DFG::SpeculativeJIT::compileCreateDirectArguments):
+        (JSC::DFG::SpeculativeJIT::compileSpread):
+        (JSC::DFG::SpeculativeJIT::compileArraySlice):
+        (JSC::DFG::SpeculativeJIT::compileNukeStructureAndSetButterfly):
+        (JSC::DFG::SpeculativeJIT::compileNewStringObject):
+        (JSC::DFG::SpeculativeJIT::compileNewTypedArray):
+        (JSC::DFG::SpeculativeJIT::compileStoreBarrier):
+        * dfg/DFGSpeculativeJIT.h:
+        (JSC::DFG::SpeculativeJIT::emitAllocateJSObjectWithKnownSize):
+        (JSC::DFG::SpeculativeJIT::emitAllocateJSObject):
+        (JSC::DFG::SpeculativeJIT::emitAllocateVariableSizedJSObject):
+        (JSC::DFG::SpeculativeJIT::emitAllocateDestructibleObject):
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        (JSC::DFG::SpeculativeJIT::compileLogicalNot):
+        (JSC::DFG::SpeculativeJIT::emitBranch):
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGSpeculativeJIT64.cpp:
+        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
+        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        (JSC::DFG::SpeculativeJIT::compileObjectOrOtherLogicalNot):
+        (JSC::DFG::SpeculativeJIT::compileLogicalNot):
+        (JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
+        (JSC::DFG::SpeculativeJIT::emitBranch):
+        (JSC::DFG::SpeculativeJIT::compile):
+        (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+        * dfg/DFGThunks.cpp:
+        (JSC::DFG::osrEntryThunkGenerator):
+        * ftl/FTLCompile.cpp:
+        (JSC::FTL::compile):
+        * ftl/FTLJITFinalizer.h:
+        * ftl/FTLLazySlowPath.cpp:
+        (JSC::FTL::LazySlowPath::generate):
+        * ftl/FTLLazySlowPathCall.h:
+        (JSC::FTL::createLazyCallGenerator):
+        * ftl/FTLLink.cpp:
+        (JSC::FTL::link):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::lower):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCreateActivation):
+        (JSC::FTL::DFG::LowerDFGToB3::compileNewFunction):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCreateDirectArguments):
+        (JSC::FTL::DFG::LowerDFGToB3::compileNewTypedArray):
+        (JSC::FTL::DFG::LowerDFGToB3::compileMakeRope):
+        (JSC::FTL::DFG::LowerDFGToB3::compileNotifyWrite):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallEval):
+        (JSC::FTL::DFG::LowerDFGToB3::compileIsObjectOrNull):
+        (JSC::FTL::DFG::LowerDFGToB3::compileIsFunction):
+        (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
+        (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeCreateActivation):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCheckTraps):
+        (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorageWithSizeImpl):
+        (JSC::FTL::DFG::LowerDFGToB3::allocateObject):
+        (JSC::FTL::DFG::LowerDFGToB3::allocateJSArray):
+        (JSC::FTL::DFG::LowerDFGToB3::buildTypeOf):
+        * ftl/FTLOSRExitCompiler.cpp:
+        (JSC::FTL::compileStub):
+        * ftl/FTLSlowPathCall.h:
+        (JSC::FTL::callOperation):
+        * ftl/FTLState.h:
+        (JSC::FTL::State::vm):
+        * ftl/FTLThunks.cpp:
+        (JSC::FTL::genericGenerationThunkGenerator):
+        (JSC::FTL::slowPathCallThunkGenerator):
+        * jit/AssemblyHelpers.cpp:
+        (JSC::AssemblyHelpers::jitReleaseAssertNoException):
+        (JSC::AssemblyHelpers::callExceptionFuzz):
+        (JSC::AssemblyHelpers::emitJumpIfException):
+        (JSC::AssemblyHelpers::emitExceptionCheck):
+        (JSC::AssemblyHelpers::emitNonPatchableExceptionCheck):
+        (JSC::AssemblyHelpers::emitLoadStructure):
+        (JSC::AssemblyHelpers::emitRandomThunk):
+        (JSC::AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer):
+        (JSC::AssemblyHelpers::emitConvertValueToBoolean):
+        (JSC::AssemblyHelpers::debugCall):
+        * jit/AssemblyHelpers.h:
+        (JSC::AssemblyHelpers::AssemblyHelpers):
+        (JSC::AssemblyHelpers::codeBlock):
+        (JSC::AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer):
+        (JSC::AssemblyHelpers::copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer):
+        (JSC::AssemblyHelpers::barrierBranch):
+        (JSC::AssemblyHelpers::barrierStoreLoadFence):
+        (JSC::AssemblyHelpers::mutatorFence):
+        (JSC::AssemblyHelpers::storeButterfly):
+        (JSC::AssemblyHelpers::nukeStructureAndStoreButterfly):
+        (JSC::AssemblyHelpers::jumpIfMutatorFenceNotNeeded):
+        (JSC::AssemblyHelpers::emitAllocateJSObjectWithKnownSize):
+        (JSC::AssemblyHelpers::emitAllocateJSObject):
+        (JSC::AssemblyHelpers::emitAllocateVariableSizedCell):
+        (JSC::AssemblyHelpers::emitAllocateVariableSizedJSObject):
+        (JSC::AssemblyHelpers::emitAllocateDestructibleObject):
+        (JSC::AssemblyHelpers::vm): Deleted.
+        (JSC::AssemblyHelpers::debugCall): Deleted.
+        * jit/CCallHelpers.cpp:
+        (JSC::CCallHelpers::ensureShadowChickenPacket):
+        * jit/CCallHelpers.h:
+        (JSC::CCallHelpers::CCallHelpers):
+        (JSC::CCallHelpers::jumpToExceptionHandler):
+        * jit/JIT.cpp:
+        (JSC::JIT::emitEnterOptimizationCheck):
+        (JSC::JIT::privateCompileExceptionHandlers):
+        * jit/JIT.h:
+        (JSC::JIT::exceptionCheck):
+        (JSC::JIT::exceptionCheckWithCallFrameRollback):
+        * jit/JITMathIC.h:
+        (JSC::JITMathIC::generateOutOfLine):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emit_op_instanceof):
+        (JSC::JIT::emit_op_is_undefined):
+        (JSC::JIT::emit_op_jfalse):
+        (JSC::JIT::emit_op_jeq_null):
+        (JSC::JIT::emit_op_jneq_null):
+        (JSC::JIT::emit_op_jtrue):
+        (JSC::JIT::emit_op_throw):
+        (JSC::JIT::emit_op_catch):
+        (JSC::JIT::emit_op_eq_null):
+        (JSC::JIT::emit_op_neq_null):
+        (JSC::JIT::emitSlow_op_loop_hint):
+        (JSC::JIT::emit_op_log_shadow_chicken_prologue):
+        (JSC::JIT::emit_op_log_shadow_chicken_tail):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::privateCompileCTINativeCall):
+        (JSC::JIT::emit_op_new_object):
+        (JSC::JIT::emit_op_jfalse):
+        (JSC::JIT::emit_op_jtrue):
+        (JSC::JIT::emit_op_throw):
+        (JSC::JIT::emit_op_catch):
+        (JSC::JIT::emit_op_create_this):
+        (JSC::JIT::emit_op_log_shadow_chicken_prologue):
+        (JSC::JIT::emit_op_log_shadow_chicken_tail):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emitWriteBarrier):
+        * jit/JSInterfaceJIT.h:
+        (JSC::JSInterfaceJIT::JSInterfaceJIT):
+        (JSC::JSInterfaceJIT::vm):
+        * jit/Repatch.cpp:
+        (JSC::tryCacheGetByID):
+        (JSC::tryCachePutByID):
+        (JSC::linkPolymorphicCall):
+        (JSC::resetGetByID):
+        (JSC::resetPutByID):
+        * jit/SetupVarargsFrame.cpp:
+        (JSC::emitSetupVarargsFrameFastCase):
+        * jit/SetupVarargsFrame.h:
+        * jit/SpecializedThunkJIT.h:
+        (JSC::SpecializedThunkJIT::loadArgumentWithSpecificClass):
+        * jit/ThunkGenerators.cpp:
+        (JSC::throwExceptionFromCallSlowPathGenerator):
+        (JSC::linkCallThunkGenerator):
+        (JSC::linkPolymorphicCallThunkGenerator):
+        (JSC::virtualThunkFor):
+        (JSC::nativeForGenerator):
+        (JSC::randomThunkGenerator):
+        (JSC::boundThisNoArgsFunctionCallGenerator):
+        (JSC::throwExceptionFromWasmThunkGenerator):
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmBinding.cpp:
+        (JSC::Wasm::wasmToJs):
+        (JSC::Wasm::wasmToWasm):
+
 2017-03-28  Keith Miller  <keith_miller@apple.com>
 
         WebAssembly: We should have Origins
diff --git a/Source/JavaScriptCore/b3/B3Compile.cpp b/Source/JavaScriptCore/b3/B3Compile.cpp
index 980390a..42d6ad2 100644
@@ -44,7 +44,7 @@ Compilation compile(VM& vm, Procedure& proc, unsigned optLevel)
     
     prepareForGeneration(proc, optLevel);
     
-    CCallHelpers jit(&vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(vm, jit, nullptr);
 
diff --git a/Source/JavaScriptCore/b3/air/testair.cpp b/Source/JavaScriptCore/b3/air/testair.cpp
index c3f5f3c..c39c4a8 100644
@@ -85,7 +85,7 @@ VM* vm;
 std::unique_ptr<B3::Compilation> compile(B3::Procedure& proc)
 {
     prepareForGeneration(proc.code());
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc.code(), jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
 
diff --git a/Source/JavaScriptCore/b3/testb3.cpp b/Source/JavaScriptCore/b3/testb3.cpp
index 5a55b4d..672d79b 100644
@@ -13134,7 +13134,7 @@ void testEntrySwitchSimple()
     
     prepareForGeneration(proc);
     
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
     CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
@@ -13167,7 +13167,7 @@ void testEntrySwitchNoEntrySwitch()
     
     prepareForGeneration(proc);
     
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
     CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
@@ -13254,7 +13254,7 @@ void testEntrySwitchWithCommonPaths()
     
     prepareForGeneration(proc);
     
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
     CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
@@ -13371,7 +13371,7 @@ void testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint()
     
     prepareForGeneration(proc);
     
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
     CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
@@ -13449,7 +13449,7 @@ void testEntrySwitchLoop()
     
     prepareForGeneration(proc);
     
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     generate(proc, jit);
     LinkBuffer linkBuffer(*vm, jit, nullptr);
     CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
diff --git a/Source/JavaScriptCore/bytecode/AccessCase.cpp b/Source/JavaScriptCore/bytecode/AccessCase.cpp
index 7c5617a..30aedc3 100644
@@ -292,7 +292,7 @@ void AccessCase::generateWithGuard(
     m_state = Generated;
 
     CCallHelpers& jit = *state.jit;
-    VM& vm = *jit.vm();
+    VM& vm = state.m_vm;
     JSValueRegs valueRegs = state.valueRegs;
     GPRReg baseGPR = state.baseGPR;
     GPRReg scratchGPR = state.scratchGPR;
@@ -412,7 +412,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
     ASSERT(m_state == Generated); // We rely on the callers setting this for us.
 
     CCallHelpers& jit = *state.jit;
-    VM& vm = *jit.vm();
+    VM& vm = state.m_vm;
     CodeBlock* codeBlock = jit.codeBlock();
     StructureStubInfo& stubInfo = *state.stubInfo;
     const Identifier& ident = *state.ident;
@@ -767,7 +767,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
             jit.reclaimSpaceOnStackForCCall();
 
             CCallHelpers::Jump noException =
-            jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+            jit.emitExceptionCheck(vm, CCallHelpers::InvertedExceptionCheck);
 
             state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
             state.emitExplicitExceptionHandler();
@@ -930,7 +930,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
                 jit.reclaimSpaceOnStackForCCall();
                 jit.move(GPRInfo::returnValueGPR, scratchGPR);
                 
-                CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+                CCallHelpers::Jump noException = jit.emitExceptionCheck(vm, CCallHelpers::InvertedExceptionCheck);
                 
                 state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
                 state.emitExplicitExceptionHandler();
@@ -959,7 +959,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)
             // We set the new butterfly and the structure last. Doing it this way ensures that
             // whatever we had done up to this point is forgotten if we choose to branch to slow
             // path.
-            jit.nukeStructureAndStoreButterfly(scratchGPR, baseGPR);
+            jit.nukeStructureAndStoreButterfly(vm, scratchGPR, baseGPR);
         }
         
         uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
diff --git a/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp b/Source/JavaScriptCore/bytecode/DOMJITAccessCasePatchpointParams.cpp
index 790d9c0..30087d1 100644
@@ -73,7 +73,7 @@ public:
         jit.setupResults(m_result);
         jit.reclaimSpaceOnStackForCCall();
 
-        CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+        CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);
 
         state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
         exceptions.append(jit.jump());
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.cpp b/Source/JavaScriptCore/bytecode/InlineAccess.cpp
index 667492a..eb368d8 100644
 #include "ScratchRegisterAllocator.h"
 #include "Structure.h"
 #include "StructureStubInfo.h"
-#include "VM.h"
 
 namespace JSC {
 
-void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
+void InlineAccess::dumpCacheSizesAndCrash()
 {
     GPRReg base = GPRInfo::regT0;
     GPRReg value = GPRInfo::regT1;
@@ -50,7 +49,7 @@ void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
 #endif
 
     {
-        CCallHelpers jit(&vm);
+        CCallHelpers jit;
 
         GPRReg scratchGPR = value;
         jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
@@ -65,7 +64,7 @@ void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
     }
 
     {
-        CCallHelpers jit(&vm);
+        CCallHelpers jit;
 
         jit.patchableBranch32(
             MacroAssembler::NotEqual,
@@ -82,7 +81,7 @@ void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
     }
 
     {
-        CCallHelpers jit(&vm);
+        CCallHelpers jit;
 
         jit.patchableBranch32(
             MacroAssembler::NotEqual,
@@ -95,7 +94,7 @@ void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
     }
 
     {
-        CCallHelpers jit(&vm);
+        CCallHelpers jit;
 
         jit.patchableBranch32(
             MacroAssembler::NotEqual,
@@ -109,7 +108,7 @@ void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
     }
 
     {
-        CCallHelpers jit(&vm);
+        CCallHelpers jit;
 
         jit.patchableBranch32(
             MacroAssembler::NotEqual,
@@ -155,9 +154,9 @@ ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, St
     return false;
 }
 
-bool InlineAccess::generateSelfPropertyAccess(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+bool InlineAccess::generateSelfPropertyAccess(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
 {
-    CCallHelpers jit(&vm);
+    CCallHelpers jit;
     
     GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
     JSValueRegs value = stubInfo.valueRegs();
@@ -211,11 +210,11 @@ bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, P
     return hasFreeRegister(stubInfo);
 }
 
-bool InlineAccess::generateSelfPropertyReplace(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+bool InlineAccess::generateSelfPropertyReplace(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
 {
     ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));
 
-    CCallHelpers jit(&vm);
+    CCallHelpers jit;
 
     GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
     JSValueRegs value = stubInfo.valueRegs();
@@ -255,11 +254,11 @@ bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray*
         || array->indexingType() == ArrayWithContiguous;
 }
 
-bool InlineAccess::generateArrayLength(VM& vm, StructureStubInfo& stubInfo, JSArray* array)
+bool InlineAccess::generateArrayLength(StructureStubInfo& stubInfo, JSArray* array)
 {
     ASSERT(isCacheableArrayLength(stubInfo, array));
 
-    CCallHelpers jit(&vm);
+    CCallHelpers jit;
 
     GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
     JSValueRegs value = stubInfo.valueRegs();
@@ -279,9 +278,9 @@ bool InlineAccess::generateArrayLength(VM& vm, StructureStubInfo& stubInfo, JSAr
     return linkedCodeInline;
 }
 
-void InlineAccess::rewireStubAsJump(VM& vm, StructureStubInfo& stubInfo, CodeLocationLabel target)
+void InlineAccess::rewireStubAsJump(StructureStubInfo& stubInfo, CodeLocationLabel target)
 {
-    CCallHelpers jit(&vm);
+    CCallHelpers jit;
 
     auto jump = jit.jump();
 
diff --git a/Source/JavaScriptCore/bytecode/InlineAccess.h b/Source/JavaScriptCore/bytecode/InlineAccess.h
index ca07e3a..1baabe0 100644
@@ -110,18 +110,18 @@ public:
         return std::max(size, sizeForPropertyAccess());
     }
 
-    static bool generateSelfPropertyAccess(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool generateSelfPropertyAccess(StructureStubInfo&, Structure*, PropertyOffset);
     static bool canGenerateSelfPropertyReplace(StructureStubInfo&, PropertyOffset);
-    static bool generateSelfPropertyReplace(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool generateSelfPropertyReplace(StructureStubInfo&, Structure*, PropertyOffset);
     static bool isCacheableArrayLength(StructureStubInfo&, JSArray*);
-    static bool generateArrayLength(VM&, StructureStubInfo&, JSArray*);
-    static void rewireStubAsJump(VM&, StructureStubInfo&, CodeLocationLabel);
+    static bool generateArrayLength(StructureStubInfo&, JSArray*);
+    static void rewireStubAsJump(StructureStubInfo&, CodeLocationLabel);
 
     // This is helpful when determining the size of an IC on
     // various platforms. When adding a new type of IC, implement
     // its placeholder code here, and log the size. That way we
     // can intelligently choose sizes on various platforms.
-    NO_RETURN_DUE_TO_CRASH static void dumpCacheSizesAndCrash(VM&);
+    NO_RETURN_DUE_TO_CRASH static void dumpCacheSizesAndCrash();
 };
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp b/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp
index 0ca9cf1..6d11bb7 100644
@@ -176,14 +176,14 @@ CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stub
 void AccessGenerationState::emitExplicitExceptionHandler()
 {
     restoreScratch();
-    jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(m_vm);
     if (needsToRestoreRegistersIfException()) {
         // The JIT that produced the original exception handling
         // call site expects the OSR exit to be reached from
         // genericUnwind. Therefore we must model what genericUnwind
         // does here, i.e., set callFrameForCatch and copy callee saves.
 
-        jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
+        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
         CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
 
         // We don't need to insert a new exception handler in the table
@@ -195,13 +195,13 @@ void AccessGenerationState::emitExplicitExceptionHandler()
                 linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
             });
     } else {
-        jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
+        jit->setupArguments(CCallHelpers::TrustedImmPtr(&m_vm), GPRInfo::callFrameRegister);
         CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
         jit->addLinkTask(
             [=] (LinkBuffer& linkBuffer) {
                 linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
             });
-        jit->jumpToExceptionHandler();
+        jit->jumpToExceptionHandler(m_vm);
     }
 }
 
@@ -336,7 +336,7 @@ AccessGenerationResult PolymorphicAccess::regenerate(
     if (verbose)
         dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
     
-    AccessGenerationState state;
+    AccessGenerationState state(vm);
 
     state.access = this;
     state.stubInfo = &stubInfo;
@@ -358,7 +358,7 @@ AccessGenerationResult PolymorphicAccess::regenerate(
 
     state.scratchGPR = allocator.allocateScratchGPR();
     
-    CCallHelpers jit(&vm, codeBlock);
+    CCallHelpers jit(codeBlock);
     state.jit = &jit;
 
     state.preservedReusedRegisterState =
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccess.h b/Source/JavaScriptCore/bytecode/PolymorphicAccess.h
index b25cdd4..a246d76 100644
@@ -177,12 +177,14 @@ private:
 };
 
 struct AccessGenerationState {
-    AccessGenerationState()
-        : m_calculatedRegistersForCallAndExceptionHandling(false)
+    AccessGenerationState(VM& vm)
+        : m_vm(vm) 
+        , m_calculatedRegistersForCallAndExceptionHandling(false)
         , m_needsToRestoreRegistersIfException(false)
         , m_calculatedCallSiteIndex(false)
     {
     }
+    VM& m_vm;
     CCallHelpers* jit { nullptr };
     ScratchRegisterAllocator* allocator;
     ScratchRegisterAllocator::PreservedState preservedReusedRegisterState;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 15f9006..1f69997 100644
@@ -49,7 +49,7 @@
 namespace JSC { namespace DFG {
 
 JITCompiler::JITCompiler(Graph& dfg)
-    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
+    : CCallHelpers(dfg.m_codeBlock)
     , m_graph(dfg)
     , m_jitCode(adoptRef(new JITCode()))
     , m_blockHeads(dfg.numBlocks())
@@ -139,7 +139,7 @@ void JITCompiler::compileExceptionHandlers()
     if (!m_exceptionChecksWithCallFrameRollback.empty()) {
         m_exceptionChecksWithCallFrameRollback.link(this);
 
-        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
 
         // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
         move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
@@ -153,13 +153,13 @@ void JITCompiler::compileExceptionHandlers()
 #endif
         m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
 
-        jumpToExceptionHandler();
+        jumpToExceptionHandler(*vm());
     }
 
     if (!m_exceptionChecks.empty()) {
         m_exceptionChecks.link(this);
 
-        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
 
         // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
         move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
@@ -172,7 +172,7 @@ void JITCompiler::compileExceptionHandlers()
 #endif
         m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
 
-        jumpToExceptionHandler();
+        jumpToExceptionHandler(*vm());
     }
 }
 
@@ -278,7 +278,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
     
     for (auto& record : m_jsCalls) {
         CallLinkInfo& info = *record.info;
-        linkBuffer.link(record.slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
+        linkBuffer.link(record.slowCall, FunctionPtr(vm()->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
         info.setCallLocations(
             CodeLocationLabel(linkBuffer.locationOfNearCall(record.slowCall)),
             CodeLocationLabel(linkBuffer.locationOf(record.targetToCheck)),
@@ -361,7 +361,7 @@ void JITCompiler::compile()
 
     // Plant a check that sufficient space is available in the JSStack.
     addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);
+    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(vm()->addressOfSoftStackLimit()), GPRInfo::regT1);
 
     addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
     checkStackPointerAlignment();
@@ -394,7 +394,7 @@ void JITCompiler::compile()
     m_speculative->createOSREntries();
     setEndOfCode();
 
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+    auto linkBuffer = std::make_unique<LinkBuffer>(*vm(), *this, m_codeBlock, JITCompilationCanFail);
     if (linkBuffer->didFailToAllocate()) {
         m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
         return;
@@ -424,7 +424,7 @@ void JITCompiler::compileFunction()
     Label fromArityCheck(this);
     // Plant a check that sufficient space is available in the JSStack.
     addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);
+    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(vm()->addressOfSoftStackLimit()), GPRInfo::regT1);
 
     // Move the stack pointer down to accommodate locals
     addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
@@ -488,7 +488,7 @@ void JITCompiler::compileFunction()
     setEndOfCode();
 
     // === Link ===
-    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
+    auto linkBuffer = std::make_unique<LinkBuffer>(*vm(), *this, m_codeBlock, JITCompilationCanFail);
     if (linkBuffer->didFailToAllocate()) {
         m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
         return;
@@ -499,7 +499,7 @@ void JITCompiler::compileFunction()
     m_jitCode->shrinkToFit();
     codeBlock()->shrinkToFit(CodeBlock::LateShrink);
     
-    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
+    linkBuffer->link(m_callArityFixup, FunctionPtr((vm()->getCTIStub(arityFixupGenerator)).code().executableAddress()));
     
     disassemble(*linkBuffer);
 
@@ -624,11 +624,11 @@ void JITCompiler::exceptionCheck()
     bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler); 
     if (willCatchException) {
         unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
-        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
+        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(*vm());
         // We assume here that this is called after callOperation()/appendCall() is called.
         appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
     } else
-        m_exceptionChecks.append(emitExceptionCheck());
+        m_exceptionChecks.append(emitExceptionCheck(*vm()));
 }
 
 CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index 425dfcd..86e3733 100644
@@ -170,13 +170,13 @@ public:
 
     void exceptionCheckWithCallFrameRollback()
     {
-        m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck());
+        m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck(*vm()));
     }
 
     // Add a call out from JIT code, with a fast exception check that tests if the return value is zero.
     void fastExceptionCheck()
     {
-        callExceptionFuzz();
+        callExceptionFuzz(*vm());
         m_exceptionChecks.append(branchTestPtr(Zero, GPRInfo::returnValueGPR));
     }
     
@@ -268,6 +268,8 @@ public:
 
     PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder() { return m_pcToCodeOriginMapBuilder; }
 
+    VM* vm() { return &m_graph.m_vm; }
+
 private:
     friend class OSRExitJumpPlaceholder;
     
@@ -300,6 +302,7 @@ private:
     
     Vector<Label> m_blockHeads;
 
+
     struct JSCallRecord {
         JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info)
             : fastCall(fastCall)
@@ -342,6 +345,7 @@ private:
         Label slowPath;
         CallLinkInfo* info;
     };
+
     
     Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds;
     Vector<InlineCacheWrapper<JITGetByIdWithThisGenerator>, 4> m_getByIdsWithThis;
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 1fa3c0a..d8d19ec 100644
@@ -146,13 +146,13 @@ void compileOSRExit(ExecState* exec)
         recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];
 
     {
-        CCallHelpers jit(vm, codeBlock);
+        CCallHelpers jit(codeBlock);
         OSRExitCompiler exitCompiler(jit);
 
         if (exit.m_kind == GenericUnwind) {
             // We are acting as a de facto op_catch because we arrive here from genericUnwind().
             // So, we must restore our call frame and stack pointer.
-            jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+            jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
             jit.loadPtr(vm->addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
         }
         jit.addPtr(
@@ -171,7 +171,7 @@ void compileOSRExit(ExecState* exec)
             jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
         }
 
-        exitCompiler.compileExit(exit, operands, recovery);
+        exitCompiler.compileExit(*vm, exit, operands, recovery);
         
         LinkBuffer patchBuffer(*vm, jit, codeBlock);
         exit.m_code = FINALIZE_CODE_IF(
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
index b94461f..2a36920 100644
@@ -45,7 +45,7 @@ public:
     {
     }
     
-    void compileExit(const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*);
+    void compileExit(VM&, const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*);
 
 private:
     void emitRestoreArguments(const Operands<ValueRecovery>&);
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 79eff76..09a1558 100644
@@ -36,7 +36,7 @@
 
 namespace JSC { namespace DFG {
 
-void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
+void OSRExitCompiler::compileExit(VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
 {
     // Pro-forma stuff.
     if (Options::printEachOSRExit()) {
@@ -45,9 +45,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         debugInfo->kind = exit.m_kind;
         debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
         
-        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+        m_jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
     }
-    
+
     // Perform speculation recovery. This only comes into play when an operation
     // starts mutating state before verifying the speculation it has already made.
     
@@ -160,7 +160,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
 
     // Save all state from GPRs into the scratch buffer.
     
-    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
+    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
     EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
     
     for (size_t index = 0; index < operands.size(); ++index) {
@@ -255,7 +255,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
     m_jit.emitSaveCalleeSavesFor(m_jit.baselineCodeBlock());
 
     if (exit.isExceptionHandler())
-        m_jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        m_jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
 
     // Do all data format conversions and store the results into the stack.
     
@@ -403,7 +403,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
     reifyInlinedCallFrames(m_jit, exit);
     
     // And finish.
-    adjustAndJumpToTarget(m_jit, exit);
+    adjustAndJumpToTarget(vm, m_jit, exit);
 }
 
 } } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 2b7ac88..14eac64 100644
@@ -38,7 +38,7 @@
 
 namespace JSC { namespace DFG {
 
-void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
+void OSRExitCompiler::compileExit(VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
 {
     m_jit.jitAssertTagsInPlace();
 
@@ -49,9 +49,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         debugInfo->kind = exit.m_kind;
         debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
         
-        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+        m_jit.debugCall(vm, debugOperationPrintSpeculationFailure, debugInfo);
     }
-    
+
     // Perform speculation recovery. This only comes into play when an operation
     // starts mutating state before verifying the speculation it has already made.
     
@@ -184,7 +184,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
 
     // Save all state from GPRs into the scratch buffer.
     
-    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
+    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
     EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
     
     for (size_t index = 0; index < operands.size(); ++index) {
@@ -265,7 +265,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
     m_jit.emitMaterializeTagCheckRegisters();
 
     if (exit.isExceptionHandler())
-        m_jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        m_jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
 
     // Do all data format conversions and store the results into the stack.
     
@@ -390,7 +390,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
     reifyInlinedCallFrames(m_jit, exit);
 
     // And finish.
-    adjustAndJumpToTarget(m_jit, exit);
+    adjustAndJumpToTarget(vm, m_jit, exit);
 }
 
 } } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
index c63f53f..f76ccb8 100644
@@ -267,7 +267,7 @@ static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
     ownerIsRememberedOrInEden.link(&jit);
 }
 
-void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
+void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
 {
     jit.memoryFence();
     
@@ -309,7 +309,7 @@ void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
     jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
     if (exit.isExceptionHandler()) {
         // Since we're jumping to op_catch, we need to set callFrameForCatch.
-        jit.storePtr(GPRInfo::callFrameRegister, jit.vm()->addressOfCallFrameForCatch());
+        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
     }
     
     jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
index 8d278ce..108a0f5 100644
@@ -38,7 +38,7 @@ namespace JSC { namespace DFG {
 
 void handleExitCounts(CCallHelpers&, const OSRExitBase&);
 void reifyInlinedCallFrames(CCallHelpers&, const OSRExitBase&);
-void adjustAndJumpToTarget(CCallHelpers&, const OSRExitBase&);
+void adjustAndJumpToTarget(VM&, CCallHelpers&, const OSRExitBase&);
 
 template <typename JITCodeType>
 void adjustFrameAndStackInOSRExitCompilerThunk(MacroAssembler& jit, VM* vm, JITCode::JITType jitType)
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 3727940..78a277e 100644
@@ -166,7 +166,7 @@ void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure
     
     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
 }
 
 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
@@ -872,7 +872,7 @@ void SpeculativeJIT::checkArray(Node* node)
     
     GPRTemporary temp(this);
     GPRTemporary temp2(this);
-    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
+    m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
     speculationCheck(
         BadType, JSValueSource::unboxedCell(baseReg), node,
         m_jit.branchPtr(
@@ -1745,7 +1745,7 @@ void SpeculativeJIT::compileCurrentBlock()
         }
 
         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
-            m_jit.jitReleaseAssertNoException();
+            m_jit.jitReleaseAssertNoException(*m_jit.vm());
 
         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
 
@@ -3085,7 +3085,7 @@ void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg p
     MacroAssembler::Label loop(&m_jit);
     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
-    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
+    m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratchReg, scratch2Reg);
     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
 #if USE(JSVALUE64)
@@ -4022,7 +4022,7 @@ void SpeculativeJIT::compileMakeRope(Node* node)
     }
     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
     
     switch (numOpGPRs) {
     case 2:
@@ -6321,7 +6321,7 @@ void SpeculativeJIT::compileGetGlobalObject(Node* node)
     SpeculateCellOperand object(this, node->child1());
     GPRTemporary result(this);
     GPRTemporary scratch(this);
-    m_jit.emitLoadStructure(object.gpr(), result.gpr(), scratch.gpr());
+    m_jit.emitLoadStructure(*m_jit.vm(), object.gpr(), result.gpr(), scratch.gpr());
     m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), result.gpr());
     cellResult(result.gpr(), node);
 }
@@ -6450,7 +6450,7 @@ template <typename ClassType> void SpeculativeJIT::compileNewFunctionCommon(GPRR
     m_jit.storePtr(TrustedImmPtr::weakPointer(m_jit.graph(), executable), JITCompiler::Address(resultGPR, offsetOfExecutable));
     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, offsetOfRareData));
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
 }
 
 void SpeculativeJIT::compileNewFunction(Node* node)
@@ -6652,7 +6652,7 @@ void SpeculativeJIT::compileCreateActivation(Node* node)
                 resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
     }
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
 
 #if USE(JSVALUE64)
     addSlowPathGenerator(
@@ -6815,7 +6815,7 @@ void SpeculativeJIT::compileCreateDirectArguments(Node* node)
             done.link(&m_jit);
     }
         
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
         
     cellResult(resultGPR, node);
 }
@@ -7038,7 +7038,7 @@ void SpeculativeJIT::compileSpread(Node* node)
         m_jit.lshift32(TrustedImm32(3), scratch1GPR);
         m_jit.add32(TrustedImm32(JSFixedArray::offsetOfData()), scratch1GPR);
 
-        m_jit.emitAllocateVariableSizedCell<JSFixedArray>(resultGPR, TrustedImmPtr(m_jit.graph().registerStructure(m_jit.graph().m_vm.fixedArrayStructure.get())), scratch1GPR, scratch1GPR, scratch2GPR, slowPath);
+        m_jit.emitAllocateVariableSizedCell<JSFixedArray>(*m_jit.vm(), resultGPR, TrustedImmPtr(m_jit.graph().registerStructure(m_jit.graph().m_vm.fixedArrayStructure.get())), scratch1GPR, scratch1GPR, scratch2GPR, slowPath);
         m_jit.store32(lengthGPR, MacroAssembler::Address(resultGPR, JSFixedArray::offsetOfSize()));
 
         m_jit.loadPtr(MacroAssembler::Address(argument, JSObject::butterflyOffset()), scratch1GPR);
@@ -7080,7 +7080,7 @@ void SpeculativeJIT::compileSpread(Node* node)
             done.append(m_jit.jump());
         }
         
-        m_jit.mutatorFence();
+        m_jit.mutatorFence(*m_jit.vm());
 
         slowPath.link(&m_jit);
         addSlowPathGenerator(slowPathCall(m_jit.jump(), this, operationSpreadFastArray, resultGPR, argument));
@@ -7382,7 +7382,7 @@ void SpeculativeJIT::compileArraySlice(Node* node)
             emitAllocateButterfly(storageResultGPR, sizeGPR, scratchGPR, scratch2GPR, resultGPR, slowCases);
             emitInitializeButterfly(storageResultGPR, sizeGPR, emptyValueRegs, scratchGPR);
             emitAllocateJSObject<JSArray>(resultGPR, tempValue, storageResultGPR, scratchGPR, scratch2GPR, slowCases);
-            m_jit.mutatorFence();
+            m_jit.mutatorFence(*m_jit.vm());
 
             addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableStructureVariableSizeSlowPathGenerator>(
                 slowCases, this, operationNewArrayWithSize, resultGPR, tempValue, sizeGPR, storageResultGPR, scratchGPR));
@@ -7745,7 +7745,7 @@ void SpeculativeJIT::compileNukeStructureAndSetButterfly(Node* node)
     GPRReg baseGPR = base.gpr();
     GPRReg storageGPR = storage.gpr();
     
-    m_jit.nukeStructureAndStoreButterfly(storageGPR, baseGPR);
+    m_jit.nukeStructureAndStoreButterfly(*m_jit.vm(), storageGPR, baseGPR);
     
     noResult(node);
 }
@@ -8184,7 +8184,7 @@ void SpeculativeJIT::compileNewStringObject(Node* node)
         JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
 #endif
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
     
     addSlowPathGenerator(slowPathCall(
         slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));
@@ -8263,7 +8263,7 @@ void SpeculativeJIT::compileNewTypedArray(Node* node)
         TrustedImm32(FastTypedArray),
         MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfMode()));
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
     
     addSlowPathGenerator(slowPathCall(
         slowCases, this, operationNewTypedArrayWithSizeForType(type),
@@ -9332,9 +9332,9 @@ void SpeculativeJIT::compileStoreBarrier(Node* node)
     JITCompiler::JumpList ok;
     
     if (isFenced) {
-        ok.append(m_jit.barrierBranch(baseGPR, scratch1GPR));
+        ok.append(m_jit.barrierBranch(*m_jit.vm(), baseGPR, scratch1GPR));
         
-        JITCompiler::Jump noFence = m_jit.jumpIfMutatorFenceNotNeeded();
+        JITCompiler::Jump noFence = m_jit.jumpIfMutatorFenceNotNeeded(*m_jit.vm());
         m_jit.memoryFence();
         ok.append(m_jit.barrierBranchWithoutFence(baseGPR));
         noFence.link(&m_jit);
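
The change through this file is mechanical: every AssemblyHelpers emitter that consults VM state (mutatorFence, barrierBranch, jumpIfMutatorFenceNotNeeded, the allocators) now takes VM& explicitly, and DFG call sites pass *m_jit.vm() because the DFG's JITCompiler still knows which VM it compiles for. A minimal sketch of the shape of the refactoring, with stand-in types rather than JSC's real ones:

    // Illustrative stand-ins only; the point is the signature change.
    struct VM { unsigned barrierThreshold { 0 }; };

    struct Emitter {
        // Before: VM state was reached through a member pointer bound at
        // construction. After: the caller passes VM& per call, so an Emitter
        // can be constructed with no VM at all -- which is what
        // position-independent Wasm code generation needs.
        void mutatorFence(VM& vm) { (void)vm.barrierThreshold; /* emit fence */ }
    };

    struct Compiler {
        VM* m_vm { nullptr };  // compilers that do know their VM keep it...
        Emitter m_jit;
        VM* vm() { return m_vm; }
        void compileNode() { m_jit.mutatorFence(*vm()); }  // ...and thread it through
    };
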
index 5a80d48..59221cd 100644 (file)
@@ -2889,7 +2889,7 @@ public:
         GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
         GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size)
     {
-        m_jit.emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, size);
+        m_jit.emitAllocateJSObjectWithKnownSize<ClassType>(*m_jit.vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, size);
     }
 
     // Convenience allocator for a built-in object.
@@ -2897,20 +2897,20 @@ public:
     void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
         GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
     {
-        m_jit.emitAllocateJSObject<ClassType>(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath);
+        m_jit.emitAllocateJSObject<ClassType>(*m_jit.vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath);
     }
 
     template <typename ClassType, typename StructureType> // StructureType and StorageType can be GPR or ImmPtr.
     void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
     {
-        m_jit.emitAllocateVariableSizedJSObject<ClassType>(resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+        m_jit.emitAllocateVariableSizedJSObject<ClassType>(*m_jit.vm(), resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
     }
 
     template<typename ClassType>
     void emitAllocateDestructibleObject(GPRReg resultGPR, RegisteredStructure structure, 
         GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
     {
-        m_jit.emitAllocateDestructibleObject<ClassType>(resultGPR, structure.get(), scratchGPR1, scratchGPR2, slowPath);
+        m_jit.emitAllocateDestructibleObject<ClassType>(*m_jit.vm(), resultGPR, structure.get(), scratchGPR1, scratchGPR2, slowPath);
     }
 
     void emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength);
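
Note that the wrappers above keep their pre-change signatures and merely forward *m_jit.vm(); that is why the many DFG call sites need no edits. For example:

    // e.g. compileAllocateNewArrayWithSize below still reads, unchanged:
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
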
index ced5495..c915f51 100644 (file)
@@ -816,7 +816,7 @@ void SpeculativeJIT::emitCall(Node* node)
             else
                 inlineCallFrame = node->origin.semantic.inlineCallFrame;
             // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
-            emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
+            emitSetupVarargsFrameFastCase(*m_jit.vm(), m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
             JITCompiler::Jump done = m_jit.jump();
             slowCase.link(&m_jit);
             callOperation(operationThrowStackOverflowForVarargs);
@@ -1834,7 +1834,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
         bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
         bool negateResult = true;
-        m_jit.emitConvertValueToBoolean(arg1.jsValueRegs(), resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
+        m_jit.emitConvertValueToBoolean(*m_jit.vm(), arg1.jsValueRegs(), resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
         booleanResult(resultGPR, node);
         return;
     }
@@ -1979,7 +1979,7 @@ void SpeculativeJIT::emitBranch(Node* node)
 
         bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
-        m_jit.emitConvertValueToBoolean(valueRegs, resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject);
+        m_jit.emitConvertValueToBoolean(*m_jit.vm(), valueRegs, resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject);
         branchTest32(JITCompiler::Zero, resultGPR, notTaken);
         jump(taken, ForceJump);
 
@@ -5530,7 +5530,7 @@ void SpeculativeJIT::compile(Node* node)
         GPRTemporary scratch2(this);
         GPRReg scratch2Reg = scratch2.gpr();
 
-        m_jit.ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+        m_jit.ensureShadowChickenPacket(*m_jit.vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
         SpeculateCellOperand scope(this, node->child1());
         GPRReg scopeReg = scope.gpr();
@@ -5552,7 +5552,7 @@ void SpeculativeJIT::compile(Node* node)
         GPRTemporary scratch2(this);
         GPRReg scratch2Reg = scratch2.gpr();
 
-        m_jit.ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+        m_jit.ensureShadowChickenPacket(*m_jit.vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
         JSValueOperand thisValue(this, node->child1());
         JSValueRegs thisRegs = thisValue.jsValueRegs();
index 326d143..85eeb6b 100644 (file)
@@ -279,7 +279,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operan
         GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
         GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
         m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
-        m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
+        m_jit.emitLoadStructure(*m_jit.vm(), argGPR, resultGPR, scratch.gpr());
         m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
         m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
         done.append(m_jit.jump());
@@ -334,7 +334,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, N
         GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
         GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
         m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
-        m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
+        m_jit.emitLoadStructure(*m_jit.vm(), argGPR, resultGPR, scratch.gpr());
         m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
         branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, taken);
 
@@ -781,7 +781,7 @@ void SpeculativeJIT::emitCall(Node* node)
             else
                 inlineCallFrame = node->origin.semantic.inlineCallFrame;
             // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
-            emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
+            emitSetupVarargsFrameFastCase(*m_jit.vm(), m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
             JITCompiler::Jump done = m_jit.jump();
             slowCase.link(&m_jit);
             callOperation(operationThrowStackOverflowForVarargs);
@@ -1876,7 +1876,7 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
                 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()), 
                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
 
-        m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
+        m_jit.emitLoadStructure(*m_jit.vm(), valueGPR, structureGPR, scratchGPR);
         speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse, 
             m_jit.branchPtr(
                 MacroAssembler::Equal, 
@@ -1982,7 +1982,7 @@ void SpeculativeJIT::compileLogicalNot(Node* node)
             scratchGPR = scratch->gpr();
         }
         bool negateResult = true;
-        m_jit.emitConvertValueToBoolean(JSValueRegs(arg1GPR), resultGPR, scratchGPR, valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
+        m_jit.emitConvertValueToBoolean(*m_jit.vm(), JSValueRegs(arg1GPR), resultGPR, scratchGPR, valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
         m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
         jsValueResult(resultGPR, node, DataFormatJSBoolean);
         return;
@@ -2027,7 +2027,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, Ba
             MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()), 
             TrustedImm32(MasqueradesAsUndefined));
 
-        m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
+        m_jit.emitLoadStructure(*m_jit.vm(), valueGPR, structureGPR, scratchGPR);
         speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
             m_jit.branchPtr(
                 MacroAssembler::Equal, 
@@ -2155,7 +2155,7 @@ void SpeculativeJIT::emitBranch(Node* node)
             value.use();
 
             JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
-            m_jit.emitConvertValueToBoolean(JSValueRegs(valueGPR), resultGPR, scratchGPR, valueFPR, tempFPR, shouldCheckMasqueradesAsUndefined, globalObject);
+            m_jit.emitConvertValueToBoolean(*m_jit.vm(), JSValueRegs(valueGPR), resultGPR, scratchGPR, valueFPR, tempFPR, shouldCheckMasqueradesAsUndefined, globalObject);
     
             branchTest32(MacroAssembler::NonZero, resultGPR, taken);
             jump(notTaken);
@@ -4118,7 +4118,7 @@ void SpeculativeJIT::compile(Node* node)
         m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
         m_jit.load32(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), inlineCapacityGPR);
         m_jit.emitInitializeInlineStorage(resultGPR, inlineCapacityGPR);
-        m_jit.mutatorFence();
+        m_jit.mutatorFence(*m_jit.vm());
 
         addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR, node->inlineCapacity()));
         
@@ -4144,7 +4144,7 @@ void SpeculativeJIT::compile(Node* node)
         m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR);
         emitAllocateJSObject(resultGPR, allocatorPtr, allocatorGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, slowPath);
         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
-        m_jit.mutatorFence();
+        m_jit.mutatorFence(*m_jit.vm());
 
         addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure));
         
@@ -4741,7 +4741,7 @@ void SpeculativeJIT::compile(Node* node)
             GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
             GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
             m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
-            m_jit.emitLoadStructure(value.gpr(), result.gpr(), scratch.gpr());
+            m_jit.emitLoadStructure(*m_jit.vm(), value.gpr(), result.gpr(), scratch.gpr());
             m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR); 
             m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
         }
@@ -5702,7 +5702,7 @@ void SpeculativeJIT::compile(Node* node)
         GPRTemporary shadowPacket(this);
         GPRReg shadowPacketReg = shadowPacket.gpr();
 
-        m_jit.ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+        m_jit.ensureShadowChickenPacket(*m_jit.vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
         SpeculateCellOperand scope(this, node->child1());
         GPRReg scopeReg = scope.gpr();
@@ -5724,7 +5724,7 @@ void SpeculativeJIT::compile(Node* node)
         GPRTemporary shadowPacket(this);
         GPRReg shadowPacketReg = shadowPacket.gpr();
 
-        m_jit.ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+        m_jit.ensureShadowChickenPacket(*m_jit.vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
         JSValueOperand thisValue(this, node->child1());
         JSValueRegs thisRegs = JSValueRegs(thisValue.gpr());
@@ -6007,7 +6007,7 @@ void SpeculativeJIT::compileAllocateNewArrayWithSize(JSGlobalObject* globalObjec
     
     emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
     
-    m_jit.mutatorFence();
+    m_jit.mutatorFence(*m_jit.vm());
     
     addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
         slowCases, this, operationNewArrayWithSize, resultGPR,
index 38bdd11..cf4854b 100644 (file)
@@ -102,7 +102,7 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
 
 MacroAssemblerCodeRef osrEntryThunkGenerator(VM* vm)
 {
-    AssemblyHelpers jit(vm, nullptr);
+    AssemblyHelpers jit(nullptr);
 
     // We get passed the address of a scratch buffer. The first 8-byte slot of the buffer
     // is the frame size. The second 8-byte slot is the pointer to where we are supposed to
@@ -135,7 +135,7 @@ MacroAssemblerCodeRef osrEntryThunkGenerator(VM* vm)
     jit.abortWithReason(DFGUnreasonableOSREntryJumpDestination);
 
     ok.link(&jit);
-    jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+    jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
     jit.emitMaterializeTagCheckRegisters();
 
     jit.jump(GPRInfo::regT1);
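
This thunk is where the point of the change shows most directly: the generator now builds an AssemblyHelpers with no CodeBlock and no VM, and the one operation that genuinely needs VM state receives *vm at the call. A stand-in sketch of the resulting shape:

    // Stand-ins, not JSC's real classes; only the construction pattern matters.
    struct VM { };
    struct CodeBlock;

    struct AssemblyHelpers {
        explicit AssemblyHelpers(CodeBlock* codeBlock) : m_codeBlock(codeBlock) { }
        void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(VM&) { /* emit loads */ }
        CodeBlock* m_codeBlock;
    };

    void buildOSREntryThunk(VM* vm)
    {
        AssemblyHelpers jit(nullptr);  // constructible with no CodeBlock and no VM
        // ... emit the thunk ...
        jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);  // VM only where needed
    }
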
index 1cdfc63..a9f8665 100644 (file)
@@ -126,16 +126,16 @@ void compile(State& state, Safepoint::Result& safepointResult)
     // We will add exception handlers while generating.
     codeBlock->clearExceptionHandlers();
 
-    CCallHelpers jit(&vm, codeBlock);
+    CCallHelpers jit(codeBlock);
     B3::generate(*state.proc, jit);
 
     // Emit the exception handler.
     *state.exceptionHandler = jit.label();
-    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
-    jit.move(MacroAssembler::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
+    jit.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
     CCallHelpers::Call call = jit.call();
-    jit.jumpToExceptionHandler();
+    jit.jumpToExceptionHandler(vm);
     jit.addLinkTask(
         [=] (LinkBuffer& linkBuffer) {
             linkBuffer.link(call, FunctionPtr(lookupExceptionHandler));
index 77c431b..679bbff 100644 (file)
@@ -30,7 +30,6 @@
 #include "DFGFinalizer.h"
 #include "FTLGeneratedFunction.h"
 #include "FTLJITCode.h"
-#include "FTLSlowPathCall.h"
 #include "LinkBuffer.h"
 #include "MacroAssembler.h"
 
index 9bdc06d..ab03220 100644 (file)
@@ -57,7 +57,7 @@ void LazySlowPath::generate(CodeBlock* codeBlock)
 
     VM& vm = *codeBlock->vm();
 
-    CCallHelpers jit(&vm, codeBlock);
+    CCallHelpers jit(codeBlock);
     GenerationParams params;
     CCallHelpers::JumpList exceptionJumps;
     params.exceptionJumps = m_exceptionTarget ? &exceptionJumps : nullptr;
index 29214ac..b768c4f 100644 (file)
@@ -38,12 +38,12 @@ namespace JSC { namespace FTL {
 
 template<typename ResultType, typename... ArgumentTypes>
 RefPtr<LazySlowPath::Generator> createLazyCallGenerator(
-    FunctionPtr function, ResultType result, ArgumentTypes... arguments)
+    VM& vm, FunctionPtr function, ResultType result, ArgumentTypes... arguments)
 {
     return LazySlowPath::createGenerator(
-        [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
+        [=, &vm] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
             callOperation(
-                params.lazySlowPath->usedRegisters(), jit, params.lazySlowPath->callSiteIndex(),
+                vm, params.lazySlowPath->usedRegisters(), jit, params.lazySlowPath->callSiteIndex(),
                 params.exceptionJumps, function, result, arguments...);
             params.doneJumps.append(jit.jump());
         });
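
The capture list is worth a second look: [=, &vm] copies every other captured value, because the generator runs long after createLazyCallGenerator's frame is gone, while the VM is captured by reference since it is expected to outlive any code compiled for it. A self-contained illustration of those capture semantics:

    #include <cstdio>
    #include <functional>

    struct VM { int counter { 0 }; };

    std::function<void()> makeGenerator(VM& vm, int argument)
    {
        // [=, &vm]: 'argument' is copied (it dies when this frame unwinds),
        // while 'vm' is captured by reference to the long-lived VM object.
        return [=, &vm] { std::printf("%d %d\n", vm.counter, argument); };
    }

    int main()
    {
        VM vm;
        auto generator = makeGenerator(vm, 42);
        vm.counter = 7;  // the lambda observes this later write...
        generator();     // prints "7 42": fresh VM state, copied argument
        return 0;
    }
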
index 0a56ba1..89cd376 100644 (file)
@@ -60,7 +60,7 @@ void link(State& state)
 
     // Create the entrypoint. Note that we use this entrypoint totally differently
     // depending on whether we're doing OSR entry or not.
-    CCallHelpers jit(&vm, codeBlock);
+    CCallHelpers jit(codeBlock);
     
     std::unique_ptr<LinkBuffer> linkBuffer;
 
@@ -142,11 +142,11 @@ void link(State& state)
             CCallHelpers::Call callArityCheck = jit.call();
 
             auto noException = jit.branch32(CCallHelpers::GreaterThanOrEqual, GPRInfo::returnValueGPR, CCallHelpers::TrustedImm32(0));
-            jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
-            jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+            jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm);
+            jit.move(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
             jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
             CCallHelpers::Call callLookupExceptionHandlerFromCallerFrame = jit.call();
-            jit.jumpToExceptionHandler();
+            jit.jumpToExceptionHandler(vm);
             noException.link(&jit);
 
             if (!ASSERT_DISABLED) {
index c2f1b7a..a117b20 100644 (file)
@@ -200,9 +200,11 @@ public:
         // that would cause it to always get collected.
         m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
 
+        VM* vm = &this->vm();
+
         // Stack Overflow Check.
         unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
-        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm().addressOfSoftStackLimit());
+        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
         PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
         CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
         stackOverflowHandler->appendSomeRegister(m_callFrame);
@@ -226,16 +228,16 @@ public:
                     jit.store32(
                         MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                         CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
-                    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+                    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
 
                     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                     jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                     CCallHelpers::Call throwCall = jit.call();
 
-                    jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
+                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                     CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
-                    jit.jumpToExceptionHandler();
+                    jit.jumpToExceptionHandler(*vm);
 
                     jit.addLinkTask(
                         [=] (LinkBuffer& linkBuffer) {
@@ -4072,9 +4074,10 @@ private:
         m_out.jump(continuation);
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateActivationDirect, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(table),
@@ -4136,21 +4139,22 @@ private:
 
         Vector<LValue> slowPathArguments;
         slowPathArguments.append(scope);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                 if (isGeneratorFunction) {
-                    return createLazyCallGenerator(
+                    return createLazyCallGenerator(vm,
                         operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint,
                         locations[0].directGPR(), locations[1].directGPR(),
                         CCallHelpers::TrustedImmPtr(executable));
                 }
                 if (isAsyncFunction) {
-                    return createLazyCallGenerator(
+                    return createLazyCallGenerator(vm,
                         operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint,
                         locations[0].directGPR(), locations[1].directGPR(),
                         CCallHelpers::TrustedImmPtr(executable));
                 }
-                return createLazyCallGenerator(
+                return createLazyCallGenerator(vm,
                     operationNewFunctionWithInvalidatedReallocationWatchpoint,
                     locations[0].directGPR(), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(executable));
@@ -4208,9 +4212,10 @@ private:
         m_out.jump(continuation);
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateDirectArguments, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImm32(minCapacity));
@@ -4820,9 +4825,10 @@ private:
             m_out.appendTo(slowCase, continuation);
             LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);
 
+            VM& vm = this->vm();
             LValue slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationNewTypedArrayWithSizeForType(type), locations[0].directGPR(),
                         CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                         locations[2].directGPR());
@@ -5083,19 +5089,20 @@ private:
         
         m_out.appendTo(slowPath, continuation);
         LValue slowResultValue;
+        VM& vm = this->vm();
         switch (numKids) {
         case 2:
             slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                         locations[2].directGPR());
                 }, kids[0], kids[1]);
             break;
         case 3:
             slowResultValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                         locations[2].directGPR(), locations[3].directGPR());
                 }, kids[0], kids[1], kids[2]);
@@ -5512,9 +5519,10 @@ private:
         
         LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);
 
+        VM& vm = this->vm();
         lazySlowPath(
-            [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
             });
         m_out.jump(continuation);
@@ -6448,7 +6456,7 @@ private:
                 auto callWithExceptionCheck = [&] (void* callee) {
                     jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
                     jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                    exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+                    exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 };
 
                 CCallHelpers::JumpList slowCase;
@@ -6755,7 +6763,7 @@ private:
                 auto callWithExceptionCheck = [&] (void* callee) {
                     jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
                     jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                    exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+                    exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 };
 
                 unsigned originalStackHeight = params.proc().frameSize();
@@ -6771,7 +6779,7 @@ private:
                         inlineCallFrame = node->origin.semantic.inlineCallFrame;
 
                     // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
-                    emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
+                    emitSetupVarargsFrameFastCase(state->vm(), jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
 
                     CCallHelpers::Jump done = jit.jump();
                     slowCase.link(&jit);
@@ -6942,7 +6950,7 @@ private:
                 jit.setupArgumentsWithExecState(GPRInfo::regT1);
                 jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR);
                 jit.call(GPRInfo::nonPreservedNonArgumentGPR);
-                exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                 
                 CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);
                 
@@ -7985,9 +7993,10 @@ private:
             rarely(slowPath), usually(continuation));
         
         m_out.appendTo(slowPath, notCellCase);
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationObjectIsObject, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
@@ -8035,9 +8044,10 @@ private:
             rarely(slowPath), usually(continuation));
         
         m_out.appendTo(slowPath, continuation);
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationObjectIsFunction, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
@@ -8779,11 +8789,12 @@ private:
                 
                 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
 
+                VM& vm = this->vm();
                 LValue slowObjectValue;
                 if (hasIndexingHeader) {
                     slowObjectValue = lazySlowPath(
-                        [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                            return createLazyCallGenerator(
+                        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                            return createLazyCallGenerator(vm,
                                 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
                                 locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
                                 locations[1].directGPR(), locations[2].directGPR());
@@ -8791,8 +8802,8 @@ private:
                         vectorLength, butterflyValue);
                 } else {
                     slowObjectValue = lazySlowPath(
-                        [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                            return createLazyCallGenerator(
+                        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                            return createLazyCallGenerator(vm,
                                 operationNewObjectWithButterfly, locations[0].directGPR(),
                                 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
                         },
@@ -8990,9 +9001,10 @@ private:
         // because all fields will be overwritten.
         // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that 
         // doesn't initialize every slot because we are guaranteed to do that here.
+        VM& vm = this->vm();
         LValue callResult = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationCreateActivationDirect, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                     CCallHelpers::TrustedImmPtr(table),
@@ -9045,9 +9057,10 @@ private:
 
         LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);
 
+        VM& vm = this->vm();
         lazySlowPath(
-            [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(operationHandleTraps, InvalidGPRReg);
+            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
             });
         m_out.jump(continuation);
         
@@ -9712,17 +9725,18 @@ private:
         m_out.appendTo(slowPath, continuation);
         
         LValue slowButterflyValue;
+        VM& vm = this->vm();
         if (sizeInValues == initialOutOfLineCapacity) {
             slowButterflyValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationAllocateSimplePropertyStorageWithInitialCapacity,
                         locations[0].directGPR());
                 });
         } else {
             slowButterflyValue = lazySlowPath(
-                [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                    return createLazyCallGenerator(
+                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                    return createLazyCallGenerator(vm,
                         operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                         CCallHelpers::TrustedImmPtr(sizeInValues));
                 });
@@ -10801,9 +10815,10 @@ private:
         
         m_out.appendTo(slowPath, continuation);
 
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNewObject, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(structure.get()));
             });
@@ -10919,9 +10934,10 @@ private:
         LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
         LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
 
+        VM& vm = this->vm();
         LValue slowResultValue = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationNewArrayWithSize, locations[0].directGPR(),
                     locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
             },
@@ -11667,9 +11683,10 @@ private:
         functor(TypeofType::Object);
         
         m_out.appendTo(slowPath, unreachable);
+        VM& vm = this->vm();
         LValue result = lazySlowPath(
-            [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
-                return createLazyCallGenerator(
+            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+                return createLazyCallGenerator(vm,
                     operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                     CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
             }, value);
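
The recurring two-line dance in this file -- VM& vm = this->vm(); followed by [=, &vm] -- is not noise: writing this->vm() inside the lambda would make [=] capture this, and the lowering object is gone by the time the lazy slow path actually generates. Hoisting the reference into a local lets the lambda capture only the long-lived VM. A compact illustration of the pitfall (stand-in types; the real code uses [=, &vm] because it also copies other locals):

    #include <functional>

    struct VM { int x { 0 }; };

    struct Lowering {
        VM& m_vm;
        VM& vm() { return m_vm; }

        std::function<int()> riskyGenerator()
        {
            return [=] { return vm().x; };  // '[=]' quietly captures 'this'; dangles
                                            // once this Lowering object is destroyed
        }

        std::function<int()> safeGenerator()
        {
            VM& vm = this->vm();            // hoist the reference into a local...
            return [&vm] { return vm.x; };  // ...and capture only the long-lived VM
        }
    };
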
index 0aae5c8..7f37820 100644 (file)
@@ -181,12 +181,12 @@ static void compileStub(
     // This code requires framePointerRegister is the same as callFrameRegister
     static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");
 
-    CCallHelpers jit(vm, codeBlock);
+    CCallHelpers jit(codeBlock);
 
     // The first thing we need to do is re-establish our frame in the case of an exception.
     if (exit.isGenericUnwindHandler()) {
         RELEASE_ASSERT(vm->callFrameForCatch); // The first time we hit this exit, like at all other times, this field should be non-null.
-        jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+        jit.restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm);
         jit.loadPtr(vm->addressOfCallFrameForCatch(), MacroAssembler::framePointerRegister);
         jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
             MacroAssembler::framePointerRegister, CCallHelpers::stackPointerRegister);
@@ -489,7 +489,7 @@ static void compileStub(
     
     handleExitCounts(jit, exit);
     reifyInlinedCallFrames(jit, exit);
-    adjustAndJumpToTarget(jit, exit);
+    adjustAndJumpToTarget(*vm, jit, exit);
     
     LinkBuffer patchBuffer(*vm, jit, codeBlock);
     exit.m_code = FINALIZE_CODE_IF(
index 1d6eb96..5356e0e 100644 (file)
 
 #include "CCallHelpers.h"
 #include "FTLSlowPathCallKey.h"
-#include "JITOperations.h"
+#include "FTLState.h"
 
 namespace JSC { namespace FTL {
 
-class State;
-
 class SlowPathCall {
 public:
     SlowPathCall() { }
@@ -79,7 +77,7 @@ private:
 
 template<typename... ArgumentTypes>
 SlowPathCall callOperation(
-    const RegisterSet& usedRegisters, CCallHelpers& jit, CCallHelpers::JumpList* exceptionTarget,
+    VM& vm, const RegisterSet& usedRegisters, CCallHelpers& jit, CCallHelpers::JumpList* exceptionTarget,
     FunctionPtr function, GPRReg resultGPR, ArgumentTypes... arguments)
 {
     SlowPathCall call;
@@ -89,13 +87,13 @@ SlowPathCall callOperation(
         call = context.makeCall(function.value());
     }
     if (exceptionTarget)
-        exceptionTarget->append(jit.emitExceptionCheck());
+        exceptionTarget->append(jit.emitExceptionCheck(vm));
     return call;
 }
 
 template<typename... ArgumentTypes>
 SlowPathCall callOperation(
-    const RegisterSet& usedRegisters, CCallHelpers& jit, CallSiteIndex callSiteIndex,
+    VM& vm, const RegisterSet& usedRegisters, CCallHelpers& jit, CallSiteIndex callSiteIndex,
     CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg resultGPR,
     ArgumentTypes... arguments)
 {
@@ -104,7 +102,7 @@ SlowPathCall callOperation(
             CCallHelpers::TrustedImm32(callSiteIndex.bits()),
             CCallHelpers::tagFor(CallFrameSlot::argumentCount));
     }
-    return callOperation(usedRegisters, jit, exceptionTarget, function, resultGPR, arguments...);
+    return callOperation(vm, usedRegisters, jit, exceptionTarget, function, resultGPR, arguments...);
 }
 
 CallSiteIndex callSiteIndexForCodeOrigin(State&, CodeOrigin);
@@ -115,7 +113,7 @@ SlowPathCall callOperation(
     CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg result, ArgumentTypes... arguments)
 {
     return callOperation(
-        usedRegisters, jit, callSiteIndexForCodeOrigin(state, codeOrigin), exceptionTarget, function,
+        state.vm(), usedRegisters, jit, callSiteIndexForCodeOrigin(state, codeOrigin), exceptionTarget, function,
         result, arguments...);
 }
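
These three callOperation overloads form a chain: the new VM& rides along in front of the variadic pack until the innermost overload finally spends it on the exception check, and the outermost overload keeps its old signature because it can derive the VM from the FTL State. The chain in miniature (distinct names used here for clarity; the real overloads all share one name):

    #include <cstdio>

    struct VM { const char* name; };
    struct State { VM& graphVM; VM& vm() { return graphVM; } };

    // Innermost layer: the only one that actually consumes the VM (exception check).
    template<typename... Args>
    void callOperationInner(VM& vm, Args...) { std::printf("check %s\n", vm.name); }

    // Middle layer: threads VM& through in front of the argument pack, untouched.
    template<typename... Args>
    void callOperationMiddle(VM& vm, Args... args) { callOperationInner(vm, args...); }

    // Outermost layer: keeps its old signature by deriving the VM from State.
    template<typename... Args>
    void callOperationOuter(State& state, Args... args) { callOperationMiddle(state.vm(), args...); }

    int main()
    {
        VM vm { "vm" };
        State state { vm };
        callOperationOuter(state, 1, 2.0);
        return 0;
    }
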
 
index bc43430..9df4b3d 100644 (file)
@@ -64,6 +64,8 @@ class State {
 public:
     State(DFG::Graph& graph);
     ~State();
+
+    VM& vm() { return graph.m_vm; }
     
     // None of these things is owned by State. It is the responsibility of
     // FTL phases to properly manage the lifecycle of the module and function.
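
State::vm() is the pattern in miniature: rather than caching a VM pointer, derive it on demand from something the object already owns (here the DFG graph). One owner of the fact, no extra field to keep consistent. As a sketch:

    // Sketch: derive, don't duplicate. 'Graph' stands in for DFG::Graph.
    struct VM { };
    struct Graph { VM& m_vm; };

    class State {
    public:
        explicit State(Graph& g) : graph(g) { }
        VM& vm() { return graph.m_vm; }  // derived on demand, never stored twice
    private:
        Graph& graph;
    };
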
index cfbc75b..bb77ea6 100644 (file)
@@ -49,7 +49,7 @@ enum class FrameAndStackAdjustmentRequirement {
 static MacroAssemblerCodeRef genericGenerationThunkGenerator(
     VM* vm, FunctionPtr generationFunction, const char* name, unsigned extraPopsToRestore, FrameAndStackAdjustmentRequirement frameAndStackAdjustmentRequirement)
 {
-    AssemblyHelpers jit(vm, 0);
+    AssemblyHelpers jit(nullptr);
 
     if (frameAndStackAdjustmentRequirement == FrameAndStackAdjustmentRequirement::Needed) {
         // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer.
@@ -166,7 +166,7 @@ static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber)
 
 MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey& key)
 {
-    AssemblyHelpers jit(&vm, 0);
+    AssemblyHelpers jit(nullptr);
     
     // We want to save the given registers at the given offset, then we want to save the
     // old return address somewhere past that offset, and then finally we want to make the
index de1fb21..0e61d8e 100644 (file)
@@ -305,24 +305,24 @@ void AssemblyHelpers::jitAssertArgumentCountSane()
 
 #endif // !ASSERT_DISABLED
 
-void AssemblyHelpers::jitReleaseAssertNoException()
+void AssemblyHelpers::jitReleaseAssertNoException(VM& vm)
 {
     Jump noException;
 #if USE(JSVALUE64)
-    noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException()));
+    noException = branchTest64(Zero, AbsoluteAddress(vm.addressOfException()));
 #elif USE(JSVALUE32_64)
-    noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+    noException = branch32(Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
 #endif
     abortWithReason(JITUncoughtExceptionAfterCall);
     noException.link(this);
 }
 
-void AssemblyHelpers::callExceptionFuzz()
+void AssemblyHelpers::callExceptionFuzz(VM& vm)
 {
     if (!Options::useExceptionFuzz())
         return;
 
-    EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
+    EncodedJSValue* buffer = vm.exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
 
     for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
 #if USE(JSVALUE64)
@@ -358,23 +358,23 @@ void AssemblyHelpers::callExceptionFuzz()
     }
 }
 
-AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException()
+AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException(VM& vm)
 {
-    return emitExceptionCheck(NormalExceptionCheck);
+    return emitExceptionCheck(vm, NormalExceptionCheck);
 }
 
-AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width)
+AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(VM& vm, ExceptionCheckKind kind, ExceptionJumpWidth width)
 {
-    callExceptionFuzz();
+    callExceptionFuzz(vm);
 
     if (width == FarJumpWidth)
         kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);
     
     Jump result;
 #if USE(JSVALUE64)
-    result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
+    result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm.addressOfException()));
 #elif USE(JSVALUE32_64)
-    result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+    result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
 #endif
     
     if (width == NormalJumpWidth)
@@ -386,15 +386,15 @@ AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kin
     return realJump.m_jump;
 }
 
-AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck()
+AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck(VM& vm)
 {
-    callExceptionFuzz();
+    callExceptionFuzz(vm);
 
     Jump result;
 #if USE(JSVALUE64)
-    result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException()));
+    result = branchTest64(NonZero, AbsoluteAddress(vm.addressOfException()));
 #elif USE(JSVALUE32_64)
-    result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+    result = branch32(NotEqual, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
 #endif
     
     return result;
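
The FarJumpWidth path in emitExceptionCheck above encodes a classic trick, which is what the inverted kind exists for: to get an exception check that can reach a distant handler, emit the inverted short branch (taken when there is no exception) around an unconditional far-capable jump, and hand back the far jump as "the check". Schematically, with stand-in assembler types:

    // Minimal stand-ins; only the control shape matters.
    struct VM { };
    struct Assembler;
    struct Jump { void link(Assembler*) { } };
    struct Assembler {
        Jump branchIfNoException(VM&) { return { }; }  // short-range conditional branch
        Jump jump() { return { }; }                    // unconditional, far-capable
    };

    // The FarJumpWidth idiom: invert the test and hop over a far jump.
    Jump emitFarExceptionCheck(Assembler& masm, VM& vm)
    {
        Jump noException = masm.branchIfNoException(vm);  // taken when no exception
        Jump far = masm.jump();                           // can reach a distant handler
        noException.link(&masm);                          // no-exception path lands here
        return far;                                       // caller links this to the handler
    }
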
@@ -453,15 +453,16 @@ void AssemblyHelpers::loadProperty(GPRReg object, GPRReg offset, JSValueRegs res
         result);
 }
 
-void AssemblyHelpers::emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
+void AssemblyHelpers::emitLoadStructure(VM& vm, RegisterID source, RegisterID dest, RegisterID scratch)
 {
 #if USE(JSVALUE64)
     ASSERT(dest != scratch);
     load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
-    loadPtr(vm()->heap.structureIDTable().base(), scratch);
+    loadPtr(vm.heap.structureIDTable().base(), scratch);
     loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
 #else
     UNUSED_PARAM(scratch);
+    UNUSED_PARAM(vm);
     loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
 #endif
 }
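
The two arms show why this signature had to grow: on 64-bit targets a cell's header stores a 32-bit structure ID, and turning that ID back into a Structure* means indexing a table owned by the heap -- per-VM state, hence the explicit VM& -- while the 32-bit path stores the pointer directly and only has to mark the parameter used. A stand-in sketch of the 64-bit decode:

    #include <cstdint>

    struct Structure;
    struct Heap { Structure** structureIDTableBase { nullptr }; };
    struct VM { Heap heap; };
    struct JSCell { uint32_t structureID { 0 }; };

    Structure* loadStructure(VM& vm, JSCell* cell)
    {
        return vm.heap.structureIDTableBase[cell->structureID];  // per-VM decode table
    }
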
@@ -553,10 +554,10 @@ void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scrat
     emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
 }
 
-void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
+void AssemblyHelpers::emitRandomThunk(VM& vm, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
 {
     emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, scratch3);
-    emitLoadStructure(scratch3, scratch3, scratch0);
+    emitLoadStructure(vm, scratch3, scratch3, scratch0);
     loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
     // Now, scratch3 holds JSGlobalObject*.
 
@@ -577,10 +578,10 @@ void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg s
 }
 #endif
 
-void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer()
+void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(VM& vm)
 {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
-    RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+    RegisterAtOffsetList* allCalleeSaves = vm.getAllCalleeSaveRegisterOffsets();
     RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
     unsigned registerCount = allCalleeSaves->size();
 
@@ -600,7 +601,7 @@ void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer()
     }
     ASSERT(scratch != InvalidGPRReg);
 
-    loadPtr(&m_vm->topVMEntryFrame, scratch);
+    loadPtr(&vm.topVMEntryFrame, scratch);
     addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), scratch);
 
     // Restore all callee saves except for the scratch.
@@ -621,6 +622,8 @@ void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer()
     ASSERT(entry.reg().isGPR());
     ASSERT(scratch == entry.reg().gpr());
     loadPtr(Address(scratch, entry.offset()), scratch);
+#else
+    UNUSED_PARAM(vm);
 #endif
 }
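
UNUSED_PARAM(vm) recurs in every #else arm for a reason: on targets with no callee-save registers to shuffle, the function body compiles away, and without the marker the new parameter would trip -Wunused-parameter. The idiom, reduced (UNUSED_PARAM is essentially WTF's cast-to-void macro, redefined here for the sketch):

    #define NUMBER_OF_CALLEE_SAVES_REGISTERS 0  // pretend we are such a target
    #define UNUSED_PARAM(v) ((void)(v))         // essentially WTF's macro

    struct VM { };

    void restoreCalleeSaves(VM& vm)
    {
    #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
        // ... the real work that reads vm ...
    #else
        UNUSED_PARAM(vm);  // keeps -Wunused-parameter quiet on this configuration
    #endif
    }
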
 
@@ -682,7 +685,7 @@ void AssemblyHelpers::wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch)
 }
 #endif // USE(JSVALUE64)
 
-void AssemblyHelpers::emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratch, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool negateResult)
+void AssemblyHelpers::emitConvertValueToBoolean(VM& vm, JSValueRegs value, GPRReg result, GPRReg scratch, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool negateResult)
 {
     // Implements the following control flow structure:
     // if (value is boolean) {
@@ -752,7 +755,7 @@ void AssemblyHelpers::emitConvertValueToBoolean(JSValueRegs value, GPRReg result
         ASSERT(scratch != InvalidGPRReg);
         JumpList isNotMasqueradesAsUndefined;
         isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
-        emitLoadStructure(value.payloadGPR(), result, scratch);
+        emitLoadStructure(vm, value.payloadGPR(), result, scratch);
         move(TrustedImmPtr(globalObject), scratch);
         isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(result, Structure::globalObjectOffset()), scratch));
         // We act like we are "undefined" here.
@@ -811,7 +814,62 @@ bool AssemblyHelpers::storeWasmContextNeedsMacroScratchRegister()
     return false;
 }
 
+#endif // ENABLE(WEBASSEMBLY)
+
+void AssemblyHelpers::debugCall(VM& vm, V_DebugOperation_EPP function, void* argument)
+{
+    size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
+    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(scratchSize);
+    EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+
+    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+        store64(GPRInfo::toRegister(i), buffer + i);
+#else
+        store32(GPRInfo::toRegister(i), buffer + i);
 #endif
+    }
+
+    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+        storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
+    }
+
+    // Tell GC mark phase how much of the scratch buffer is active during call.
+    move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+    storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
+
+#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
+    move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
+    move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
+    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+    GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
+#elif CPU(X86)
+    poke(GPRInfo::callFrameRegister, 0);
+    poke(TrustedImmPtr(argument), 1);
+    poke(TrustedImmPtr(buffer), 2);
+    GPRReg scratch = GPRInfo::regT0;
+#else
+#error "JIT not supported on this platform."
+#endif
+    move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
+    call(scratch);
+
+    move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+    storePtr(TrustedImmPtr(0), GPRInfo::regT0);
+
+    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+        loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
+    }
+    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+        load64(buffer + i, GPRInfo::toRegister(i));
+#else
+        load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+    }
+}
 
 } // namespace JSC
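
debugCall's register-spilling body moves out of line here: the .cpp gains the ~50-line definition above, and the header hunks that follow shrink it to a one-line declaration taking VM&. The split, reduced to a stand-in sketch (signature simplified; the real typedef takes ExecState*):

    // --- AssemblyHelpers.h: declaration only ---
    struct VM;
    typedef void (*V_DebugOperation_EPP)(void*, void*, void*);
    struct AssemblyHelpers {
        void debugCall(VM&, V_DebugOperation_EPP function, void* argument);
    };

    // --- AssemblyHelpers.cpp: the heavyweight body lives out of line ---
    struct VM { };  // (stand-in definition for the sketch)
    void AssemblyHelpers::debugCall(VM&, V_DebugOperation_EPP, void*)
    {
        // spill registers into a VM-owned scratch buffer, call, restore (as above)
    }
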
 
index a78cc69..2565944 100644 (file)
@@ -49,9 +49,8 @@ typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
 
 class AssemblyHelpers : public MacroAssembler {
 public:
-    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
-        : m_vm(vm)
-        , m_codeBlock(codeBlock)
+    AssemblyHelpers(CodeBlock* codeBlock)
+        : m_codeBlock(codeBlock)
         , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
     {
         if (m_codeBlock) {
@@ -62,7 +61,6 @@ public:
     }
 
     CodeBlock* codeBlock() { return m_codeBlock; }
-    VM* vm() { return m_vm; }
     AssemblerType_T& assembler() { return m_assembler; }
 
     void checkStackPointerAlignment()
@@ -335,15 +333,15 @@ public:
 #endif
     }
 
-    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
     {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
         GPRReg temp1 = usedRegisters.getFreeGPR(0);
 
-        loadPtr(&m_vm->topVMEntryFrame, temp1);
+        loadPtr(&vm.topVMEntryFrame, temp1);
         addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
 
-        RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+        RegisterAtOffsetList* allCalleeSaves = vm.getAllCalleeSaveRegisterOffsets();
         RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
         unsigned registerCount = allCalleeSaves->size();
         
@@ -357,13 +355,14 @@ public:
                 storeDouble(entry.reg().fpr(), Address(temp1, entry.offset()));
         }
 #else
+        UNUSED_PARAM(vm);
         UNUSED_PARAM(usedRegisters);
 #endif
     }
 
-    void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+    void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(VM&);
 
-    void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+    void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(VM& vm, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
     {
 #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
         GPRReg temp1 = usedRegisters.getFreeGPR(0);
@@ -374,10 +373,10 @@ public:
         ASSERT(codeBlock());
 
         // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
-        loadPtr(&m_vm->topVMEntryFrame, temp1);
+        loadPtr(&vm.topVMEntryFrame, temp1);
         addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
 
-        RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+        RegisterAtOffsetList* allCalleeSaves = vm.getAllCalleeSaveRegisterOffsets();
         RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
         RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
         unsigned registerCount = allCalleeSaves->size();
@@ -413,6 +412,7 @@ public:
             }
         }
 #else
+        UNUSED_PARAM(vm);
         UNUSED_PARAM(usedRegisters);
 #endif
     }
@@ -974,60 +974,7 @@ public:
     }
 
     // Add a debug call. This call has no effect on JIT code execution state.
-    void debugCall(V_DebugOperation_EPP function, void* argument)
-    {
-        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
-        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
-        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
-
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            store64(GPRInfo::toRegister(i), buffer + i);
-#else
-            store32(GPRInfo::toRegister(i), buffer + i);
-#endif
-        }
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
-        }
-
-        // Tell GC mark phase how much of the scratch buffer is active during call.
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
-
-#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
-        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
-        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
-        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
-#elif CPU(X86)
-        poke(GPRInfo::callFrameRegister, 0);
-        poke(TrustedImmPtr(argument), 1);
-        poke(TrustedImmPtr(buffer), 2);
-        GPRReg scratch = GPRInfo::regT0;
-#else
-#error "JIT not supported on this platform."
-#endif
-        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
-        call(scratch);
-
-        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
-        storePtr(TrustedImmPtr(0), GPRInfo::regT0);
-
-        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
-            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
-            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
-        }
-        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
-            load64(buffer + i, GPRInfo::toRegister(i));
-#else
-            load32(buffer + i, GPRInfo::toRegister(i));
-#endif
-        }
-    }
+    void debugCall(VM&, V_DebugOperation_EPP function, void* argument);
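
The fifty-odd-line inline body above becomes an out-of-line declaration taking VM&. The new definition is not shown in this hunk; presumably it moves into AssemblyHelpers.cpp with only the VM access rewritten. A sketch under that assumption:

    // AssemblyHelpers.cpp (sketch; assumes the body is otherwise the deleted one above)
    void AssemblyHelpers::debugCall(VM& vm, V_DebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(scratchSize); // was m_vm->scratchBufferForSize(...)
        // ... save GPRs/FPRs, make the call, restore them, exactly as in the deleted body ...
    }
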
 
     // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
 #if !ASSERT_DISABLED
@@ -1052,7 +999,7 @@ public:
     void jitAssertArgumentCountSane() { }
 #endif
 
-    void jitReleaseAssertNoException();
+    void jitReleaseAssertNoException(VM&);
 
     void incrementSuperSamplerCount();
     void decrementSuperSamplerCount();
@@ -1189,14 +1136,14 @@ public:
 #endif
     }
     
-    void callExceptionFuzz();
+    void callExceptionFuzz(VM&);
     
     enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
     enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
     JS_EXPORT_PRIVATE Jump emitExceptionCheck(
-        ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
-    JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck();
-    Jump emitJumpIfException();
+        VM&, ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+    JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(VM&);
+    Jump emitJumpIfException(VM&);
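
The exception-check emitters follow the same rule: the VM whose exception slot gets tested is now a parameter. The wrappers shown in a later JIT.h hunk just forward the compiler's VM:

    void exceptionCheck()
    {
        m_exceptionChecks.append(emitExceptionCheck(*vm()));
    }
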
 
 #if ENABLE(SAMPLING_COUNTERS)
     static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
@@ -1279,7 +1226,7 @@ public:
         return argumentCount(codeOrigin.inlineCallFrame);
     }
     
-    void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch);
+    void emitLoadStructure(VM&, RegisterID source, RegisterID dest, RegisterID scratch);
 
     void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
     {
@@ -1314,45 +1261,45 @@ public:
         return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold));
     }
     
-    Jump barrierBranch(GPRReg cell, GPRReg scratchGPR)
+    Jump barrierBranch(VM& vm, GPRReg cell, GPRReg scratchGPR)
     {
         load8(Address(cell, JSCell::cellStateOffset()), scratchGPR);
-        return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
     }
 
-    Jump barrierBranch(JSCell* cell, GPRReg scratchGPR)
+    Jump barrierBranch(VM& vm, JSCell* cell, GPRReg scratchGPR)
     {
         uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
         load8(address, scratchGPR);
-        return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
+        return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold()));
     }
     
-    void barrierStoreLoadFence()
+    void barrierStoreLoadFence(VM& vm)
     {
         if (!Options::useConcurrentBarriers())
             return;
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         memoryFence();
         ok.link(this);
     }
     
-    void mutatorFence()
+    void mutatorFence(VM& vm)
     {
         if (isX86())
             return;
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         storeFence();
         ok.link(this);
     }
     
-    void storeButterfly(GPRReg butterfly, GPRReg object)
+    void storeButterfly(VM& vm, GPRReg butterfly, GPRReg object)
     {
         if (isX86()) {
             storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
             return;
         }
         
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         storeFence();
         storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
         storeFence();
@@ -1362,7 +1309,7 @@ public:
         done.link(this);
     }
     
-    void nukeStructureAndStoreButterfly(GPRReg butterfly, GPRReg object)
+    void nukeStructureAndStoreButterfly(VM& vm, GPRReg butterfly, GPRReg object)
     {
         if (isX86()) {
             or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
@@ -1370,7 +1317,7 @@ public:
             return;
         }
         
-        Jump ok = jumpIfMutatorFenceNotNeeded();
+        Jump ok = jumpIfMutatorFenceNotNeeded(vm);
         or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
         storeFence();
         storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
@@ -1381,9 +1328,9 @@ public:
         done.link(this);
     }
     
-    Jump jumpIfMutatorFenceNotNeeded()
+    Jump jumpIfMutatorFenceNotNeeded(VM& vm)
     {
-        return branchTest8(Zero, AbsoluteAddress(vm()->heap.addressOfMutatorShouldBeFenced()));
+        return branchTest8(Zero, AbsoluteAddress(vm.heap.addressOfMutatorShouldBeFenced()));
     }
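
Each fence and barrier helper above reads heap state (the barrier threshold, mutatorShouldBeFenced) through the explicit VM. The write-barrier fast path later in this diff shows the updated shape; register names here are illustrative:

    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), ownerGPR, scratchGPR);
    callOperation(operationWriteBarrierSlowPath, ownerGPR); // only taken when the barrier fires
    ownerIsRememberedOrInEden.link(this);
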
     
     // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
@@ -1471,7 +1418,7 @@ public:
 
 #if USE(JSVALUE64)
     void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
-    void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+    void emitRandomThunk(VM&, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
 #endif
 
     // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
@@ -1551,10 +1498,10 @@ public:
     
     template<typename ClassType, typename StructureType, typename StorageType>
     void emitAllocateJSObjectWithKnownSize(
-        GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
+        VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
         GPRReg scratchGPR2, JumpList& slowPath, size_t size)
     {
-        MarkedAllocator* allocator = subspaceFor<ClassType>(*vm())->allocatorFor(size);
+        MarkedAllocator* allocator = subspaceFor<ClassType>(vm)->allocatorFor(size);
         if (!allocator) {
             slowPath.append(jump());
             return;
@@ -1564,9 +1511,9 @@ public:
     }
     
     template<typename ClassType, typename StructureType, typename StorageType>
-    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateJSObject(VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
+        emitAllocateJSObjectWithKnownSize<ClassType>(vm, resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
     }
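
The allocation fast paths thread the VM down to subspaceFor<>() for the MarkedAllocator lookup. An illustrative call, with JSFinalObject standing in for ClassType and the register names assumed:

    JumpList slowPath;
    jit.emitAllocateJSObject<JSFinalObject>(vm, resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
    // slowPath collects the jumps taken when no allocator exists or the fast path fails.
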
     
     // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
@@ -1587,26 +1534,26 @@ public:
     }
     
     template<typename ClassType, typename StructureType>
-    void emitAllocateVariableSizedCell(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        Subspace& subspace = *subspaceFor<ClassType>(*vm());
+        Subspace& subspace = *subspaceFor<ClassType>(vm);
         emitAllocateVariableSized(resultGPR, subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath);
         emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2);
     }
 
     template<typename ClassType, typename StructureType>
-    void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateVariableSizedJSObject(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateVariableSizedCell<ClassType>(resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+        emitAllocateVariableSizedCell<ClassType>(vm, resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
         storePtr(TrustedImmPtr(0), Address(resultGPR, JSObject::butterflyOffset()));
     }
 
-    void emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
+    void emitConvertValueToBoolean(VM&, JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
     
     template<typename ClassType>
-    void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+    void emitAllocateDestructibleObject(VM& vm, GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
     {
-        emitAllocateJSObject<ClassType>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+        emitAllocateJSObject<ClassType>(vm, resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
         storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset()));
     }
     
@@ -1644,7 +1591,6 @@ public:
 #endif
 
 protected:
-    VM* m_vm;
     CodeBlock* m_codeBlock;
     CodeBlock* m_baselineCodeBlock;
 
index 3c3df61..33409ef 100644
@@ -52,16 +52,16 @@ void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs t
     store32(TrustedImm32(callSiteIndex.bits()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex)));
 }
 
-void CCallHelpers::ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2)
+void CCallHelpers::ensureShadowChickenPacket(VM& vm, GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2)
 {
     ASSERT(!RegisterSet::argumentGPRS().get(scratch1NonArgGPR));
-    move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
+    move(TrustedImmPtr(vm.shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
     loadPtr(Address(scratch1NonArgGPR), shadowPacket);
-    Jump ok = branchPtr(Below, shadowPacket, TrustedImmPtr(vm()->shadowChicken().logEnd()));
+    Jump ok = branchPtr(Below, shadowPacket, TrustedImmPtr(vm.shadowChicken().logEnd()));
     setupArgumentsExecState();
     move(TrustedImmPtr(bitwise_cast<void*>(operationProcessShadowChickenLog)), scratch1NonArgGPR);
     call(scratch1NonArgGPR);
-    move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
+    move(TrustedImmPtr(vm.shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
     loadPtr(Address(scratch1NonArgGPR), shadowPacket);
     ok.link(this);
     addPtr(TrustedImm32(sizeof(ShadowChicken::Packet)), shadowPacket, scratch2);
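
ensureShadowChickenPacket reserves the next slot in the ShadowChicken ring log, flushing through operationProcessShadowChickenLog when the cursor reaches logEnd(). Its callers, updated later in this diff, now pass the VM explicitly:

    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // must be a non-argument register
    GPRReg scratch2Reg = regT2;
    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
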
index 35b7d82..aa373de 100644
@@ -50,8 +50,8 @@ namespace JSC {
 
 class CCallHelpers : public AssemblyHelpers {
 public:
-    CCallHelpers(VM* vm, CodeBlock* codeBlock = 0)
-        : AssemblyHelpers(vm, codeBlock)
+    CCallHelpers(CodeBlock* codeBlock = 0)
+        : AssemblyHelpers(codeBlock)
     {
     }
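
This constructor change is the payoff: a CCallHelpers can now be constructed with no VM at all, which is what position-independent Wasm code generation needs. The thunk generators later in this patch reduce to:

    CCallHelpers jit; // no VM, no CodeBlock: fine for VM-agnostic code
    // ... emit code; pass a VM only to the few helpers that genuinely need one ...
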
     
@@ -2440,11 +2440,11 @@ public:
 #endif
     }
     
-    void jumpToExceptionHandler()
+    void jumpToExceptionHandler(VM& vm)
     {
         // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch,
         // and the address of the handler in vm->targetMachinePCForThrow.
-        loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1);
+        loadPtr(&vm.targetMachinePCForThrow, GPRInfo::regT1);
         jump(GPRInfo::regT1);
     }
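
The comment spells out the unwind contract: the throwing operation runs genericUnwind(), which stores the handler's frame and PC into the VM, so JIT code only has to jump. The op_throw hunks below use exactly this sequence:

    copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    callOperationNoExceptionCheck(operationThrow, regT0);
    jumpToExceptionHandler(*vm());
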
 
@@ -2640,7 +2640,7 @@ public:
     void logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope);
     void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock*, CallSiteIndex);
     // Leaves behind a pointer to the Packet we should write to in shadowPacket.
-    void ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2);
+    void ensureShadowChickenPacket(VM&, GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2);
 };
 
 } // namespace JSC
index 186f627..4dfe329 100644
@@ -110,7 +110,7 @@ void JIT::emitEnterOptimizationCheck()
     skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
     ASSERT(!m_bytecodeOffset);
 
-    copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();
+    copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(*vm());
 
     callOperation(operationOptimize, m_bytecodeOffset);
     skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
@@ -872,7 +872,7 @@ void JIT::privateCompileExceptionHandlers()
     if (!m_exceptionChecksWithCallFrameRollback.empty()) {
         m_exceptionChecksWithCallFrameRollback.link(this);
 
-        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
 
         // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
 
@@ -885,14 +885,14 @@ void JIT::privateCompileExceptionHandlers()
         poke(GPRInfo::argumentGPR1, 1);
 #endif
         m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
-        jumpToExceptionHandler();
+        jumpToExceptionHandler(*vm());
     }
 
     if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
         m_exceptionHandler = label();
         m_exceptionChecks.link(this);
 
-        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
 
         // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
         move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
@@ -904,7 +904,7 @@ void JIT::privateCompileExceptionHandlers()
         poke(GPRInfo::argumentGPR1, 1);
 #endif
         m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
-        jumpToExceptionHandler();
+        jumpToExceptionHandler(*vm());
     }
 }
 
index 51b9931..7edd0b1 100644
@@ -294,12 +294,12 @@ namespace JSC {
 
         void exceptionCheck()
         {
-            m_exceptionChecks.append(emitExceptionCheck());
+            m_exceptionChecks.append(emitExceptionCheck(*vm()));
         }
 
         void exceptionCheckWithCallFrameRollback()
         {
-            m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck());
+            m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck(*vm()));
         }
 
         void privateCompileExceptionHandlers();
index 3806f3a..6e1cbbf 100644
@@ -131,7 +131,7 @@ public:
     void generateOutOfLine(VM& vm, CodeBlock* codeBlock, FunctionPtr callReplacement)
     {
         auto linkJumpToOutOfLineSnippet = [&] () {
-            CCallHelpers jit(&vm, codeBlock);
+            CCallHelpers jit(codeBlock);
             auto jump = jit.jump();
             // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
             bool needsBranchCompaction = false;
@@ -150,7 +150,7 @@ public:
 
         if (m_generateFastPathOnRepatch) {
 
-            CCallHelpers jit(&vm, codeBlock);
+            CCallHelpers jit(codeBlock);
             MathICGenerationState generationState;
             bool generatedInline = generateInline(jit, generationState, shouldEmitProfiling);
 
@@ -190,7 +190,7 @@ public:
         replaceCall();
 
         {
-            CCallHelpers jit(&vm, codeBlock);
+            CCallHelpers jit(codeBlock);
 
             MacroAssembler::JumpList endJumpList; 
             MacroAssembler::JumpList slowPathJumpList; 
index cc395c7..2755bb0 100644
@@ -162,7 +162,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
 
     // Load the prototype of the object in regT2.  If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
-    emitLoadStructure(regT2, regT2, regT3);
+    emitLoadStructure(*vm(), regT2, regT2, regT3);
     load64(Address(regT2, Structure::prototypeOffset()), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
     emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -210,7 +210,7 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
     Jump notMasqueradesAsUndefined = jump();
 
     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT1, regT2);
+    emitLoadStructure(*vm(), regT0, regT1, regT2);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
     comparePtr(Equal, regT0, regT1, regT0);
@@ -351,7 +351,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
     bool shouldCheckMasqueradesAsUndefined = true;
 
     emitGetVirtualRegister(currentInstruction[1].u.operand, value);
-    emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
+    emitConvertValueToBoolean(*vm(), JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
 
     addJump(branchTest32(Zero, result), target);
 }
@@ -366,7 +366,7 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
 
     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
     Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
-    emitLoadStructure(regT0, regT2, regT1);
+    emitLoadStructure(*vm(), regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump masqueradesGlobalObjectIsForeign = jump();
@@ -389,7 +389,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
 
     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
     addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
-    emitLoadStructure(regT0, regT2, regT1);
+    emitLoadStructure(*vm(), regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump wasNotImmediate = jump();
@@ -433,7 +433,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
     GPRReg scratch = regT2;
     bool shouldCheckMasqueradesAsUndefined = true;
     emitGetVirtualRegister(currentInstruction[1].u.operand, value);
-    emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
+    emitConvertValueToBoolean(*vm(), JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
     addJump(branchTest32(NonZero, result), target);
 }
 
@@ -451,10 +451,10 @@ void JIT::emit_op_neq(Instruction* currentInstruction)
 void JIT::emit_op_throw(Instruction* currentInstruction)
 {
     ASSERT(regT0 == returnValueGPR);
-    copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
     callOperationNoExceptionCheck(operationThrow, regT0);
-    jumpToExceptionHandler();
+    jumpToExceptionHandler(*vm());
 }
 
 void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
@@ -530,7 +530,7 @@ void JIT::emit_op_to_string(Instruction* currentInstruction)
 
 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
-    restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+    restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm());
 
     move(TrustedImmPtr(m_vm), regT3);
     load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
@@ -540,7 +540,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
 
     callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
     Jump isCatchableException = branchTest32(Zero, returnValueGPR);
-    jumpToExceptionHandler();
+    jumpToExceptionHandler(*vm());
     isCatchableException.link(this);
 
     move(TrustedImmPtr(m_vm), regT3);
@@ -640,7 +640,7 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
     Jump wasNotMasqueradesAsUndefined = jump();
 
     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT2, regT1);
+    emitLoadStructure(*vm(), regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     comparePtr(Equal, regT0, regT2, regT0);
@@ -672,7 +672,7 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
     Jump wasNotMasqueradesAsUndefined = jump();
 
     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT2, regT1);
+    emitLoadStructure(*vm(), regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     comparePtr(NotEqual, regT0, regT2, regT0);
@@ -918,7 +918,7 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
     if (canBeOptimized()) {
         linkSlowCase(iter);
 
-        copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();
+        copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(*vm());
 
         callOperation(operationOptimize, m_bytecodeOffset);
         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
@@ -1367,7 +1367,7 @@ void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
-    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT3);
     logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
 }
@@ -1379,7 +1379,7 @@ void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
-    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT2);
     emitGetVirtualRegister(currentInstruction[2].u.operand, regT3);
     logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
index b53b208..dd72ad7 100644
@@ -55,7 +55,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
 
     emitFunctionPrologue();
     emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
-    storePtr(callFrameRegister, &m_vm->topCallFrame);
+    storePtr(callFrameRegister, &vm->topCallFrame);
 
 #if CPU(X86)
     // Calling convention:      f(ecx, edx, ...);
@@ -107,7 +107,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
     // Handle an exception
     sawException.link(this);
 
-    storePtr(callFrameRegister, &m_vm->topCallFrame);
+    storePtr(callFrameRegister, &vm->topCallFrame);
 
 #if CPU(X86)
     addPtr(TrustedImm32(-4), stackPointerRegister);
@@ -123,10 +123,10 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
     addPtr(TrustedImm32(8), stackPointerRegister);
 #endif
 
-    jumpToExceptionHandler();
+    jumpToExceptionHandler(*vm);
 
     // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
+    LinkBuffer patchBuffer(*vm, *this, GLOBAL_THUNK_ID);
 
     patchBuffer.link(nativeCall, FunctionPtr(func));
     return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -463,7 +463,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
     GPRReg scratch = regT2;
     GPRReg result = regT3;
     bool shouldCheckMasqueradesAsUndefined = true;
-    emitConvertValueToBoolean(value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
+    emitConvertValueToBoolean(*vm(), value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
 
     addJump(branchTest32(Zero, result), target);
 }
@@ -478,7 +478,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
     JSValueRegs value(regT1, regT0);
     GPRReg scratch = regT2;
     GPRReg result = regT3;
-    emitConvertValueToBoolean(value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
+    emitConvertValueToBoolean(*vm(), value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
 
     addJump(branchTest32(NonZero, result), target);
 }
@@ -760,10 +760,10 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
 void JIT::emit_op_throw(Instruction* currentInstruction)
 {
     ASSERT(regT0 == returnValueGPR);
-    copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());
     emitLoad(currentInstruction[1].u.operand, regT1, regT0);
     callOperationNoExceptionCheck(operationThrow, regT1, regT0);
-    jumpToExceptionHandler();
+    jumpToExceptionHandler(*vm());
 }
 
 void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
@@ -821,7 +821,7 @@ void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCase
 
 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
-    restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+    restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(*vm());
 
     move(TrustedImmPtr(m_vm), regT3);
     // operationThrow returns the callFrame for the handler.
@@ -832,7 +832,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
 
     callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
     Jump isCatchableException = branchTest32(Zero, returnValueGPR);
-    jumpToExceptionHandler();
+    jumpToExceptionHandler(*vm());
     isCatchableException.link(this);
 
     move(TrustedImmPtr(m_vm), regT3);
@@ -1314,7 +1314,7 @@ void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
-    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
     scratch1Reg = regT4;
     emitLoadPayload(currentInstruction[1].u.operand, regT3);
@@ -1328,7 +1328,7 @@ void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
     GPRReg shadowPacketReg = regT0;
     GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
     GPRReg scratch2Reg = regT2;
-    ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
 
     emitLoadPayload(currentInstruction[1].u.operand, regT2);
     emitLoadTag(currentInstruction[1].u.operand, regT1);
index f5b8a40..d14508e 100644
@@ -1217,7 +1217,7 @@ void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode
     if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
         ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
 
-    Jump ownerIsRememberedOrInEden = barrierBranch(regT0, regT1);
+    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT0, regT1);
     callOperation(operationWriteBarrierSlowPath, regT0);
     ownerIsRememberedOrInEden.link(this);
 
@@ -1255,7 +1255,7 @@ void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode
     if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
         ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
 
-    Jump ownerIsRememberedOrInEden = barrierBranch(regT1, regT2);
+    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT1, regT2);
     callOperation(operationWriteBarrierSlowPath, regT1);
     ownerIsRememberedOrInEden.link(this);
 
@@ -1283,7 +1283,7 @@ void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
 
 void JIT::emitWriteBarrier(JSCell* owner)
 {
-    Jump ownerIsRememberedOrInEden = barrierBranch(owner, regT0);
+    Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), owner, regT0);
     callOperation(operationWriteBarrierSlowPath, owner);
     ownerIsRememberedOrInEden.link(this);
 }
index dc0cc1a..d523b6f 100644
@@ -41,7 +41,8 @@ namespace JSC {
     class JSInterfaceJIT : public CCallHelpers, public GPRInfo, public FPRInfo {
     public:
         JSInterfaceJIT(VM* vm, CodeBlock* codeBlock = 0)
-            : CCallHelpers(vm, codeBlock)
+            : CCallHelpers(codeBlock)
+            , m_vm(vm)
         {
         }
 
@@ -77,6 +78,10 @@ namespace JSC {
         inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
         inline Address intTagFor(int index, RegisterID base = callFrameRegister);
         inline Address addressFor(int index, RegisterID base = callFrameRegister);
+
+        VM* vm() const { return m_vm; }
+
+        VM* m_vm;
     };
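
JSInterfaceJIT is the one place a VM pointer survives: the baseline JIT still compiles against a known VM, so the class caches the pointer itself and re-exposes it as vm(). That accessor is what the many *vm() call sites in the JIT hunks below rely on, for example:

    emitLoadStructure(*vm(), regT2, regT2, regT3); // the VM is now passed explicitly
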
 
     struct ThunkHelpers {
index 1978480..7f41cfb 100644
@@ -177,7 +177,7 @@ static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, con
                 && slot.slotBase() == baseValue
                 && InlineAccess::isCacheableArrayLength(stubInfo, jsCast<JSArray*>(baseValue))) {
 
-                bool generatedCodeInline = InlineAccess::generateArrayLength(*codeBlock->vm(), stubInfo, jsCast<JSArray*>(baseValue));
+                bool generatedCodeInline = InlineAccess::generateArrayLength(stubInfo, jsCast<JSArray*>(baseValue));
                 if (generatedCodeInline) {
                     ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
                     stubInfo.initArrayLength();
@@ -233,7 +233,7 @@ static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, con
             && !structure->needImpurePropertyWatchpoint()
             && !loadTargetFromProxy) {
 
-            bool generatedCodeInline = InlineAccess::generateSelfPropertyAccess(*codeBlock->vm(), stubInfo, structure, slot.cachedOffset());
+            bool generatedCodeInline = InlineAccess::generateSelfPropertyAccess(stubInfo, structure, slot.cachedOffset());
             if (generatedCodeInline) {
                 LOG_IC((ICEvent::GetByIdSelfPatch, structure->classInfo(), propertyName));
                 structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
@@ -329,7 +329,7 @@ static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, con
         LOG_IC((ICEvent::GetByIdReplaceWithJump, baseValue.classInfoOrNull(vm), propertyName));
         
         RELEASE_ASSERT(result.code());
-        InlineAccess::rewireStubAsJump(exec->vm(), stubInfo, CodeLocationLabel(result.code()));
+        InlineAccess::rewireStubAsJump(stubInfo, CodeLocationLabel(result.code()));
     }
     
     return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
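
The InlineAccess entry points go a step further: because the CCallHelpers they construct internally no longer needs a VM, the VM parameter is dropped from their signatures instead of being threaded through:

    // Before
    InlineAccess::generateSelfPropertyAccess(*codeBlock->vm(), stubInfo, structure, slot.cachedOffset());
    // After
    InlineAccess::generateSelfPropertyAccess(stubInfo, structure, slot.cachedOffset());
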
@@ -396,7 +396,7 @@ static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Str
                 && !structure->needImpurePropertyWatchpoint()
                 && !structure->inferredTypeFor(ident.impl())) {
                 
-                bool generatedCodeInline = InlineAccess::generateSelfPropertyReplace(vm, stubInfo, structure, slot.cachedOffset());
+                bool generatedCodeInline = InlineAccess::generateSelfPropertyReplace(stubInfo, structure, slot.cachedOffset());
                 if (generatedCodeInline) {
                     LOG_IC((ICEvent::PutByIdSelfPatch, structure->classInfo(), ident));
                     ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingPutByIdFunction(slot, putKind));
@@ -483,7 +483,7 @@ static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Str
         
         RELEASE_ASSERT(result.code());
 
-        InlineAccess::rewireStubAsJump(vm, stubInfo, CodeLocationLabel(result.code()));
+        InlineAccess::rewireStubAsJump(stubInfo, CodeLocationLabel(result.code()));
     }
     
     return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
@@ -785,7 +785,7 @@ void linkPolymorphicCall(
     
     GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
     
-    CCallHelpers stubJit(&vm, callerCodeBlock);
+    CCallHelpers stubJit(callerCodeBlock);
     
     CCallHelpers::JumpList slowPath;
     
@@ -981,7 +981,7 @@ void linkPolymorphicCall(
 void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo, GetByIDKind kind)
 {
     ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
-    InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
+    InlineAccess::rewireStubAsJump(stubInfo, stubInfo.slowPathStartLocation());
 }
 
 void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
@@ -1000,7 +1000,7 @@ void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
     }
 
     ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), optimizedFunction);
-    InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
+    InlineAccess::rewireStubAsJump(stubInfo, stubInfo.slowPathStartLocation());
 }
 
 void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
index 3807ec9..9fc658f 100644
@@ -60,7 +60,7 @@ void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthInclude
     jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
 }
 
-static void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+static void emitSetupVarargsFrameFastCase(VM& vm, CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
 {
     CCallHelpers::JumpList end;
     
@@ -82,7 +82,7 @@ static void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlots
     
     emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);
 
-    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), scratchGPR2));
+    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(vm.addressOfSoftStackLimit()), scratchGPR2));
 
     // Before touching stack values, we should update the stack pointer to protect them from the signal stack.
     jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);
@@ -111,7 +111,7 @@ static void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlots
     done.link(&jit);
 }
 
-void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+void emitSetupVarargsFrameFastCase(VM& vm, CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
 {
     ValueRecovery argumentCountRecovery;
     VirtualRegister firstArgumentReg;
@@ -132,7 +132,7 @@ void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GP
             VirtualRegister(CallFrameSlot::argumentCount), DataFormatInt32);
         firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0));
     }
-    emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
+    emitSetupVarargsFrameFastCase(vm, jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
 }
 
 } // namespace JSC
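
Free helpers adopt the same convention as the member functions: VM& becomes the leading parameter, and the public overload forwards it to the static worker. A sketch of a call, with argument names taken from the declaration above:

    CCallHelpers::JumpList slowCase;
    emitSetupVarargsFrameFastCase(vm, jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2,
        scratchGPR3, inlineCallFrame, firstVarArgOffset, slowCase);
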
index 8639a2a..bb7636e 100644
@@ -36,7 +36,7 @@ void emitSetVarargsFrame(CCallHelpers&, GPRReg lengthGPR, bool lengthIncludesThi
 
 // Assumes that SP refers to the last in-use stack location, and after this returns SP will point to
 // the newly created frame plus the native header. scratchGPR2 may be the same as numUsedSlotsGPR.
-void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
+void emitSetupVarargsFrameFastCase(VM&, CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
 
 } // namespace JSC
 
index 05f41f4..63c3ce0 100644
@@ -76,7 +76,7 @@ namespace JSC {
         void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
         {
             loadCellArgument(argument, dst);
-            emitLoadStructure(dst, scratch, dst);
+            emitLoadStructure(*vm(), dst, scratch, dst);
             appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
             // We have to reload the argument since emitLoadStructure clobbered it.
             loadCellArgument(argument, dst);
index 5217b82..982c4b9 100644
@@ -63,19 +63,19 @@ inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
 // linking helper (C++ code) decides to throw an exception instead.
 MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
 {
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     
     // The call pushed a return address, so we need to pop it back off to re-align the stack,
     // even though we won't use it.
     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
 
-    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
 
     jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
     jit.call(GPRInfo::nonArgGPR0);
-    jit.jumpToExceptionHandler();
+    jit.jumpToExceptionHandler(*vm);
 
     LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
     return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
@@ -138,7 +138,7 @@ MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
     // to perform linking and lazy compilation if necessary. We expect the callee
     // to be in regT0/regT1 (payload/tag), the CallFrame to have already
     // been adjusted, and all other registers to be available for use.
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     
     slowPathFor(jit, vm, operationLinkCall);
     
@@ -150,7 +150,7 @@ MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
 // object construction then you're going to lose big time anyway.
 MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
 {
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     
     slowPathFor(jit, vm, operationLinkPolymorphicCall);
     
@@ -169,7 +169,7 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
     // jump to the callee, or save the return address to the call frame while we
     // make a C++ function call to the appropriate JIT operation.
 
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     
     CCallHelpers::JumpList slowCase;
     
@@ -358,7 +358,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
     // Handle an exception
     exceptionHandler.link(&jit);
 
-    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
 
 #if CPU(X86) && USE(JSVALUE32_64)
@@ -380,7 +380,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
 #endif
 
-    jit.jumpToExceptionHandler();
+    jit.jumpToExceptionHandler(*vm);
 
     LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
     return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data()));
@@ -1033,7 +1033,7 @@ MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
 
 #if USE(JSVALUE64)
-    jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
+    jit.emitRandomThunk(*vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
 
     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
@@ -1044,7 +1044,7 @@ MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
 
 MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
 {
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
     
     jit.emitFunctionPrologue();
     
@@ -1136,20 +1136,20 @@ MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
 #if ENABLE(WEBASSEMBLY)
 MacroAssemblerCodeRef throwExceptionFromWasmThunkGenerator(VM* vm)
 {
-    CCallHelpers jit(vm);
+    CCallHelpers jit;
 
     // Whatever jumps here must first move the ExceptionType into argumentGPR1.
     // We're allowed to use temp registers here, but not callee saves.
     {
         RegisterSet usedRegisters = RegisterSet::stubUnavailableRegisters();
         usedRegisters.set(GPRInfo::argumentGPR1);
-        jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(usedRegisters);
+        jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm, usedRegisters);
     }
 
     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
     jit.loadWasmContext(GPRInfo::argumentGPR2);
     CCallHelpers::Call call = jit.call();
-    jit.jumpToExceptionHandler();
+    jit.jumpToExceptionHandler(*vm);
 
     void (*throwWasmException)(ExecState*, Wasm::ExceptionType, JSWebAssemblyInstance*) = [] (ExecState* exec, Wasm::ExceptionType type, JSWebAssemblyInstance* wasmContext) {
         VM* vm = &exec->vm();
index c3fadfc..124f007 100644
@@ -1282,8 +1282,8 @@ Expected<std::unique_ptr<WasmInternalFunction>, String> parseAndCompile(VM& vm,
 {
     auto result = std::make_unique<WasmInternalFunction>();
 
-    compilationContext.jsEntrypointJIT = std::make_unique<CCallHelpers>(&vm);
-    compilationContext.wasmEntrypointJIT = std::make_unique<CCallHelpers>(&vm);
+    compilationContext.jsEntrypointJIT = std::make_unique<CCallHelpers>();
+    compilationContext.wasmEntrypointJIT = std::make_unique<CCallHelpers>();
 
     Procedure procedure;
 
index e0cab1f..d3a1949 100644
@@ -40,7 +40,7 @@
 
 namespace JSC { namespace Wasm {
 
-typedef CCallHelpers JIT;
+using JIT = CCallHelpers;
 
 static void materializeImportJSCell(JIT& jit, unsigned importIndex, GPRReg result)
 {
@@ -57,7 +57,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
     const JSCCallingConvention& jsCC = jscCallingConvention();
     const Signature* signature = SignatureInformation::get(vm, signatureIndex);
     unsigned argCount = signature->argumentCount();
-    JIT jit(vm, nullptr);
+    JIT jit;
 
     // Below, we assume that the JS calling convention is always on the stack.
     ASSERT(!jsCC.m_gprArgs.size());
@@ -88,7 +88,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
         }
 
         if (hasBadI64Use) {
-            jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+            jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
             jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
             jit.loadWasmContext(GPRInfo::argumentGPR1);
 
@@ -97,7 +97,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
             jit.storePtr(GPRInfo::argumentGPR2, JIT::Address(GPRInfo::callFrameRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register))));
 
             auto call = jit.call();
-            jit.jumpToExceptionHandler();
+            jit.jumpToExceptionHandler(*vm);
 
             void (*throwBadI64)(ExecState*, JSWebAssemblyInstance*) = [] (ExecState* exec, JSWebAssemblyInstance* wasmContext) -> void {
                 VM* vm = &exec->vm();
@@ -309,7 +309,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
         slowPath.link(&jit);
         jit.setupArgumentsWithExecState(GPRInfo::returnValueGPR);
         auto call = jit.call();
-        exceptionChecks.append(jit.emitJumpIfException());
+        exceptionChecks.append(jit.emitJumpIfException(*vm));
 
         int32_t (*convertToI32)(ExecState*, JSValue) = [] (ExecState* exec, JSValue v) -> int32_t { 
             VM* vm = &exec->vm();
@@ -342,7 +342,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
         notANumber.link(&jit);
         jit.setupArgumentsWithExecState(GPRInfo::returnValueGPR);
         auto call = jit.call();
-        exceptionChecks.append(jit.emitJumpIfException());
+        exceptionChecks.append(jit.emitJumpIfException(*vm));
 
         float (*convertToF32)(ExecState*, JSValue) = [] (ExecState* exec, JSValue v) -> float { 
             VM* vm = &exec->vm();
@@ -374,7 +374,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
         notANumber.link(&jit);
         jit.setupArgumentsWithExecState(GPRInfo::returnValueGPR);
         auto call = jit.call();
-        exceptionChecks.append(jit.emitJumpIfException());
+        exceptionChecks.append(jit.emitJumpIfException(*vm));
 
         double (*convertToF64)(ExecState*, JSValue) = [] (ExecState* exec, JSValue v) -> double { 
             VM* vm = &exec->vm();
@@ -395,10 +395,10 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
 
     if (!exceptionChecks.empty()) {
         exceptionChecks.link(&jit);
-        jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+        jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm);
         jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
         auto call = jit.call();
-        jit.jumpToExceptionHandler();
+        jit.jumpToExceptionHandler(*vm);
 
         void (*doUnwinding)(ExecState*) = [] (ExecState* exec) -> void {
             VM* vm = &exec->vm();
@@ -429,7 +429,7 @@ static MacroAssemblerCodeRef wasmToJs(VM* vm, Bag<CallLinkInfo>& callLinkInfos,
 static MacroAssemblerCodeRef wasmToWasm(VM* vm, unsigned importIndex)
 {
     const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get();
-    JIT jit(vm, nullptr);
+    JIT jit;
 
     GPRReg scratch = GPRInfo::nonPreservedNonArgumentGPR;