[iOS] Upstream JavaScriptCore support for ARM64
author dbates@webkit.org <dbates@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 15 Oct 2013 22:16:39 +0000 (22:16 +0000)
committer dbates@webkit.org <dbates@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 15 Oct 2013 22:16:39 +0000 (22:16 +0000)
https://bugs.webkit.org/show_bug.cgi?id=122762

Source/JavaScriptCore:

Reviewed by Oliver Hunt and Filip Pizlo.

* Configurations/Base.xcconfig:
* Configurations/DebugRelease.xcconfig:
* Configurations/JavaScriptCore.xcconfig:
* Configurations/ToolExecutable.xcconfig:
* JavaScriptCore.xcodeproj/project.pbxproj:
* assembler/ARM64Assembler.h: Added.
* assembler/AbstractMacroAssembler.h:
(JSC::isARM64):
(JSC::AbstractMacroAssembler::Label::Label):
(JSC::AbstractMacroAssembler::Jump::Jump):
(JSC::AbstractMacroAssembler::Jump::link):
(JSC::AbstractMacroAssembler::Jump::linkTo):
(JSC::AbstractMacroAssembler::CachedTempRegister::CachedTempRegister):
(JSC::AbstractMacroAssembler::CachedTempRegister::registerIDInvalidate):
(JSC::AbstractMacroAssembler::CachedTempRegister::registerIDNoInvalidate):
(JSC::AbstractMacroAssembler::CachedTempRegister::value):
(JSC::AbstractMacroAssembler::CachedTempRegister::setValue):
(JSC::AbstractMacroAssembler::CachedTempRegister::invalidate):
(JSC::AbstractMacroAssembler::invalidateAllTempRegisters):
(JSC::AbstractMacroAssembler::isTempRegisterValid):
(JSC::AbstractMacroAssembler::clearTempRegisterValid):
(JSC::AbstractMacroAssembler::setTempRegisterValid):
* assembler/LinkBuffer.cpp:
(JSC::LinkBuffer::copyCompactAndLinkCode):
(JSC::LinkBuffer::linkCode):
* assembler/LinkBuffer.h:
* assembler/MacroAssembler.h:
(JSC::MacroAssembler::isPtrAlignedAddressOffset):
(JSC::MacroAssembler::pushToSave):
(JSC::MacroAssembler::popToRestore):
(JSC::MacroAssembler::patchableBranchTest32):
* assembler/MacroAssemblerARM64.h: Added.
* assembler/MacroAssemblerARMv7.h:
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileArithDiv):
(JSC::DFG::SpeculativeJIT::compileArithMod):
* disassembler/ARM64/A64DOpcode.cpp: Added.
* disassembler/ARM64/A64DOpcode.h: Added.
* disassembler/ARM64Disassembler.cpp: Added.
* heap/MachineStackMarker.cpp:
(JSC::getPlatformThreadRegisters):
(JSC::otherThreadStackPointer):
* heap/Region.h:
* jit/AssemblyHelpers.h:
(JSC::AssemblyHelpers::debugCall):
* jit/CCallHelpers.h:
* jit/ExecutableAllocator.h:
* jit/FPRInfo.h:
(JSC::FPRInfo::toRegister):
(JSC::FPRInfo::toIndex):
(JSC::FPRInfo::debugName):
* jit/GPRInfo.h:
(JSC::GPRInfo::toRegister):
(JSC::GPRInfo::toIndex):
(JSC::GPRInfo::debugName):
* jit/JITInlines.h:
(JSC::JIT::restoreArgumentReferenceForTrampoline):
* jit/JITOperationWrappers.h:
* jit/JITOperations.cpp:
* jit/JITStubs.cpp:
(JSC::performPlatformSpecificJITAssertions):
(JSC::tryCachePutByID):
* jit/JITStubs.h:
(JSC::JITStackFrame::returnAddressSlot):
* jit/JITStubsARM64.h: Added.
* jit/JSInterfaceJIT.h:
* jit/Repatch.cpp:
(JSC::emitRestoreScratch):
(JSC::generateProtoChainAccessStub):
(JSC::tryCacheGetByID):
(JSC::emitPutReplaceStub):
(JSC::tryCachePutByID):
(JSC::tryRepatchIn):
* jit/ScratchRegisterAllocator.h:
(JSC::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
(JSC::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
* jit/ThunkGenerators.cpp:
(JSC::nativeForGenerator):
(JSC::floorThunkGenerator):
(JSC::ceilThunkGenerator):
* jsc.cpp:
(main):
* llint/LLIntOfflineAsmConfig.h:
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::handleHostCall):
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter64.asm:
* offlineasm/arm.rb:
* offlineasm/arm64.rb: Added.
* offlineasm/backends.rb:
* offlineasm/instructions.rb:
* offlineasm/risc.rb:
* offlineasm/transform.rb:
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::alignCallFrameSizeInBytes):
(JSC::Yarr::YarrGenerator::initCallFrame):
(JSC::Yarr::YarrGenerator::removeCallFrame):
(JSC::Yarr::YarrGenerator::generateEnter):
* yarr/YarrJIT.h:

Source/WTF:

Reviewed by Oliver Hunt.

* Configurations/Base.xcconfig:
* wtf/Atomics.h:
(WTF::weakCompareAndSwap):
(WTF::armV7_dmb):
* wtf/FastMalloc.cpp:
* wtf/Platform.h:
* wtf/dtoa.cpp:
* wtf/dtoa/utils.h:
* wtf/text/ASCIIFastPath.h:
(WTF::copyLCharsFromUCharSource):
* wtf/text/StringImpl.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@157474 268f45cc-cd09-0410-ab3c-d52691b4dbfc

59 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/Configurations/Base.xcconfig
Source/JavaScriptCore/Configurations/DebugRelease.xcconfig
Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig
Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/assembler/ARM64Assembler.h [new file with mode: 0644]
Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
Source/JavaScriptCore/assembler/LinkBuffer.cpp
Source/JavaScriptCore/assembler/LinkBuffer.h
Source/JavaScriptCore/assembler/MacroAssembler.h
Source/JavaScriptCore/assembler/MacroAssemblerARM64.h [new file with mode: 0644]
Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.cpp [new file with mode: 0644]
Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.h [new file with mode: 0644]
Source/JavaScriptCore/disassembler/ARM64Disassembler.cpp [new file with mode: 0644]
Source/JavaScriptCore/heap/MachineStackMarker.cpp
Source/JavaScriptCore/heap/Region.h
Source/JavaScriptCore/jit/AssemblyHelpers.h
Source/JavaScriptCore/jit/CCallHelpers.h
Source/JavaScriptCore/jit/ExecutableAllocator.h
Source/JavaScriptCore/jit/FPRInfo.h
Source/JavaScriptCore/jit/GPRInfo.h
Source/JavaScriptCore/jit/JITInlines.h
Source/JavaScriptCore/jit/JITOperationWrappers.h
Source/JavaScriptCore/jit/JITOperations.cpp
Source/JavaScriptCore/jit/JITStubs.cpp
Source/JavaScriptCore/jit/JITStubs.h
Source/JavaScriptCore/jit/JITStubsARM64.h [new file with mode: 0644]
Source/JavaScriptCore/jit/JSInterfaceJIT.h
Source/JavaScriptCore/jit/Repatch.cpp
Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
Source/JavaScriptCore/jit/ThunkGenerators.cpp
Source/JavaScriptCore/jsc.cpp
Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
Source/JavaScriptCore/llint/LowLevelInterpreter.asm
Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
Source/JavaScriptCore/offlineasm/arm.rb
Source/JavaScriptCore/offlineasm/arm64.rb [new file with mode: 0644]
Source/JavaScriptCore/offlineasm/backends.rb
Source/JavaScriptCore/offlineasm/instructions.rb
Source/JavaScriptCore/offlineasm/risc.rb
Source/JavaScriptCore/offlineasm/transform.rb
Source/JavaScriptCore/yarr/YarrJIT.cpp
Source/JavaScriptCore/yarr/YarrJIT.h
Source/WTF/ChangeLog
Source/WTF/Configurations/Base.xcconfig
Source/WTF/wtf/Atomics.h
Source/WTF/wtf/FastMalloc.cpp
Source/WTF/wtf/Platform.h
Source/WTF/wtf/dtoa.cpp
Source/WTF/wtf/dtoa/utils.h
Source/WTF/wtf/text/ASCIIFastPath.h
Source/WTF/wtf/text/StringImpl.h

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index c5eff6e..6eb6ae3 100644 (file)
@@ -1,3 +1,116 @@
+2013-10-15  Daniel Bates  <dabates@apple.com>
+
+        [iOS] Upstream JavaScriptCore support for ARM64
+        https://bugs.webkit.org/show_bug.cgi?id=122762
+
+        Reviewed by Oliver Hunt and Filip Pizlo.
+
+        * Configurations/Base.xcconfig:
+        * Configurations/DebugRelease.xcconfig:
+        * Configurations/JavaScriptCore.xcconfig:
+        * Configurations/ToolExecutable.xcconfig:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * assembler/ARM64Assembler.h: Added.
+        * assembler/AbstractMacroAssembler.h:
+        (JSC::isARM64):
+        (JSC::AbstractMacroAssembler::Label::Label):
+        (JSC::AbstractMacroAssembler::Jump::Jump):
+        (JSC::AbstractMacroAssembler::Jump::link):
+        (JSC::AbstractMacroAssembler::Jump::linkTo):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::CachedTempRegister):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::registerIDInvalidate):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::registerIDNoInvalidate):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::value):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::setValue):
+        (JSC::AbstractMacroAssembler::CachedTempRegister::invalidate):
+        (JSC::AbstractMacroAssembler::invalidateAllTempRegisters):
+        (JSC::AbstractMacroAssembler::isTempRegisterValid):
+        (JSC::AbstractMacroAssembler::clearTempRegisterValid):
+        (JSC::AbstractMacroAssembler::setTempRegisterValid):
+        * assembler/LinkBuffer.cpp:
+        (JSC::LinkBuffer::copyCompactAndLinkCode):
+        (JSC::LinkBuffer::linkCode):
+        * assembler/LinkBuffer.h:
+        * assembler/MacroAssembler.h:
+        (JSC::MacroAssembler::isPtrAlignedAddressOffset):
+        (JSC::MacroAssembler::pushToSave):
+        (JSC::MacroAssembler::popToRestore):
+        (JSC::MacroAssembler::patchableBranchTest32):
+        * assembler/MacroAssemblerARM64.h: Added.
+        * assembler/MacroAssemblerARMv7.h:
+        * dfg/DFGFixupPhase.cpp:
+        (JSC::DFG::FixupPhase::fixupNode):
+        * dfg/DFGOSRExitCompiler32_64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGOSRExitCompiler64.cpp:
+        (JSC::DFG::OSRExitCompiler::compileExit):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::compileArithDiv):
+        (JSC::DFG::SpeculativeJIT::compileArithMod):
+        * disassembler/ARM64/A64DOpcode.cpp: Added.
+        * disassembler/ARM64/A64DOpcode.h: Added.
+        * disassembler/ARM64Disassembler.cpp: Added.
+        * heap/MachineStackMarker.cpp:
+        (JSC::getPlatformThreadRegisters):
+        (JSC::otherThreadStackPointer):
+        * heap/Region.h:
+        * jit/AssemblyHelpers.h:
+        (JSC::AssemblyHelpers::debugCall):
+        * jit/CCallHelpers.h:
+        * jit/ExecutableAllocator.h:
+        * jit/FPRInfo.h:
+        (JSC::FPRInfo::toRegister):
+        (JSC::FPRInfo::toIndex):
+        (JSC::FPRInfo::debugName):
+        * jit/GPRInfo.h:
+        (JSC::GPRInfo::toRegister):
+        (JSC::GPRInfo::toIndex):
+        (JSC::GPRInfo::debugName):
+        * jit/JITInlines.h:
+        (JSC::JIT::restoreArgumentReferenceForTrampoline):
+        * jit/JITOperationWrappers.h:
+        * jit/JITOperations.cpp:
+        * jit/JITStubs.cpp:
+        (JSC::performPlatformSpecificJITAssertions):
+        (JSC::tryCachePutByID):
+        * jit/JITStubs.h:
+        (JSC::JITStackFrame::returnAddressSlot):
+        * jit/JITStubsARM64.h: Added.
+        * jit/JSInterfaceJIT.h:
+        * jit/Repatch.cpp:
+        (JSC::emitRestoreScratch):
+        (JSC::generateProtoChainAccessStub):
+        (JSC::tryCacheGetByID):
+        (JSC::emitPutReplaceStub):
+        (JSC::tryCachePutByID):
+        (JSC::tryRepatchIn):
+        * jit/ScratchRegisterAllocator.h:
+        (JSC::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
+        (JSC::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
+        * jit/ThunkGenerators.cpp:
+        (JSC::nativeForGenerator):
+        (JSC::floorThunkGenerator):
+        (JSC::ceilThunkGenerator):
+        * jsc.cpp:
+        (main):
+        * llint/LLIntOfflineAsmConfig.h:
+        * llint/LLIntSlowPaths.cpp:
+        (JSC::LLInt::handleHostCall):
+        * llint/LowLevelInterpreter.asm:
+        * llint/LowLevelInterpreter64.asm:
+        * offlineasm/arm.rb:
+        * offlineasm/arm64.rb: Added.
+        * offlineasm/backends.rb:
+        * offlineasm/instructions.rb:
+        * offlineasm/risc.rb:
+        * offlineasm/transform.rb:
+        * yarr/YarrJIT.cpp:
+        (JSC::Yarr::YarrGenerator::alignCallFrameSizeInBytes):
+        (JSC::Yarr::YarrGenerator::initCallFrame):
+        (JSC::Yarr::YarrGenerator::removeCallFrame):
+        (JSC::Yarr::YarrGenerator::generateEnter):
+        * yarr/YarrJIT.h:
+
 2013-10-15  Mark Lam  <mark.lam@apple.com>
 
         Fix 3 operand sub operation in C loop LLINT.
diff --git a/Source/JavaScriptCore/Configurations/Base.xcconfig b/Source/JavaScriptCore/Configurations/Base.xcconfig
index 4d9e231..8fe606b 100644 (file)
@@ -59,6 +59,7 @@ GCC_WARN_64_TO_32_BIT_CONVERSION = $(GCC_WARN_64_TO_32_BIT_CONVERSION_$(CURRENT_
 GCC_WARN_64_TO_32_BIT_CONVERSION_ = YES;
 GCC_WARN_64_TO_32_BIT_CONVERSION_armv7 = YES;
 GCC_WARN_64_TO_32_BIT_CONVERSION_armv7s = YES;
+GCC_WARN_64_TO_32_BIT_CONVERSION_arm64 = NO;
 GCC_WARN_64_TO_32_BIT_CONVERSION_i386 = YES;
 GCC_WARN_64_TO_32_BIT_CONVERSION_x86_64 = NO;
 GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
@@ -74,9 +75,9 @@ GCC_WARN_UNUSED_VARIABLE = YES;
 LINKER_DISPLAYS_MANGLED_NAMES = YES;
 PREBINDING = NO;
 VALID_ARCHS = $(VALID_ARCHS_$(PLATFORM_NAME));
-VALID_ARCHS_iphoneos = $(ARCHS_STANDARD_32_BIT);
-VALID_ARCHS_iphonesimulator = $(ARCHS_STANDARD_32_BIT);
-VALID_ARCHS_macosx = i386 ppc x86_64 ppc64 $(ARCHS_UNIVERSAL_IPHONE_OS);
+VALID_ARCHS_iphoneos = $(ARCHS_STANDARD_32_64_BIT);
+VALID_ARCHS_iphonesimulator = $(ARCHS_STANDARD_32_64_BIT);
+VALID_ARCHS_macosx = i386 ppc x86_64 ppc64;
 WARNING_CFLAGS = -Wall -Wextra -Wcast-qual -Wchar-subscripts -Wextra-tokens -Wformat=2 -Winit-self -Wmissing-format-attribute -Wmissing-noreturn -Wpacked -Wpointer-arith -Wredundant-decls -Wundef -Wwrite-strings -Wexit-time-destructors -Wglobal-constructors -Wtautological-compare;
 HEADER_SEARCH_PATHS = . icu "${BUILT_PRODUCTS_DIR}/usr/local/include" $(HEADER_SEARCH_PATHS);
 
diff --git a/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig b/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig
index f3e6e90..9cbc48b 100644 (file)
@@ -25,8 +25,8 @@
 #include "Base.xcconfig"
 
 ARCHS = $(ARCHS_$(PLATFORM_NAME));
-ARCHS_iphoneos = $(ARCHS_UNIVERSAL_IPHONE_OS);
-ARCHS_iphonesimulator = $(NATIVE_ARCH);
+ARCHS_iphoneos = $(ARCHS_STANDARD_32_64_BIT);
+ARCHS_iphonesimulator = $(ARCHS_STANDARD_32_64_BIT);
 ARCHS_macosx = $(ARCHS_STANDARD_32_64_BIT);
 
 ONLY_ACTIVE_ARCH = YES;
diff --git a/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig b/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig
index 0716885..aef1910 100644 (file)
@@ -28,6 +28,7 @@ JSVALUE_MODEL = $(JSVALUE_MODEL_$(CURRENT_ARCH));
 JSVALUE_MODEL_ = UNKNOWN_JSVALUE_MODEL;
 JSVALUE_MODEL_armv6 = 32_64;
 JSVALUE_MODEL_armv7 = 32_64;
+JSVALUE_MODEL_arm64 = 64;
 JSVALUE_MODEL_i386 = 32_64;
 JSVALUE_MODEL_ppc = 32_64;
 JSVALUE_MODEL_x86_64 = 64;
diff --git a/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig b/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig
index 8208d0d..f19bbca 100644 (file)
@@ -39,6 +39,7 @@ SKIP_INSTALL_YES = NO;
 GCC_ENABLE_OBJC_GC = NO;
 CLANG_ENABLE_OBJC_ARC = $(CLANG_ENABLE_OBJC_ARC_$(CURRENT_ARCH));
 CLANG_ENABLE_OBJC_ARC_x86_64 = YES;
+CLANG_ENABLE_OBJC_ARC_arm64 = YES;
 CLANG_ENABLE_OBJC_ARC_armv7 = YES;
 CLANG_ENABLE_OBJC_ARC_armv7s = YES;
 
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index bdd8950..27b0b48 100644 (file)
                0F714CA516EA92F200F3EBEB /* DFGBackwardsPropagationPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F714CA216EA92ED00F3EBEB /* DFGBackwardsPropagationPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F73D7AE165A142D00ACAB71 /* ClosureCallStubRoutine.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F73D7AB165A142A00ACAB71 /* ClosureCallStubRoutine.cpp */; };
                0F73D7AF165A143000ACAB71 /* ClosureCallStubRoutine.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F73D7AC165A142A00ACAB71 /* ClosureCallStubRoutine.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               0F743BAA16B88249009F9277 /* ARM64Disassembler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */; };
                0F766D2815A8CC1E008F363E /* JITStubRoutine.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D2615A8CC1B008F363E /* JITStubRoutine.cpp */; };
                0F766D2B15A8CC38008F363E /* JITStubRoutineSet.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D2915A8CC34008F363E /* JITStubRoutineSet.cpp */; };
                0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D2A15A8CC34008F363E /* JITStubRoutineSet.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FF729BF166AD360000F5BA3 /* ProfilerOrigin.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FF729A0166AD347000F5BA3 /* ProfilerOrigin.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FF729C0166AD360000F5BA3 /* ProfilerOriginStack.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FF729A2166AD347000F5BA3 /* ProfilerOriginStack.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FF922D414F46B410041A24E /* LLIntOffsetsExtractor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */; };
+               0FFA549716B8835000B3A982 /* A64DOpcode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 652A3A221651C69700A80AFE /* A64DOpcode.cpp */; };
+               0FFA549816B8835300B3A982 /* A64DOpcode.h in Headers */ = {isa = PBXBuildFile; fileRef = 652A3A231651C69700A80AFE /* A64DOpcode.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FFB921816D02EB20055A5DB /* DFGAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB4B51916B62772003F696B /* DFGAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FFB921A16D02EC50055A5DB /* DFGBasicBlockInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD5652216AB780A00197653 /* DFGBasicBlockInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FFB921B16D02F010055A5DB /* DFGNodeAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB4B51F16B62772003F696B /* DFGNodeAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
                371D842D17C98B6E00ECF994 /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 371D842C17C98B6E00ECF994 /* libz.dylib */; };
                41359CF30FDD89AD00206180 /* DateConversion.h in Headers */ = {isa = PBXBuildFile; fileRef = D21202290AD4310C00ED79B6 /* DateConversion.h */; };
                4443AE3316E188D90076F110 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 51F0EB6105C86C6B00E6DF1B /* Foundation.framework */; };
+               44DF93E617A1D9200097B97B /* JITStubsARM64.h in Headers */ = {isa = PBXBuildFile; fileRef = 44DF93E517A1D9200097B97B /* JITStubsARM64.h */; };
                451539B912DC994500EF7AC4 /* Yarr.h in Headers */ = {isa = PBXBuildFile; fileRef = 451539B812DC994500EF7AC4 /* Yarr.h */; settings = {ATTRIBUTES = (Private, ); }; };
                5D53726F0E1C54880021E549 /* Tracing.h in Headers */ = {isa = PBXBuildFile; fileRef = 5D53726E0E1C54880021E549 /* Tracing.h */; };
                5D5D8AD10E0D0EBE00F9C692 /* libedit.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 5D5D8AD00E0D0EBE00F9C692 /* libedit.dylib */; };
                2AD8932917E3868F00668276 /* HeapIterationScope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HeapIterationScope.h; sourceTree = "<group>"; };
                371D842C17C98B6E00ECF994 /* libz.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.dylib; path = usr/lib/libz.dylib; sourceTree = SDKROOT; };
                449097EE0F8F81B50076A327 /* FeatureDefines.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = FeatureDefines.xcconfig; sourceTree = "<group>"; };
+               44DF93E517A1D9200097B97B /* JITStubsARM64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITStubsARM64.h; sourceTree = "<group>"; };
                451539B812DC994500EF7AC4 /* Yarr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Yarr.h; path = yarr/Yarr.h; sourceTree = "<group>"; };
                45E12D8806A49B0F00E9DF84 /* jsc.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = jsc.cpp; sourceTree = "<group>"; tabWidth = 4; };
                51F0EB6105C86C6B00E6DF1B /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = "<absolute>"; };
                6507D2970E871E4A00D7D896 /* JSTypeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSTypeInfo.h; sourceTree = "<group>"; };
                651122E5140469BA002B101D /* testRegExp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = testRegExp.cpp; sourceTree = "<group>"; };
                6511230514046A4C002B101D /* testRegExp */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = testRegExp; sourceTree = BUILT_PRODUCTS_DIR; };
+               652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ARM64Disassembler.cpp; path = disassembler/ARM64Disassembler.cpp; sourceTree = "<group>"; };
+               652A3A221651C69700A80AFE /* A64DOpcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = A64DOpcode.cpp; path = disassembler/ARM64/A64DOpcode.cpp; sourceTree = "<group>"; };
+               652A3A231651C69700A80AFE /* A64DOpcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = A64DOpcode.h; path = disassembler/ARM64/A64DOpcode.h; sourceTree = "<group>"; };
                65303D631447B9E100D3F904 /* ParserTokens.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParserTokens.h; sourceTree = "<group>"; };
                65400C0F0A69BAF200509887 /* PropertyNameArray.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = PropertyNameArray.cpp; sourceTree = "<group>"; };
                65400C100A69BAF200509887 /* PropertyNameArray.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = PropertyNameArray.h; sourceTree = "<group>"; };
                863C6D981521111200585E4E /* YarrCanonicalizeUCS2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = YarrCanonicalizeUCS2.cpp; path = yarr/YarrCanonicalizeUCS2.cpp; sourceTree = "<group>"; };
                863C6D991521111200585E4E /* YarrCanonicalizeUCS2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = YarrCanonicalizeUCS2.h; path = yarr/YarrCanonicalizeUCS2.h; sourceTree = "<group>"; };
                863C6D9A1521111200585E4E /* YarrCanonicalizeUCS2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; name = YarrCanonicalizeUCS2.js; path = yarr/YarrCanonicalizeUCS2.js; sourceTree = "<group>"; };
+               8640923B156EED3B00566CB2 /* ARM64Assembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ARM64Assembler.h; sourceTree = "<group>"; };
+               8640923C156EED3B00566CB2 /* MacroAssemblerARM64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerARM64.h; sourceTree = "<group>"; };
                865A30F0135007E100CDB49E /* JSCJSValueInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCJSValueInlines.h; sourceTree = "<group>"; };
                865F408710E7D56300947361 /* APIShims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = APIShims.h; sourceTree = "<group>"; };
                866739D013BFDE710023D87C /* BigInteger.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BigInteger.h; sourceTree = "<group>"; };
                0FF4272E158EBCCE004CB9FF /* disassembler */ = {
                        isa = PBXGroup;
                        children = (
+                               652A3A1A1651A92400A80AFE /* ARM64 */,
                                65C028591717966800351E35 /* ARMv7 */,
                                0FF42733158EBD64004CB9FF /* udis86 */,
+                               652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */,
                                65C0284F171795E200351E35 /* ARMv7Disassembler.cpp */,
                                0F9D336E165DBB8D005AD387 /* Disassembler.cpp */,
                                0FF4272F158EBD44004CB9FF /* Disassembler.h */,
                                14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */,
                                14A6581A0F4E36F4000150FD /* JITStubs.h */,
                                FEF6835A174343CC00A32E25 /* JITStubsARM.h */,
+                               44DF93E517A1D9200097B97B /* JITStubsARM64.h */,
                                FEF6835B174343CC00A32E25 /* JITStubsARMv7.h */,
                                A7A4AE0917973B4D005612B1 /* JITStubsMIPS.h */,
                                A7A4AE0B17973B4D005612B1 /* JITStubsSH4.h */,
                        tabWidth = 4;
                        usesTabs = 0;
                };
+               652A3A1A1651A92400A80AFE /* ARM64 */ = {
+                       isa = PBXGroup;
+                       children = (
+                               652A3A221651C69700A80AFE /* A64DOpcode.cpp */,
+                               652A3A231651C69700A80AFE /* A64DOpcode.h */,
+                       );
+                       name = ARM64;
+                       sourceTree = "<group>";
+               };
                65C028591717966800351E35 /* ARMv7 */ = {
                        isa = PBXGroup;
                        children = (
                        isa = PBXGroup;
                        children = (
                                860161DF0F3A83C100F84710 /* AbstractMacroAssembler.h */,
+                               8640923B156EED3B00566CB2 /* ARM64Assembler.h */,
                                86D3B2BF10156BDE002865E7 /* ARMAssembler.cpp */,
                                86D3B2C010156BDE002865E7 /* ARMAssembler.h */,
                                A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */,
                                86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */,
                                86C568DD11A213EE0007F7F0 /* MacroAssemblerARM.cpp */,
                                86D3B2C210156BDE002865E7 /* MacroAssemblerARM.h */,
+                               8640923C156EED3B00566CB2 /* MacroAssemblerARM64.h */,
                                A729009B17976C6000317298 /* MacroAssemblerARMv7.cpp */,
                                86ADD1440FDDEA980006EEC2 /* MacroAssemblerARMv7.h */,
                                863B23DF0FC60E6200703AA4 /* MacroAssemblerCodeRef.h */,
                        buildActionMask = 2147483647;
                        files = (
                                0FFB921F16D033050055A5DB /* (null) in Headers */,
+                               0FFA549816B8835300B3A982 /* A64DOpcode.h in Headers */,
                                860161E30F3A83C100F84710 /* AbstractMacroAssembler.h in Headers */,
                                0F55F0F514D1063C00AC7649 /* AbstractPC.h in Headers */,
                                2A48D1911772365B00C65A5F /* APICallbackFunction.h in Headers */,
                                0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */,
                                14C5242B0F5355E900BA3D04 /* JITStubs.h in Headers */,
                                FEF6835E174343CC00A32E25 /* JITStubsARM.h in Headers */,
+                               44DF93E617A1D9200097B97B /* JITStubsARM64.h in Headers */,
                                FEF6835F174343CC00A32E25 /* JITStubsARMv7.h in Headers */,
                                A7A4AE0D17973B4D005612B1 /* JITStubsMIPS.h in Headers */,
                                A7A4AE0F17973B4D005612B1 /* JITStubsSH4.h in Headers */,
                        isa = PBXSourcesBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
+                               0FFA549716B8835000B3A982 /* A64DOpcode.cpp in Sources */,
                                0F55F0F414D1063900AC7649 /* AbstractPC.cpp in Sources */,
                                147F39BD107EC37600427A48 /* ArgList.cpp in Sources */,
                                147F39BE107EC37600427A48 /* Arguments.cpp in Sources */,
+                               0F743BAA16B88249009F9277 /* ARM64Disassembler.cpp in Sources */,
                                86D3B2C310156BDE002865E7 /* ARMAssembler.cpp in Sources */,
                                A74DE1D0120B875600D40D5B /* ARMv7Assembler.cpp in Sources */,
                                65C02850171795E200351E35 /* ARMv7Disassembler.cpp in Sources */,
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h
new file mode 100644 (file)
index 0000000..ddb85f7
--- /dev/null
@@ -0,0 +1,3501 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef ARM64Assembler_h
+#define ARM64Assembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM64)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
+#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
+#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
+#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
+#define DATASIZE DATASIZE_OF(datasize)
+#define MEMOPSIZE MEMOPSIZE_OF(datasize)
+#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
+
+namespace JSC {
+
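+// Returns true if value fits in a signed 9-bit immediate field (-256..255).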
+ALWAYS_INLINE bool isInt9(int32_t value)
+{
+    return value == ((value << 23) >> 23);
+}
+
+ALWAYS_INLINE bool isUInt5(int32_t value)
+{
+    return !(value & ~0x1f);
+}
+
+ALWAYS_INLINE bool isUInt12(int32_t value)
+{
+    return !(value & ~0xfff);
+}
+
+ALWAYS_INLINE bool isUInt12(intptr_t value)
+{
+    return !(value & ~0xfffL);
+}
+
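+// Thin wrapper types that ASSERT at construction that their operand fits the
+// named immediate field: a 5-bit unsigned, a 12-bit unsigned, or a 9-bit signed
+// pre/post-index offset.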
+class UInt5 {
+public:
+    explicit UInt5(int value)
+        : m_value(value)
+    {
+        ASSERT(isUInt5(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class UInt12 {
+public:
+    explicit UInt12(int value)
+        : m_value(value)
+    {
+        ASSERT(isUInt12(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PostIndex {
+public:
+    explicit PostIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt9(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PreIndex {
+public:
+    explicit PreIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt9(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
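+// Represents the immN:immr:imms encoding used by the logical-immediate forms of
+// AND, ORR, EOR and ANDS. create32()/create64() return an invalid encoding when
+// the value is not a repeating pattern of a (possibly rotated) contiguous run of
+// set bits; in particular 0 and -1 are never encodable.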
+class LogicalImmediate {
+public:
+    static LogicalImmediate create32(uint32_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // First look for a 32-bit pattern, then for repeating 16-bit
+        // patterns, 8-bit, 4-bit, and finally 2-bit.
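+        // For example, 0xff00ff00 reduces to the repeating 16-bit pattern 0xff00
+        // and 0x0f0f0f0f to the 8-bit pattern 0x0f, both of which encode, while
+        // 0x12345678 has no repeating structure and cannot be encoded.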
+
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<32>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+        if ((value & 0xffff) != (value >> 16))
+            return InvalidLogicalImmediate;
+        value &= 0xffff;
+
+        if (findBitRange<16>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+        if ((value & 0xff) != (value >> 8))
+            return InvalidLogicalImmediate;
+        value &= 0xff;
+
+        if (findBitRange<8>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+        if ((value & 0xf) != (value >> 4))
+            return InvalidLogicalImmediate;
+        value &= 0xf;
+
+        if (findBitRange<4>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+        if ((value & 0x3) != (value >> 2))
+            return InvalidLogicalImmediate;
+        value &= 0x3;
+
+        if (findBitRange<2>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+        return InvalidLogicalImmediate;
+    }
+
+    static LogicalImmediate create64(uint64_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // Look for a contiguous bit range.
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<64>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+        if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
+            return create32(static_cast<uint32_t>(value));
+        return InvalidLogicalImmediate;
+    }
+
+    int value() const
+    {
+        ASSERT(isValid());
+        return m_value;
+    }
+
+    bool isValid() const
+    {
+        return m_value != InvalidLogicalImmediate;
+    }
+
+    bool is64bit() const
+    {
+        return m_value & (1 << 12);
+    }
+
+private:
+    LogicalImmediate(int value)
+        : m_value(value)
+    {
+    }
+
+    // Generate a mask with bits in the range hsb..0 set, for example:
+    //   hsb:63 = 0xffffffffffffffff
+    //   hsb:42 = 0x000007ffffffffff
+    //   hsb: 0 = 0x0000000000000001
+    static uint64_t mask(unsigned hsb)
+    {
+        ASSERT(hsb < 64);
+        return 0xffffffffffffffffull >> (63 - hsb);
+    }
+
+    template<unsigned N>
+    static void partialHSB(uint64_t& value, unsigned& result)
+    {
+        if (value & (0xffffffffffffffffull << N)) {
+            result += N;
+            value >>= N;
+        }
+    }
+
+    // Find the bit number of the highest bit set in a non-zero value, for example:
+    //   0x8080808080808080 = hsb:63
+    //   0x0000000000000001 = hsb: 0
+    //   0x000007ffffe00000 = hsb:42
+    static unsigned highestSetBit(uint64_t value)
+    {
+        ASSERT(value);
+        unsigned hsb = 0;
+        partialHSB<32>(value, hsb);
+        partialHSB<16>(value, hsb);
+        partialHSB<8>(value, hsb);
+        partialHSB<4>(value, hsb);
+        partialHSB<2>(value, hsb);
+        partialHSB<1>(value, hsb);
+        return hsb;
+    }
+
+    // This function takes a value and a bit width, where value obeys the following constraints:
+    //   * bits outside of the width of the value must be zero.
+    //   * bits within the width of value must be neither all clear nor all set.
+    // The input is inspected to detect values that consist of either two or three contiguous
+    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
+    // If the range is set, inverted will be false, and if the range is clear, inverted will
+    // be true. For example (with width 8):
+    //   00001111 = hsb:3, lsb:0, inverted:false
+    //   11110000 = hsb:3, lsb:0, inverted:true
+    //   00111100 = hsb:5, lsb:2, inverted:false
+    //   11000011 = hsb:5, lsb:2, inverted:true
+    template<unsigned width>
+    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+    {
+        ASSERT(value & mask(width - 1));
+        ASSERT(value != mask(width - 1));
+        ASSERT(!(value & ~mask(width - 1)));
+
+        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+        // This halves the number of patterns we need to look for.
+        const uint64_t msb = 1ull << (width - 1);
+        if ((inverted = (value & msb)))
+            value ^= mask(width - 1);
+
+        // Find the highest set bit in value, generate a corresponding mask & flip all
+        // bits under it.
+        hsb = highestSetBit(value);
+        value ^= mask(hsb);
+        if (!value) {
+            // If this cleared the value, then the range hsb..0 was all set.
+            lsb = 0;
+            return true;
+        }
+
+        // Try making one more mask, and flipping the bits!
+        lsb = highestSetBit(value);
+        value ^= mask(lsb);
+        if (!value) {
+            // Success - but lsb actually points to the hsb of a third range - add one
+            // to get to the lsb of the mid range.
+            ++lsb;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Encodes the set of immN:immr:imms fields found in a logical immediate.
+    template<unsigned width>
+    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+    {
+        // Check width is a power of 2!
+        ASSERT(!(width & (width - 1)));
+        ASSERT(width <= 64 && width >= 2);
+        ASSERT(hsb >= lsb);
+        ASSERT(hsb < width);
+
+        int immN = 0;
+        int imms = 0;
+        int immr = 0;
+
+        // For 64-bit values this is easy - just set immN to true, and imms just
+        // contains the bit number of the highest set bit of the set range. For
+        // values with narrower widths, these are encoded by a leading set of
+        // one bits, followed by a zero bit, followed by the remaining set of bits
+        // being the high bit of the range. For a 32-bit immediate there are no
+        // leading one bits, just a zero followed by a five bit number. For a
+        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+        // bit-position, etc.
+        if (width == 64)
+            immN = 1;
+        else
+            imms = 63 & ~(width + width - 1);
+
+        if (inverted) {
+            // If width is 64 & hsb is 62, then we have a value something like:
+            //   0x80000000ffffffff (in this case with lsb 32).
+            // The ror should be by 1, imms (effectively set width minus 1) is
+            // 32. Set width is full width minus cleared width.
+            immr = (width - 1) - hsb;
+            imms |= (width - ((hsb - lsb) + 1)) - 1;
+        } else {
+            // If width is 64 & hsb is 62, then we have a value something like:
+            //   0x7fffffff00000000 (in this case with lsb 32).
+            // The value is effectively rol'ed by lsb, which is equivalent to
+            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+            // is hsb - lsb.
+            immr = (width - lsb) & (width - 1);
+            imms |= hsb - lsb;
+        }
+
+        return immN << 12 | immr << 6 | imms;
+    }
+
+    static const int InvalidLogicalImmediate = -1;
+
+    int m_value;
+};
+
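+// Extracts the 16-bit halfword at index 'which' (0-3) of a 64-bit value,
+// e.g. getHalfword(0x0123456789abcdef, 1) == 0x89ab.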
+inline uint16_t getHalfword(uint64_t value, int which)
+{
+    return value >> (which << 4);
+}
+
+namespace ARM64Registers {
+    typedef enum {
+        // Parameter/result registers
+        x0,
+        x1,
+        x2,
+        x3,
+        x4,
+        x5,
+        x6,
+        x7,
+        // Indirect result location register
+        x8,
+        // Temporary registers
+        x9,
+        x10,
+        x11,
+        x12,
+        x13,
+        x14,
+        x15,
+        // Intra-procedure-call scratch registers (temporary)
+        x16, ip0 = x16,
+        x17, ip1 = x17,
+        // Platform Register (temporary)
+        x18,
+        // Callee-saved
+        x19,
+        x20,
+        x21,
+        x22,
+        x23,
+        x24,
+        x25,
+        x26,
+        x27,
+        x28,
+        // Special
+        x29, fp = x29,
+        x30, lr = x30,
+        sp,
+        zr = 0x3f,
+    } RegisterID;
+
+    typedef enum {
+        // Parameter/result registers
+        q0,
+        q1,
+        q2,
+        q3,
+        q4,
+        q5,
+        q6,
+        q7,
+        // Callee-saved (up to 64-bits only!)
+        q8,
+        q9,
+        q10,
+        q11,
+        q12,
+        q13,
+        q14,
+        q15,
+        // Temporary registers
+        q16,
+        q17,
+        q18,
+        q19,
+        q20,
+        q21,
+        q22,
+        q23,
+        q24,
+        q25,
+        q26,
+        q27,
+        q28,
+        q29,
+        q30,
+        q31,
+    } FPRegisterID;
+
+    static bool isSp(RegisterID reg) { return reg == sp; }
+    static bool isZr(RegisterID reg) { return reg == zr; }
+}
+
+class ARM64Assembler {
+public:
+    typedef ARM64Registers::RegisterID RegisterID;
+    typedef ARM64Registers::FPRegisterID FPRegisterID;
+
+private:
+    static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
+    static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
+
+public:
+    ARM64Assembler()
+        : m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+
+    // (HS, LO, HI, LS) -> (AE, B, A, BE)
+    // (VS, VC) -> (O, NO)
+    typedef enum {
+        ConditionEQ,
+        ConditionNE,
+        ConditionHS, ConditionCS = ConditionHS,
+        ConditionLO, ConditionCC = ConditionLO,
+        ConditionMI,
+        ConditionPL,
+        ConditionVS,
+        ConditionVC,
+        ConditionHI,
+        ConditionLS,
+        ConditionGE,
+        ConditionLT,
+        ConditionGT,
+        ConditionLE,
+        ConditionAL,
+        ConditionInvalid
+    } Condition;
+
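+    // Conditions are laid out in true/false pairs that differ only in the low
+    // bit, so inverting a condition is a single XOR, e.g.
+    // ConditionEQ ^ 1 == ConditionNE and ConditionGE ^ 1 == ConditionLT.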
+    static Condition invert(Condition cond)
+    {
+        return static_cast<Condition>(cond ^ 1);
+    }
+
+    typedef enum {
+        LSL,
+        LSR,
+        ASR,
+        ROR
+    } ShiftType;
+
+    typedef enum {
+        UXTB,
+        UXTH,
+        UXTW,
+        UXTX,
+        SXTB,
+        SXTH,
+        SXTW,
+        SXTX
+    } ExtendType;
+
+    enum SetFlags {
+        DontSetFlags,
+        S
+    };
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
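+    // Each JumpType/JumpLinkType value packs its enum index into the low four
+    // bits and its maximum encoded size in bytes into the upper bits, which
+    // JUMP_ENUM_SIZE recovers.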
+    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
+        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
+        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
+    };
+    enum JumpLinkType {
+        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
+        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
+    };
+
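+    // A LinkRecord captures an unresolved jump: its source and destination
+    // offsets, the kind of jump, and any condition, bit number or compare
+    // register it needs. The fields are bit-packed, and the CopyTypes union
+    // lets operator= copy a record as three 64-bit words.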
+    class LinkRecord {
+    public:
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+        }
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+            data.realTypes.m_is64Bit = is64Bit;
+            data.realTypes.m_compareRegister = compareRegister;
+        }
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+            data.realTypes.m_bitNumber = bitNumber;
+            data.realTypes.m_compareRegister = compareRegister;
+        }
+        void operator=(const LinkRecord& other)
+        {
+            data.copyTypes.content[0] = other.data.copyTypes.content[0];
+            data.copyTypes.content[1] = other.data.copyTypes.content[1];
+            data.copyTypes.content[2] = other.data.copyTypes.content[2];
+        }
+        intptr_t from() const { return data.realTypes.m_from; }
+        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+        intptr_t to() const { return data.realTypes.m_to; }
+        JumpType type() const { return data.realTypes.m_type; }
+        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+        Condition condition() const { return data.realTypes.m_condition; }
+        bool is64Bit() const { return data.realTypes.m_is64Bit; }
+        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
+        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }
+
+    private:
+        union {
+            struct RealTypes {
+                intptr_t m_from : 48;
+                intptr_t m_to : 48;
+                JumpType m_type : 8;
+                JumpLinkType m_linkType : 8;
+                Condition m_condition : 4;
+                bool m_is64Bit : 1;
+                unsigned m_bitNumber : 6;
+                RegisterID m_compareRegister : 5;
+            } realTypes;
+            struct CopyTypes {
+                uint64_t content[3];
+            } copyTypes;
+            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+        } data;
+    };
+
+    // bits(N) VFPExpandImm(bits(8) imm8);
+    //
+    // Encoding of floating point immediates is a little complicated. Here's a
+    // high level description:
+    //     +/-m*2^-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
+    // and the algorithm for expanding to a single precision float:
+    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
+    //
+    // The trickiest bit is how the exponent is handled. The following table
+    // may help clarify things a little:
+    //     654
+    //     100 01111100 124 -3 1020 01111111100
+    //     101 01111101 125 -2 1021 01111111101
+    //     110 01111110 126 -1 1022 01111111110
+    //     111 01111111 127  0 1023 01111111111
+    //     000 10000000 128  1 1024 10000000000
+    //     001 10000001 129  2 1025 10000000001
+    //     010 10000010 130  3 1026 10000000010
+    //     011 10000011 131  4 1027 10000000011
+    // The first column shows the bit pattern stored in bits 6-4 of the arm
+    // encoded immediate. The second column shows the 8-bit IEEE 754 single
+    // -precision exponent in binary, the third column shows the raw decimal
+    // value. IEEE 754 single-precision numbers are stored with a bias of 127
+    // to the exponent, so the fourth column shows the resulting exponent.
+    // From this we can see that the exponent can be in the range -3..4,
+    // which agrees with the high level description given above. The fifth
+    // and sixth columns show the value stored in an IEEE 754 double-precision
+    // number to represent these exponents in decimal and binary, given the
+    // bias of 1023.
+    //
+    // Ultimately, detecting doubles that can be encoded as immediates on arm
+    // and encoding doubles is actually not too bad. A floating point value can
+    // be encoded by retaining the sign bit, the low three bits of the exponent
+    // and the high 4 bits of the mantissa. To validly be able to encode an
+    // immediate the remainder of the mantissa must be zero, and the high part
+    // of the exponent must match the top bit retained, bar the highest bit
+    // which must be its inverse.
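+    // For example, 1.0 (0x3ff0000000000000) and 2.0 (0x4000000000000000) pass
+    // the check below, while 0.1 (0x3fb999999999999a) does not because its low
+    // mantissa bits are nonzero.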
+    static bool canEncodeFPImm(double d)
+    {
+        // Discard the sign bit, the low two bits of the exponent & the highest
+        // four bits of the mantissa.
+        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
+        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
+    }
+
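+    // Unsigned, scaled ("pimm") offsets must be non-negative multiples of the
+    // access size that still fit in 12 bits once scaled; e.g. for datasize == 64
+    // this accepts multiples of 8 in the range 0..32760 (4095 * 8).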
+    template<int datasize>
+    static bool canEncodePImmOffset(int32_t offset)
+    {
+        int32_t maxPImm = 4095 * (datasize / 8);
+        if (offset < 0)
+            return false;
+        if (offset > maxPImm)
+            return false;
+        if (offset & ((datasize / 8) - 1))
+            return false;
+        return true;
+    }
+
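+    // Signed, unscaled ("simm") offsets only need to fit in 9 bits: -256..255.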
+    static bool canEncodeSImmOffset(int32_t offset)
+    {
+        return isInt9(offset);
+    }
+
+private:
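+    // Packs the sign bit, the low three exponent bits and the high four mantissa
+    // bits of the double into the 8-bit imm8 field.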
+    int encodeFPImm(double d)
+    {
+        ASSERT(canEncodeFPImm(d));
+        uint64_t u64 = bitwise_cast<uint64_t>(d);
+        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
+    }
+
+    template<int datasize>
+    int encodeShiftAmount(int amount)
+    {
+        ASSERT(!amount || datasize == (8 << amount));
+        return amount;
+    }
+
+    template<int datasize>
+    static int encodePositiveImmediate(unsigned pimm)
+    {
+        ASSERT(!(pimm & ((datasize / 8) - 1)));
+        return pimm / (datasize / 8);
+    }
+
+    enum Datasize {
+        Datasize_32,
+        Datasize_64,
+        Datasize_64_top,
+        Datasize_16
+    };
+
+    enum MemOpSize {
+        MemOpSize_8_or_128,
+        MemOpSize_16,
+        MemOpSize_32,
+        MemOpSize_64,
+    };
+
+    enum BranchType {
+        BranchType_JMP,
+        BranchType_CALL,
+        BranchType_RET
+    };
+
+    enum AddOp {
+        AddOp_ADD,
+        AddOp_SUB
+    };
+
+    enum BitfieldOp {
+        BitfieldOp_SBFM,
+        BitfieldOp_BFM,
+        BitfieldOp_UBFM
+    };
+
+    enum DataOp1Source {
+        DataOp_RBIT,
+        DataOp_REV16,
+        DataOp_REV32,
+        DataOp_REV64,
+        DataOp_CLZ,
+        DataOp_CLS
+    };
+
+    enum DataOp2Source {
+        DataOp_UDIV = 2,
+        DataOp_SDIV = 3,
+        DataOp_LSLV = 8,
+        DataOp_LSRV = 9,
+        DataOp_ASRV = 10,
+        DataOp_RORV = 11
+    };
+
+    enum DataOp3Source {
+        DataOp_MADD = 0,
+        DataOp_MSUB = 1,
+        DataOp_SMADDL = 2,
+        DataOp_SMSUBL = 3,
+        DataOp_SMULH = 4,
+        DataOp_UMADDL = 10,
+        DataOp_UMSUBL = 11,
+        DataOp_UMULH = 12
+    };
+
+    enum ExcepnOp {
+        ExcepnOp_EXCEPTION = 0,
+        ExcepnOp_BREAKPOINT = 1,
+        ExcepnOp_HALT = 2,
+        ExcepnOp_DCPS = 5
+    };
+
+    enum FPCmpOp {
+        FPCmpOp_FCMP = 0x00,
+        FPCmpOp_FCMP0 = 0x08,
+        FPCmpOp_FCMPE = 0x10,
+        FPCmpOp_FCMPE0 = 0x18
+    };
+
+    enum FPCondCmpOp {
+        FPCondCmpOp_FCMP,
+        FPCondCmpOp_FCMPE
+    };
+
+    enum FPDataOp1Source {
+        FPDataOp_FMOV = 0,
+        FPDataOp_FABS = 1,
+        FPDataOp_FNEG = 2,
+        FPDataOp_FSQRT = 3,
+        FPDataOp_FCVT_toSingle = 4,
+        FPDataOp_FCVT_toDouble = 5,
+        FPDataOp_FCVT_toHalf = 7,
+        FPDataOp_FRINTN = 8,
+        FPDataOp_FRINTP = 9,
+        FPDataOp_FRINTM = 10,
+        FPDataOp_FRINTZ = 11,
+        FPDataOp_FRINTA = 12,
+        FPDataOp_FRINTX = 14,
+        FPDataOp_FRINTI = 15
+    };
+
+    enum FPDataOp2Source {
+        FPDataOp_FMUL,
+        FPDataOp_FDIV,
+        FPDataOp_FADD,
+        FPDataOp_FSUB,
+        FPDataOp_FMAX,
+        FPDataOp_FMIN,
+        FPDataOp_FMAXNM,
+        FPDataOp_FMINNM,
+        FPDataOp_FNMUL
+    };
+
+    enum FPIntConvOp {
+        FPIntConvOp_FCVTNS = 0x00,
+        FPIntConvOp_FCVTNU = 0x01,
+        FPIntConvOp_SCVTF = 0x02,
+        FPIntConvOp_UCVTF = 0x03,
+        FPIntConvOp_FCVTAS = 0x04,
+        FPIntConvOp_FCVTAU = 0x05,
+        FPIntConvOp_FMOV_QtoX = 0x06,
+        FPIntConvOp_FMOV_XtoQ = 0x07,
+        FPIntConvOp_FCVTPS = 0x08,
+        FPIntConvOp_FCVTPU = 0x09,
+        FPIntConvOp_FMOV_QtoX_top = 0x0e,
+        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
+        FPIntConvOp_FCVTMS = 0x10,
+        FPIntConvOp_FCVTMU = 0x11,
+        FPIntConvOp_FCVTZS = 0x18,
+        FPIntConvOp_FCVTZU = 0x19,
+    };
+
+    enum LogicalOp {
+        LogicalOp_AND,
+        LogicalOp_ORR,
+        LogicalOp_EOR,
+        LogicalOp_ANDS
+    };
+
+    enum MemOp {
+        MemOp_STORE,
+        MemOp_LOAD,
+        MemOp_STORE_V128, 
+        MemOp_LOAD_V128,
+        MemOp_PREFETCH = 2, // size must be 3
+        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
+        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
+    };
+
+    enum MoveWideOp {
+        MoveWideOp_N = 0,
+        MoveWideOp_Z = 2,
+        MoveWideOp_K = 3 
+    };
+
+    enum LdrLiteralOp {
+        LdrLiteralOp_32BIT = 0,
+        LdrLiteralOp_64BIT = 1,
+        LdrLiteralOp_LDRSW = 2,
+        LdrLiteralOp_128BIT = 2
+    };
+
+public:
+    // Integer Instructions:
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!shift || shift == 12);
+        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        add<datasize, setFlags>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        if (isSp(rn)) {
+            ASSERT(shift == LSL);
+            add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
+        } else
+            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
+    }
+
+    ALWAYS_INLINE void adr(RegisterID rd, int offset)
+    {
+        insn(pcRelative(false, offset, rd));
+    }
+
+    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
+    {
+        ASSERT(!(offset & 0xfff));
+        insn(pcRelative(true, offset >> 12, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        and_<datasize, setFlags>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        sbfm<datasize>(rd, rn, shift, datasize - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        asrv<datasize>(rd, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
+    }
+
+    ALWAYS_INLINE void b(int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        ASSERT(offset == (offset << 6) >> 6);
+        insn(unconditionalBranchImmediate(false, offset));
+    }
+
+    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        ASSERT(offset == (offset << 13) >> 13);
+        insn(conditionalBranchImmediate(offset, cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        bfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        bfm<datasize>(rd, rn, lsb, lsb + width - 1);
+    }
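+    // For illustration, the bitfield aliases above only recompute immr/imms for bfm:
+    // bfi<64>(rd, rn, 8, 16) emits bfm<64>(rd, rn, (64 - 8) & 63, 16 - 1), i.e.
+    // immr = 56, imms = 15, which inserts rn[15:0] into rd[23:8].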
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        bic<datasize, setFlags>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
+    }
+
+    ALWAYS_INLINE void bl(int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(unconditionalBranchImmediate(true, offset));
+    }
+
+    ALWAYS_INLINE void blr(RegisterID rn)
+    {
+        insn(unconditionalBranchRegister(BranchType_CALL, rn));
+    }
+
+    ALWAYS_INLINE void br(RegisterID rn)
+    {
+        insn(unconditionalBranchRegister(BranchType_JMP, rn));
+    }
+
+    ALWAYS_INLINE void brk(uint16_t imm)
+    {
+        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
+    }
+    
+    template<int datasize>
+    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csinc<datasize>(rd, rn, rn, invert(cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csinv<datasize>(rd, rn, rn, invert(cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        add<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
+    {
+        add<datasize, S>(ARM64Registers::zr, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        add<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        add<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        sub<datasize, S>(ARM64Registers::zr, rn, imm12, shift);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+    {
+        sub<datasize, S>(ARM64Registers::zr, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        sub<datasize, S>(ARM64Registers::zr, rn, rm, extend, amount);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        sub<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csneg<datasize>(rd, rn, rn, invert(cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
+    {
+        csinc<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
+    {
+        csinv<datasize>(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        eon<datasize>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        eor<datasize>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
+    }
+    
+    template<int datasize>
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
+    {
+        CHECK_DATASIZE();
+        insn(extract(DATASIZE, rm, lsb, rn, rd));
+    }
+
+    ALWAYS_INLINE void hint(int imm)
+    {
+        insn(hintPseudo(imm));
+    }
+
+    ALWAYS_INLINE void hlt(uint16_t imm)
+    {
+        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldr<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        // Not calling the 5-argument form of ldrb; since the amount is omitted, S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrh(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        // Not calling the 5-argument form of ldrsb; since the amount is omitted, S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrsh<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrsw(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 2);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        ubfm<datasize>(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        lslv<datasize>(rd, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        ubfm<datasize>(rd, rn, shift, datasize - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        lsrv<datasize>(rd, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        msub<datasize>(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+    {
+        if (isSp(rd) || isSp(rm))
+            add<datasize>(rd, rm, UInt12(0));
+        else
+            orr<datasize>(rd, ARM64Registers::zr, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
+    {
+        orr<datasize>(rd, ARM64Registers::zr, imm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
+    }
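+    // An arbitrary 64-bit constant can be materialized with a movz/movk sequence; a
+    // sketch for illustration (a caller would typically skip halfwords that are zero):
+    //   movz<64>(rd, imm & 0xffff);
+    //   movk<64>(rd, (imm >> 16) & 0xffff, 16);
+    //   movk<64>(rd, (imm >> 32) & 0xffff, 32);
+    //   movk<64>(rd, (imm >> 48) & 0xffff, 48);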
+
+    template<int datasize>
+    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        madd<datasize>(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+    {
+        orn<datasize>(rd, ARM64Registers::zr, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        orn<datasize>(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+    {
+        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        sub<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
+    {
+        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        sbc<datasize, setFlags>(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    ALWAYS_INLINE void nop()
+    {
+        insn(nopPseudo());
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        orn<datasize>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        orr<datasize>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
+    }
+
+    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
+    {
+        insn(unconditionalBranchRegister(BranchType_RET, rn));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
+            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
+        else
+            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
+    {
+        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
+        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        rorv<datasize>(rd, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
+    {
+        extr<datasize>(rd, rs, rs, shift);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        sbfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        sbfm<datasize>(rd, rn, lsb, lsb + width - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
+    }
+
+    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        smsubl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
+    }
+
+    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        smaddl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        str<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        // Not calling the 5-argument form of strb; since the amount is omitted, S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        strh(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!shift || shift == 12);
+        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
+    }
+
+    template<int datasize, SetFlags setFlags = DontSetFlags>
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        if (isSp(rn)) {
+            ASSERT(shift == LSL);
+            sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
+        } else
+            insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
+    {
+        sbfm<datasize>(rd, rn, 0, 7);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
+    {
+        sbfm<datasize>(rd, rn, 0, 15);
+    }
+
+    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
+    {
+        sbfm<64>(rd, rn, 0, 31);
+    }
+
+    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(testAndBranchImmediate(false, imm, offset, rt));
+    }
+
+    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(testAndBranchImmediate(true, imm, offset, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+    {
+        and_<datasize, S>(ARM64Registers::zr, rn, rm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        and_<datasize, S>(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
+    {
+        and_<datasize, S>(ARM64Registers::zr, rn, imm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        ubfm<datasize>(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        ubfm<datasize>(rd, rn, lsb, lsb + width - 1);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
+    }
+
+    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        umsubl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
+    }
+
+    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        umaddl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
+    {
+        ubfm<datasize>(rd, rn, 0, 7);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
+    {
+        ubfm<datasize>(rd, rn, 0, 15);
+    }
+
+    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
+    {
+        ubfm<64>(rd, rn, 0, 31);
+    }
+
+    // Floating Point Instructions:
+
+    template<int datasize>
+    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMP0));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, static_cast<FPRegisterID>(0), vn, FPCmpOp_FCMPE0));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
+    {
+        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
+        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
+        ASSERT(dstsize != srcsize);
+        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
+        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
+        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
+    }
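+    // Note that the FMOV (immediate) encoding only has room for an 8-bit "modified
+    // immediate" (sign, 3-bit exponent, 4-bit fraction), so only a restricted set of
+    // doubles can be emitted this way; other constants go through a GPR fmov or a load.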
+
+    ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
+    {
+        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
+    }
+
+    ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
+    {
+        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldr<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate<datasize>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        ASSERT(datasize >= 32);
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
+    }
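+    // Note the argument order for the int-to-FP conversions (scvtf/ucvtf): the integer
+    // source width is passed as the first Datasize argument and the FP destination width
+    // as the second, mirroring (in reverse) the fcvt* FP-to-int conversions above.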
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        str<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
+    }
+
+    // Admin methods:
+
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        return m_buffer.label();
+    }
+
+    AssemblerLabel labelForWatchpoint()
+    {
+        AssemblerLabel result = m_buffer.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
+
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = m_buffer.label();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            nop();
+            result = m_buffer.label();
+        }
+        return result;
+    }
+
+    AssemblerLabel align(int alignment)
+    {
+        ASSERT(!(alignment & 3));
+        while (!m_buffer.isAligned(alignment))
+            brk(0);
+        return label();
+    }
+    
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        ASSERT(label.isSet());
+        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+    }
+    
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+
+    int executableOffsetFor(int location)
+    {
+        if (!location)
+            return 0;
+        return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
+    }
+    
+    PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
+    {
+        return m_buffer.executableCopy(vm, ownerUID, effort);
+    }
+
+    void* unlinkedCode() { return m_buffer.data(); }
+    size_t codeSize() const { return m_buffer.codeSize(); }
+
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        ASSERT(call.isSet());
+        return call.m_offset;
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
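+    //
+    // A typical flow, roughly: branches emitted during assembly are recorded with
+    // linkJump() below while the code still lives in the AssemblerBuffer; when the code
+    // is copied into executable memory (see LinkBuffer), the recorded jumps are resolved
+    // (ultimately via the same linkJumpOrCall()/relinkJumpOrCall() helpers used here) and
+    // the instruction cache is flushed.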
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to)
+    {
+        ASSERT(from.isSet());
+        ASSERT(to.isSet());
+        relinkJumpOrCall<false>(addressOf(from), addressOf(to));
+    }
+    
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        relinkJumpOrCall<false>(addressOf(code, from), to);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        linkJumpOrCall<true>(addressOf(code, from) - 1, to);
+    }
+
+    static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
+    {
+        linkPointer(addressOf(code, where), valuePtr);
+    }
+
+    static void replaceWithJump(void* where, void* to)
+    {
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+        *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset));
+        cacheFlush(where, sizeof(int));
+    }
+    
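+    // A replacement jump (see replaceWithJump() above) is a single 4-byte unconditional branch.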
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return 4;
+    }
+    
+    static void replaceWithLoad(void* where)
+    {
+        Datasize sf;
+        AddOp op;
+        SetFlags S;
+        int shift;
+        int imm12;
+        RegisterID rn;
+        RegisterID rd;
+        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
+            ASSERT(sf == Datasize_64);
+            ASSERT(op == AddOp_ADD);
+            ASSERT(!S);
+            ASSERT(!shift);
+            ASSERT(!(imm12 & ~0xff8));
+            *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
+            cacheFlush(where, sizeof(int));
+        }
+#if !ASSERT_DISABLED
+        else {
+            MemOpSize size;
+            bool V;
+            MemOp opc;
+            int imm12;
+            RegisterID rn;
+            RegisterID rt;
+            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
+            ASSERT(size == MemOpSize_64);
+            ASSERT(!V);
+            ASSERT(opc == MemOp_LOAD);
+            ASSERT(!(imm12 & ~0x1ff));
+        }
+#endif
+    }
+
+    static void replaceWithAddressComputation(void* where)
+    {
+        MemOpSize size;
+        bool V;
+        MemOp opc;
+        int imm12;
+        RegisterID rn;
+        RegisterID rt;
+        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
+            ASSERT(size == MemOpSize_64);
+            ASSERT(!V);
+            ASSERT(opc == MemOp_LOAD);
+            ASSERT(!(imm12 & ~0x1ff));
+            *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
+            cacheFlush(where, sizeof(int));
+        }
+#if !ASSERT_DISABLED
+        else {
+            Datasize sf;
+            AddOp op;
+            SetFlags S;
+            int shift;
+            int imm12;
+            RegisterID rn;
+            RegisterID rd;
+            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
+            ASSERT(sf == Datasize_64);
+            ASSERT(op == AddOp_ADD);
+            ASSERT(!S);
+            ASSERT(!shift);
+            ASSERT(!(imm12 & ~0xff8));
+        }
+#endif
+    }
+
+    static void repatchPointer(void* where, void* valuePtr)
+    {
+        linkPointer(static_cast<int*>(where), valuePtr, true);
+    }
+
+    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
+    {
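+        // Pointers are materialized as a MOVZ/MOVK/MOVK sequence writing the low three
+        // 16-bit halfwords (48 bits) of the address.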
+        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
+        address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+        address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
+
+        if (flush)
+            cacheFlush(address, sizeof(int) * 3);
+    }
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        int* address = static_cast<int*>(where);
+
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
+        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));
+
+        if (value >= 0) {
+            address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        } else {
+            address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
+            address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        }
+
+        cacheFlush(where, sizeof(int) * 2);
+    }
+
+    static void* readPointer(void* where)
+    {
+        int* address = static_cast<int*>(where);
+
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rdFirst, rd;
+
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+        uintptr_t result = imm16;
+
+        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
+        result |= static_cast<uintptr_t>(imm16) << 16;
+
+        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
+        result |= static_cast<uintptr_t>(imm16) << 32;
+
+        return reinterpret_cast<void*>(result);
+    }
+
+    static void* readCallTarget(void* from)
+    {
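+        // 'from' points just past the BLR of a MOVZ/MOVK/MOVK/BLR call sequence, so the
+        // pointer load starts four instructions earlier.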
+        return readPointer(reinterpret_cast<int*>(from) - 4);
+    }
+
+    static void relinkJump(void* from, void* to)
+    {
+        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+        cacheFlush(from, sizeof(int));
+    }
+    
+    static void relinkCall(void* from, void* to)
+    {
+        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to);
+        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
+    }
+    
+    static void repatchCompact(void* where, int32_t value)
+    {
+        ASSERT(!(value & ~0x3ff8));
+
+        MemOpSize size;
+        bool V;
+        MemOp opc;
+        int imm12;
+        RegisterID rn;
+        RegisterID rt;
+        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
+        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.
+
+        if (size == MemOpSize_32)
+            imm12 = encodePositiveImmediate<32>(value);
+        else
+            imm12 = encodePositiveImmediate<64>(value);
+        *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
+
+        cacheFlush(where, sizeof(int));
+    }
+
+    unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+    static void cacheFlush(void* code, size_t size)
+    {
+#if OS(IOS)
+        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+    }
+
+    // Assembler admin methods:
+
+    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+    {
+        return a.from() < b.from();
+    }
+
+    bool canCompact(JumpType jumpType)
+    {
+        // Fixed jumps cannot be compacted
+        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
+    }
+
+    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    {
+        switch (jumpType) {
+        case JumpFixed:
+            return LinkInvalid;
+        case JumpNoConditionFixedSize:
+            return LinkJumpNoCondition;
+        case JumpConditionFixedSize:
+            return LinkJumpCondition;
+        case JumpCompareAndBranchFixedSize:
+            return LinkJumpCompareAndBranch;
+        case JumpTestBitFixedSize:
+            return LinkJumpTestBit;
+        case JumpNoCondition:
+            return LinkJumpNoCondition;
+        case JumpCondition: {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
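+            // The shift pair tests whether the byte offset sign-extends from 21 bits,
+            // i.e. the target is within the +/-1MB reach of a 19-bit word-offset branch.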
+            if (((relative << 43) >> 43) == relative)
+                return LinkJumpConditionDirect;
+
+            return LinkJumpCondition;
+            }
+        case JumpCompareAndBranch:  {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+            if (((relative << 43) >> 43) == relative)
+                return LinkJumpCompareAndBranchDirect;
+
+            return LinkJumpCompareAndBranch;
+        }
+        case JumpTestBit:   {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+            if (((relative << 50) >> 50) == relative)
+                return LinkJumpTestBitDirect;
+
+            return LinkJumpTestBit;
+        }
+        default:
+            ASSERT_NOT_REACHED();
+        }
+
+        return LinkJumpNoCondition;
+    }
+
+    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    {
+        JumpLinkType linkType = computeJumpType(record.type(), from, to);
+        record.setLinkType(linkType);
+        return linkType;
+    }
+
+    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+    {
+        int32_t ptr = regionStart / sizeof(int32_t);
+        const int32_t end = regionEnd / sizeof(int32_t);
+        int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
+        while (ptr < end)
+            offsets[ptr++] = offset;
+    }
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+    {
+        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+        return m_jumpsToLink;
+    }
+
+    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+    {
+        switch (record.linkType()) {
+        case LinkJumpNoCondition:
+            linkJumpOrCall<false>(reinterpret_cast<int*>(from), to);
+            break;
+        case LinkJumpConditionDirect:
+            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to);
+            break;
+        case LinkJumpCondition:
+            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to);
+            break;
+        case LinkJumpCompareAndBranchDirect:
+            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+            break;
+        case LinkJumpCompareAndBranch:
+            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+            break;
+        case LinkJumpTestBitDirect:
+            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to);
+            break;
+        case LinkJumpTestBit:
+            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to);
+            break;
+        default:
+            ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+private:
+    template<Datasize size>
+    static bool checkMovk(int insn, int _hw, RegisterID _rd)
+    {
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);
+
+        return expected
+            && sf == size
+            && opc == MoveWideOp_K
+            && hw == _hw
+            && rd == _rd;
+    }
+
+    static void linkPointer(int* address, void* valuePtr, bool flush = false)
+    {
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
+        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));
+
+        setPointer(address, valuePtr, rd, flush);
+    }
+
+    template<bool isCall>
+    static void linkJumpOrCall(int* from, void* to)
+    {
+        bool link;
+        int imm26;
+        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);
+
+        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
+        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+
+        *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
+    }
+
+    template<bool isDirect>
+    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
+            if (!isDirect)
+                *(from + 1) = nopPseudo();
+        } else {
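+            // Out of direct range: emit the compare-and-branch with the inverted condition
+            // to skip the next instruction, then an unconditional branch with the full displacement.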
+            *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
+            linkJumpOrCall<false>(from + 1, to);
+        }
+    }
+
+    template<bool isDirect>
+    static void linkConditionalBranch(Condition condition, int* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            *from = conditionalBranchImmediate(static_cast<int>(offset), condition);
+            if (!isDirect)
+                *(from + 1) = nopPseudo();
+        } else {
+            *from = conditionalBranchImmediate(2, invert(condition));
+            linkJumpOrCall<false>(from + 1, to);
+        }
+    }
+
+    template<bool isDirect>
+    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
+            if (!isDirect)
+                *(from + 1) = nopPseudo();
+        } else {
+            *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
+            linkJumpOrCall<false>(from + 1, to);
+        }
+    }
+
+    template<bool isCall>
+    static void relinkJumpOrCall(int* from, void* to)
+    {
+        if (!isCall && disassembleNop(from)) {
+            unsigned op01;
+            int imm19;
+            Condition condition;
+            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);
+
+            if (isConditionalBranchImmediate) {
+                ASSERT_UNUSED(op01, !op01);
+                ASSERT_UNUSED(isCall, !isCall);
+
+                if (imm19 == 8)
+                    condition = invert(condition);
+
+                linkConditionalBranch<false>(condition, from - 1, to);
+                return;
+            }
+
+            Datasize opSize;
+            bool op;
+            RegisterID rt;
+            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);
+
+            if (isCompareAndBranchImmediate) {
+                if (imm19 == 8)
+                    op = !op;
+
+                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to);
+                return;
+            }
+
+            int imm14;
+            unsigned bitNumber;
+            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);
+
+            if (isTestAndBranchImmediate) {
+                if (imm14 == 8)
+                    op = !op;
+
+                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to);
+                return;
+            }
+        }
+
+        linkJumpOrCall<isCall>(from, to);
+    }
+
+    static int* addressOf(void* code, AssemblerLabel label)
+    {
+        return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
+    }
+
+    int* addressOf(AssemblerLabel label)
+    {
+        return addressOf(m_buffer.data(), label);
+    }
+
+    static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
+    static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
+    static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
+
+    static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        op = static_cast<AddOp>((insn >> 30) & 1);
+        S = static_cast<SetFlags>((insn >> 29) & 1);
+        shift = (insn >> 22) & 3;
+        imm12 = (insn >> 10) & 0xfff;
+        rn = disassembleXOrSp((insn >> 5) & 0x1f);
+        rd = disassembleXOrZrOrSp(S, insn & 0x1f);
+        return (insn & 0x1f000000) == 0x11000000;
+    }
+
+    static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        size = static_cast<MemOpSize>((insn >> 30) & 3);
+        V = (insn >> 26) & 1;
+        opc = static_cast<MemOp>((insn >> 22) & 3);
+        imm12 = (insn >> 10) & 0xfff;
+        rn = disassembleXOrSp((insn >> 5) & 0x1f);
+        rt = disassembleXOrZr(insn & 0x1f);
+        return (insn & 0x3b000000) == 0x39000000;
+    }
+
+    static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        opc = static_cast<MoveWideOp>((insn >> 29) & 3);
+        hw = (insn >> 21) & 3;
+        imm16 = insn >> 5;
+        rd = disassembleXOrZr(insn & 0x1f);
+        return (insn & 0x1f800000) == 0x12800000;
+    }
+
+    static bool disassembleNop(void* address)
+    {
+        unsigned insn = *static_cast<unsigned*>(address);
+        return insn == 0xd503201f;
+    }
+
+    static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        op = (insn >> 24) & 0x1;
+        imm19 = (insn << 8) >> 13;
+        rt = static_cast<RegisterID>(insn & 0x1f);
+        return (insn & 0x7e000000) == 0x34000000;
+    }
+
+    static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
+    {
+        int insn = *static_cast<int*>(address);
+        op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
+        imm19 = (insn << 8) >> 13;
+        condition = static_cast<Condition>(insn & 0xf);
+        return (insn & 0xfe000000) == 0x54000000;
+    }
+
+    static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        op = (insn >> 24) & 0x1;
+        imm14 = (insn << 13) >> 18;
+        bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
+        rt = static_cast<RegisterID>(insn & 0x1f);
+        return (insn & 0x7e000000) == 0x36000000;
+    }
+
+    static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
+    {
+        int insn = *static_cast<int*>(address);
+        op = (insn >> 31) & 1;
+        imm26 = (insn << 6) >> 6;
+        return (insn & 0x7c000000) == 0x14000000;
+    }
+
+    static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; }
+    static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; }
+    static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
+    static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
+
+    ALWAYS_INLINE void insn(int instruction)
+    {
+        m_buffer.putInt(instruction);
+    }
+
+    ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(imm3 < 5);
+        // The only allocated value for opt is 0.
+        const int opt = 0;
+        return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(shift < 2);
+        ASSERT(isUInt12(imm12));
+        return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(shift < 3);
+        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+        return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
+    {
+        const int opcode2 = 0;
+        return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(immr < (sf ? 64 : 32));
+        ASSERT(imms < (sf ? 64 : 32));
+        const int N = sf;
+        return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    // 'op' means negate
+    ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
+    {
+        ASSERT(imm19 == (imm19 << 13) >> 13);
+        return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
+    {
+        ASSERT(imm19 == (imm19 << 13) >> 13);
+        ASSERT(!(cond & ~15));
+        // The only allocated values for o1 & o0 are 0.
+        const int o1 = 0;
+        const int o0 = 0;
+        return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
+    }
+
+    ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
+    {
+        ASSERT(!(imm5 & ~0x1f));
+        ASSERT(nzcv < 16);
+        const int S = 1;
+        const int o2 = 0;
+        const int o3 = 0;
+        return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+    }
+
+    ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
+    {
+        ASSERT(nzcv < 16);
+        const int S = 1;
+        const int o2 = 0;
+        const int o3 = 0;
+        return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+    }
+
+    // 'op' means negate
+    // 'op2' means increment
+    ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        const int opcode2 = 0;
+        return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
+    {
+        int op54 = opcode >> 4;
+        int op31 = (opcode >> 1) & 7;
+        int op0 = opcode & 1;
+        return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
+    {
+        ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
+        const int op2 = 0;
+        return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
+    }
+
+    ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(imms < (sf ? 64 : 32));
+        const int op21 = 0;
+        const int N = sf;
+        const int o0 = 0;
+        return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
+    {
+        const int M = 0;
+        const int S = 0;
+        const int op = 0;
+        return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
+    }
+
+    ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
+    {
+        ASSERT(nzcv < 16);
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
+    }
+
+    ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        const int imm5 = 0;
+        return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int S = 0;
+        return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
+    {
+        return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
+    {
+        return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
+    }
+
+    // 'o1' means negate
+    ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
+    {
+        ASSERT(((imm19 << 13) >> 13) == imm19);
+        return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
+    {
+        return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    // 'S' means shift rm
+    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16 bit.
+        return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        ASSERT(isInt9(imm9));
+        return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isUInt12(imm12));
+        return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
+    }
+
+    ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
+        return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
+    }
+
+    // 'N' means negate rm
+    ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+        return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
+    {
+        ASSERT(hw < (sf ? 4 : 2));
+        return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
+    }
+
+    // 'op' means link
+    ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
+    {
+        ASSERT(imm26 == (imm26 << 6) >> 6);
+        return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
+    }
+
+    // 'op' means page
+    ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
+    {
+        ASSERT(imm21 == (imm21 << 11) >> 11);
+        int32_t immlo = imm21 & 3;
+        int32_t immhi = (imm21 >> 2) & 0x7ffff;
+        return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
+    {
+        return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int hintPseudo(int imm)
+    {
+        ASSERT(!(imm & ~0x7f));
+        return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE static int nopPseudo()
+    {
+        return hintPseudo(0);
+    }
+    
+    // 'op' means negate
+    ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
+    {
+        ASSERT(!(b50 & ~0x3f));
+        ASSERT(imm14 == (imm14 << 18) >> 18);
+        int b5 = b50 >> 5;
+        int b40 = b50 & 0x1f;
+        return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
+    {
+        // The only allocated value for op2 is 0x1f; for op3 and op4 it is 0.
+        const int op2 = 0x1f;
+        const int op3 = 0;
+        const int op4 = 0;
+        return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
+    }
+
+    AssemblerBuffer m_buffer;
+    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#undef CHECK_DATASIZE_OF
+#undef DATASIZE_OF
+#undef MEMOPSIZE_OF
+#undef CHECK_DATASIZE
+#undef DATASIZE
+#undef MEMOPSIZE
+#undef CHECK_FP_MEMOP_DATASIZE
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
+
+#endif // ARM64Assembler_h
index 18e0a34..fe15a4f 100644 (file)
@@ -45,6 +45,15 @@ inline bool isARMv7s()
 #endif
 }
 
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
 inline bool isX86()
 {
 #if CPU(X86_64) || CPU(X86)
@@ -291,7 +300,7 @@ public:
         {
         }
 
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(ARM64)
         explicit TrustedImm64(TrustedImmPtr ptr)
             : m_value(ptr.asIntptr())
         {
@@ -307,7 +316,7 @@ public:
             : TrustedImm64(value)
         {
         }
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(ARM64)
         explicit Imm64(TrustedImmPtr ptr)
             : TrustedImm64(ptr)
         {
@@ -346,8 +355,9 @@ public:
         Label(AbstractMacroAssembler<AssemblerType>* masm)
             : m_label(masm->m_assembler.label())
         {
+            masm->invalidateAllTempRegisters();
         }
-        
+
         bool isSet() const { return m_label.isSet(); }
     private:
         AssemblerLabel m_label;
@@ -400,7 +410,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-        
+
         bool isSet() const { return m_label.isSet(); }
         
     private:
@@ -448,7 +458,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-    
+
         DataLabelCompact(AssemblerLabel label)
             : m_label(label)
         {
@@ -527,6 +537,33 @@ public:
             , m_condition(condition)
         {
         }
+#elif CPU(ARM64)
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_is64Bit(is64Bit)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_bitNumber(bitNumber)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+        }
 #elif CPU(SH4)
         Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
             : m_label(jmp)
@@ -549,12 +586,21 @@ public:
 
         void link(AbstractMacroAssembler<AssemblerType>* masm) const
         {
+            masm->invalidateAllTempRegisters();
+
 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
             masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
 #endif
 
 #if CPU(ARM_THUMB2)
             masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
 #elif CPU(SH4)
             masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
 #else
@@ -570,6 +616,13 @@ public:
 
 #if CPU(ARM_THUMB2)
             masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
 #else
             masm->m_assembler.linkJump(m_label, label.m_label);
 #endif
@@ -582,6 +635,12 @@ public:
 #if CPU(ARM_THUMB2)
         ARMv7Assembler::JumpType m_type;
         ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+        ARM64Assembler::JumpType m_type;
+        ARM64Assembler::Condition m_condition;
+        bool m_is64Bit;
+        unsigned m_bitNumber;
+        ARM64Assembler::RegisterID m_compareRegister;
 #endif
 #if CPU(SH4)
         SH4Assembler::JumpType m_type;
@@ -780,6 +839,70 @@ protected:
     static bool shouldBlindForSpecificArch(uint32_t) { return true; }
     static bool shouldBlindForSpecificArch(uint64_t) { return true; }
 
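+    // Remembers the value last placed in a scratch register along with a per-register valid bit;
+    // labels and linked jumps clear the valid bits via invalidateAllTempRegisters(), so a stale
+    // cached value is never reused.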
+    class CachedTempRegister {
+        friend class DataLabelPtr;
+        friend class DataLabel32;
+        friend class DataLabelCompact;
+        friend class Jump;
+        friend class Label;
+
+    public:
+        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
+            : m_masm(masm)
+            , m_registerID(registerID)
+            , m_value(0)
+            , m_validBit(1 << static_cast<unsigned>(registerID))
+        {
+            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+        }
+
+        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+        bool value(intptr_t& value)
+        {
+            value = m_value;
+            return m_masm->isTempRegisterValid(m_validBit);
+        }
+
+        void setValue(intptr_t value)
+        {
+            m_value = value;
+            m_masm->setTempRegisterValid(m_validBit);
+        }
+
+        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+    private:
+        AbstractMacroAssembler<AssemblerType>* m_masm;
+        RegisterID m_registerID;
+        intptr_t m_value;
+        unsigned m_validBit;
+    };
+
+    ALWAYS_INLINE void invalidateAllTempRegisters()
+    {
+        m_tempRegistersValidBits = 0;
+    }
+
+    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+    {
+        return (m_tempRegistersValidBits & registerMask);
+    }
+
+    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits &= ~registerMask;
+    }
+
+    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits |= registerMask;
+    }
+
+    unsigned m_tempRegistersValidBits;
+
     friend class LinkBuffer;
     friend class RepatchBuffer;
 
index 42b5667..ed6899a 100644 (file)
@@ -59,17 +59,10 @@ LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format,
     return result;
 }
 
-void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+#if ENABLE(BRANCH_COMPACTION)
+template <typename InstructionType>
+void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort)
 {
-    ASSERT(!m_code);
-#if !ENABLE(BRANCH_COMPACTION)
-    m_executableMemory = m_assembler->m_assembler.executableCopy(*m_vm, ownerUID, effort);
-    if (!m_executableMemory)
-        return;
-    m_code = m_executableMemory->start();
-    m_size = m_assembler->m_assembler.codeSize();
-    ASSERT(m_code);
-#else
     m_initialSize = m_assembler->m_assembler.codeSize();
     m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, m_initialSize, ownerUID, effort);
     if (!m_executableMemory)
@@ -89,9 +82,9 @@ void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
             
         // Copy the instructions from the last jump to the current one.
         size_t regionSize = jumpsToLink[i].from() - readPtr;
-        uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
-        uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
-        uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
+        InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
+        InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
+        InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
         ASSERT(!(regionSize % 2));
         ASSERT(!(readPtr % 2));
         ASSERT(!(writePtr % 2));
@@ -141,6 +134,24 @@ void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
 #if DUMP_CODE
     dumpCode(m_code, m_size);
 #endif
+}
+#endif
+
+
+void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+    ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+    m_executableMemory = m_assembler->m_assembler.executableCopy(*m_vm, ownerUID, effort);
+    if (!m_executableMemory)
+        return;
+    m_code = m_executableMemory->start();
+    m_size = m_assembler->m_assembler.codeSize();
+    ASSERT(m_code);
+#elif CPU(ARM_THUMB2)
+    copyCompactAndLinkCode<uint16_t>(ownerUID, effort);
+#elif CPU(ARM64)
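+    // ARM64 instructions are a fixed 32 bits wide, so compaction copies 32-bit units
+    // (the Thumb-2 path above copies 16-bit halfwords).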
+    copyCompactAndLinkCode<uint32_t>(ownerUID, effort);
 #endif
 }
 
index 392dad9..7005278 100644 (file)
@@ -243,6 +243,10 @@ private:
     }
 
     void linkCode(void* ownerUID, JITCompilationEffort);
+#if ENABLE(BRANCH_COMPACTION)
+    template <typename InstructionType>
+    void copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort);
+#endif
 
     void performFinalization();
 
index 0a188e9..91387c3 100644 (file)
 #include "MacroAssemblerARMv7.h"
 namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
 
+#elif CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
+
 #elif CPU(ARM_TRADITIONAL)
 #include "MacroAssemblerARM.h"
 namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
@@ -115,6 +119,11 @@ public:
     using MacroAssemblerBase::urshift32;
     using MacroAssemblerBase::xor32;
 
+    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value == static_cast<int32_t>(value);
+    }
+
     static const double twoToThe32; // This is super useful for some double code.
 
     // Utilities used by the DFG JIT.
@@ -211,7 +220,28 @@ public:
         storePtr(imm, addressForPoke(index));
     }
 
-#if CPU(X86_64)
+#if !CPU(ARM64)
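+    // ARM64 has no push/pop instructions and keeps the stack pointer 16-byte aligned, so the
+    // ARM64 macro assembler is expected to supply its own pushToSave()/popToRestore().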
+    void pushToSave(RegisterID src)
+    {
+        push(src);
+    }
+    void popToRestore(RegisterID dest)
+    {
+        pop(dest);
+    }
+    void pushToSave(FPRegisterID src)
+    {
+        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+        storeDouble(src, stackPointerRegister);
+    }
+    void popToRestore(FPRegisterID dest)
+    {
+        loadDouble(stackPointerRegister, dest);
+        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+    }
+#endif // !CPU(ARM64)
+
+#if CPU(X86_64) || CPU(ARM64)
     void peek64(RegisterID dest, int index = 0)
     {
         load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
@@ -288,7 +318,7 @@ public:
         branchTestPtr(cond, reg).linkTo(target, this);
     }
 
-#if !CPU(ARM_THUMB2)
+#if !CPU(ARM_THUMB2) && !CPU(ARM64)
     PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
     {
         return PatchableJump(branchPtr(cond, left, right));
@@ -308,14 +338,12 @@ public:
     {
         return PatchableJump(branchTest32(cond, reg, mask));
     }
-#endif // !CPU(ARM_THUMB2)
 
-#if !CPU(ARM)
     PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
     {
         return PatchableJump(branch32(cond, reg, imm));
     }
-#endif // !(CPU(ARM)
+#endif
 
     void jump(Label target)
     {
@@ -360,7 +388,7 @@ public:
     // Ptr methods
     // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
     // FIXME: should this use a test for 32-bitness instead of this specific exception?
-#if !CPU(X86_64)
+#if !CPU(X86_64) && !CPU(ARM64)
     void addPtr(Address src, RegisterID dest)
     {
         add32(src, dest);
@@ -1319,7 +1347,7 @@ public:
         storePtr(value, addressForPoke(index));
     }
     
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(ARM64)
     void poke(Imm64 value, int index = 0)
     {
         store64(value, addressForPoke(index));
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
new file mode 100644 (file)
index 0000000..8e6c163
--- /dev/null
@@ -0,0 +1,2650 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssemblerARM64_h
+#define MacroAssemblerARM64_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARM64Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/MathExtras.h>
+
+namespace JSC {
+
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
+    static const RegisterID dataTempRegister = ARM64Registers::ip0;
+    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
+    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
+    static const intptr_t maskHalfWord0 = 0xffffl;
+    static const intptr_t maskHalfWord1 = 0xffff0000l;
+    static const intptr_t maskUpperWord = 0xffffffff00000000l;
+
+    // 4 instructions - 3 to load the function pointer, plus a BLR.
+    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
+    
+public:
+    MacroAssemblerARM64()
+        : m_dataMemoryTempRegister(this, dataTempRegister)
+        , m_cachedMemoryTempRegister(this, memoryTempRegister)
+        , m_makeJumpPatchable(false)
+    {
+    }
+
+    typedef ARM64Registers::FPRegisterID FPRegisterID;
+    typedef ARM64Assembler::LinkRecord LinkRecord;
+    typedef ARM64Assembler::JumpType JumpType;
+    typedef ARM64Assembler::JumpLinkType JumpLinkType;
+    typedef ARM64Assembler::Condition Condition;
+
+    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
+    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+    int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
+
+    static const Scale ScalePtr = TimesEight;
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
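+        // I.e. the offset must be a multiple of 8 and at most 0x3ff8 bytes, so that
+        // it fits the unsigned, size-scaled 12-bit load/store immediate.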
+        return !(value & ~0x3ff8);
+    }
+
+    enum RelationalCondition {
+        Equal = ARM64Assembler::ConditionEQ,
+        NotEqual = ARM64Assembler::ConditionNE,
+        Above = ARM64Assembler::ConditionHI,
+        AboveOrEqual = ARM64Assembler::ConditionHS,
+        Below = ARM64Assembler::ConditionLO,
+        BelowOrEqual = ARM64Assembler::ConditionLS,
+        GreaterThan = ARM64Assembler::ConditionGT,
+        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
+        LessThan = ARM64Assembler::ConditionLT,
+        LessThanOrEqual = ARM64Assembler::ConditionLE
+    };
+
+    enum ResultCondition {
+        Overflow = ARM64Assembler::ConditionVS,
+        Signed = ARM64Assembler::ConditionMI,
+        PositiveOrZero = ARM64Assembler::ConditionPL,
+        Zero = ARM64Assembler::ConditionEQ,
+        NonZero = ARM64Assembler::ConditionNE
+    };
+
+    enum ZeroCondition {
+        IsZero = ARM64Assembler::ConditionEQ,
+        IsNonZero = ARM64Assembler::ConditionNE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARM64Assembler::ConditionEQ,
+        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+        DoubleGreaterThan = ARM64Assembler::ConditionGT,
+        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
+        DoubleLessThan = ARM64Assembler::ConditionLO,
+        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
+        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
+        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
+        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
+        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
+    };
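+    // Note: DoubleNotEqual and DoubleEqualOrUnordered cannot be expressed as a single
+    // ARM64 condition code; branchDouble() below checks the unordered case separately.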
+
+    static const RegisterID stackPointerRegister = ARM64Registers::sp;
+    static const RegisterID linkRegister = ARM64Registers::lr;
+
+
+    // Integer operations:
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.add<32>(dest, dest, src);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, dest, dest);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value))
+            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
+        else {
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.add<32>(dest, src, dataTempRegister);
+        }
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, getCachedDataTempRegisterIDAndInvalidate());
+        add32(dataTempRegister, dest);
+    }
+
+    void add64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.add<64>(dest, dest, src);
+    }
+
+    void add64(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(TrustedImm64 imm, RegisterID dest)
+    {
+        intptr_t immediate = imm.m_value;
+
+        if (isUInt12(immediate)) {
+            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+            return;
+        }
+        if (isUInt12(-immediate)) {
+            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, src, dataTempRegister);
+    }
+
+    void add64(TrustedImm32 imm, Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store64(dataTempRegister, address);
+    }
+
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store64(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store64(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store64(dataTempRegister, address.m_ptr);
+    }
+
+    void add64(Address src, RegisterID dest)
+    {
+        load64(src, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(AbsoluteAddress src, RegisterID dest)
+    {
+        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        and32(dest, src, dest);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.and_<32>(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        and32(imm, dest, dest);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<32>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<32>(dest, src, dataTempRegister);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        and32(dataTempRegister, dest);
+    }
+
+    void and64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.and_<64>(dest, dest, src);
+    }
+
+    void and64(TrustedImm32 imm, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<64>(dest, dest, dataTempRegister);
+    }
+
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.clz<32>(dest, src);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsl<32>(dest, src, shiftAmount);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift32(dest, shiftAmount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        lshift32(dest, imm, dest);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mul<32>(dest, dest, src);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.mul<32>(dest, src, dataTempRegister);
+    }
+
+    void neg32(RegisterID dest)
+    {
+        m_assembler.neg<32>(dest, dest);
+    }
+
+    void neg64(RegisterID dest)
+    {
+        m_assembler.neg<64>(dest, dest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        or32(dest, src, dest);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orr<32>(dest, op1, op2);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        or32(imm, dest, dest);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<32>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<32>(dest, src, dataTempRegister);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void or64(RegisterID src, RegisterID dest)
+    {
+        or64(dest, src, dest);
+    }
+
+    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orr<64>(dest, op1, op2);
+    }
+
+    void or64(TrustedImm32 imm, RegisterID dest)
+    {
+        or64(imm, dest, dest);
+    }
+
+    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, src, dataTempRegister);
+    }
+    
+    void or64(TrustedImm64 imm, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, dest, dataTempRegister);
+    }
+
+    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+    {
+        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.asr<32>(dest, src, shiftAmount);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift32(dest, shiftAmount, dest);
+    }
+    
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift32(dest, imm, dest);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub<32>(dest, dest, src);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<32>(dest, dest, dataTempRegister);
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, getCachedDataTempRegisterIDAndInvalidate());
+        sub32(dataTempRegister, dest);
+    }
+
+    void sub64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub<64>(dest, dest, src);
+    }
+    
+    void sub64(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<64>(dest, dest, dataTempRegister);
+    }
+    
+    void sub64(TrustedImm64 imm, RegisterID dest)
+    {
+        intptr_t immediate = imm.m_value;
+
+        if (isUInt12(immediate)) {
+            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+            return;
+        }
+        if (isUInt12(-immediate)) {
+            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<64>(dest, dest, dataTempRegister);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsr<32>(dest, src, shiftAmount);
+    }
+    
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift32(dest, shiftAmount, dest);
+    }
+    
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        urshift32(dest, imm, dest);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        xor32(dest, src, dest);
+    }
+
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eor<32>(dest, op1, op2);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        xor32(imm, dest, dest);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<32>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<32>(dest, dest, logicalImm);
+                return;
+            }
+
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<32>(dest, src, dataTempRegister);
+        }
+    }
+
+    void xor64(RegisterID src, Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
+        store64(dataTempRegister, address);
+    }
+
+    void xor64(RegisterID src, RegisterID dest)
+    {
+        xor64(dest, src, dest);
+    }
+
+    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eor<64>(dest, op1, op2);
+    }
+
+    void xor64(TrustedImm32 imm, RegisterID dest)
+    {
+        xor64(imm, dest, dest);
+    }
+
+    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<64>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<64>(dest, dest, logicalImm);
+                return;
+            }
+
+            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<64>(dest, src, dataTempRegister);
+        }
+    }
+
+
+    // Memory access operations:
+
+    void load64(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void load64(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void load64(const void* address, RegisterID dest)
+    {
+        load<64>(address, dest);
+    }
+
+    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+    
+    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        DataLabelCompact label(this);
+        m_assembler.ldr<64>(dest, address.base, address.offset);
+        return label;
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+        ASSERT(!(address.offset & ~0xff8));
+        m_assembler.ldr<64>(dest, address.base, address.offset);
+        return result;
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        load<32>(address, dest);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+    
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        DataLabelCompact label(this);
+        m_assembler.ldr<32>(dest, address.base, address.offset);
+        return label;
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrh(dest, address.base, memoryTempRegister);
+    }
+    
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrh(dest, address.base, memoryTempRegister);
+    }
+    
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(address, dest);
+    }
+
+    void load16Signed(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrb(dest, address.base, memoryTempRegister);
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrb(dest, address.base, memoryTempRegister);
+    }
+    
+    void load8(const void* address, RegisterID dest)
+    {
+        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    void load8Signed(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void store64(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<64>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+
+    void store64(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+    
+    void store64(RegisterID src, const void* address)
+    {
+        store<64>(src, address);
+    }
+
+    void store64(TrustedImm64 imm, ImplicitAddress address)
+    {
+        if (!imm.m_value) {
+            store64(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        store64(dataTempRegister, address);
+    }
+
+    void store64(TrustedImm64 imm, BaseIndex address)
+    {
+        if (!imm.m_value) {
+            store64(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        store64(dataTempRegister, address);
+    }
+    
+    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<32>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void store32(RegisterID src, const void* address)
+    {
+        store<32>(src, address);
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    void store32(TrustedImm32 imm, const void* address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, m_dataMemoryTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.strh(src, address.base, memoryTempRegister);
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.strb(src, address.base, memoryTempRegister);
+    }
+
+    void store8(RegisterID src, void* address)
+    {
+        move(ImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strb(src, memoryTempRegister, 0);
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        if (!imm.m_value) {
+            store8(ARM64Registers::zr, address);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        store8(dataTempRegister, address);
+    }
+
+
+    // Floating-point operations:
+
+    static bool supportsFloatingPoint() { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+
+    void absDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fabs<64>(dest, src);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        addDouble(dest, src, dest);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fadd<64>(dest, op1, op2);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(address.m_ptr, fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void ceilDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintp<64>(dest, src);
+    }
+
+    void floorDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintm<64>(dest, src);
+    }
+
+    // Convert 'src' to an integer, and place the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+    {
+        m_assembler.fcvtns<32, 64>(dest, src);
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.fcmp<64>(left, right);
+
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump result = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered or equal.
+            Jump result = jump();
+            notEqual.link(this);
+            return result;
+        }
+        return makeBranch(cond);
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.fcmp_0<64>(reg);
+        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+        Jump result = makeBranch(ARM64Assembler::ConditionNE);
+        unordered.link(this);
+        return result;
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.fcmp_0<64>(reg);
+        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+        unordered.link(this);
+        // We get here if either unordered or equal.
+        Jump result = jump();
+        notEqual.link(this);
+        return result;
+    }
+
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
+        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
+        zeroExtend32ToPtr(dataTempRegister, dest);
+        // Check that the low 32 bits sign-extend to be equal to the full value.
+        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
+        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
+    }
+
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // Truncate to a 64-bit integer in dest; the result is in the low 32 bits.
+        m_assembler.fcvtzs<64, 64>(dest, src);
+        // Check that the low 32 bits zero-extend to be equal to the full value.
+        m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
+        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fcvt<32, 64>(dest, src);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fcvt<64, 32>(dest, src);
+    }
+    
+    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+    
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<64, 32>(dest, src);
+    }
+
+    void convertInt32ToDouble(Address address, FPRegisterID dest)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+    
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        divDouble(dest, src, dest);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fdiv<64>(dest, op1, op2);
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+    
+    void loadDouble(const void* address, FPRegisterID dest)
+    {
+        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void move64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        mulDouble(dest, src, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fmul<64>(dest, op1, op2);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        mulDouble(fpTempRegister, dest);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fneg<64>(dest, src);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fsqrt<64>(dest, src);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<64>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+
+    void storeDouble(FPRegisterID src, const void* address)
+    {
+        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+    
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        subDouble(dest, src, dest);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fsub<64>(dest, op1, op2);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        subDouble(fpTempRegister, dest);
+    }
+
+    // Result is undefined if the value is outside of the integer range.
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<32, 64>(dest, src);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<32, 64>(dest, src);
+    }
+
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data. Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack. These operations are not supported on
+    // ARM64. Peek and poke operations read or write values on the
+    // stack, without moving the current stack position. Additionally,
+    // there are popToRestore and pushToSave operations, which are
+    // designed just for quick-and-dirty saving and restoring of
+    // temporary values. These operations don't claim to have any
+    // ABI compatibility.
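+    //
+    // Illustrative pushToSave/popToRestore pairing (each moves the stack pointer by
+    // 16 bytes, preserving ARM64's 16-byte stack alignment):
+    //     pushToSave(someReg);    // str someReg, [sp, #-16]!
+    //     ... code that clobbers someReg ...
+    //     popToRestore(someReg);  // ldr someReg, [sp], #16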
+    
+    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(Address) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void popToRestore(RegisterID dest)
+    {
+        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
+    }
+
+    void pushToSave(RegisterID src)
+    {
+        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
+    }
+
+    void pushToSave(Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        pushToSave(dataTempRegister);
+    }
+
+    void pushToSave(TrustedImm32 imm)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        pushToSave(dataTempRegister);
+    }
+    
+    void popToRestore(FPRegisterID dest)
+    {
+        loadDouble(stackPointerRegister, dest);
+        add64(TrustedImm32(16), stackPointerRegister);
+    }
+    
+    void pushToSave(FPRegisterID src)
+    {
+        sub64(TrustedImm32(16), stackPointerRegister);
+        storeDouble(src, stackPointerRegister);
+    }
+
+
+    // Register move operations:
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.mov<64>(dest, src);
+    }
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        moveInternal<TrustedImm32, int32_t>(imm, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
+    }
+
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        moveInternal<TrustedImm64, int64_t>(imm, dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
+        move(reg2, reg1);
+        move(dataTempRegister, reg2);
+    }
+    
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxtw(dest, src);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxtw(dest, src);
+    }
+
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
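+    //
+    // (In this interface that example is written as
+    // branch32(LessThanOrEqual, reg1, TrustedImm32(5)); the returned Jump is later
+    // resolved with Jump::link() or Jump::linkTo().)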
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp<32>(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, m_dataMemoryTempRegister);
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, left, memoryTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp<64>(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+    {
+        intptr_t immediate = right.m_value;
+        if (isUInt12(immediate))
+            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
+        else if (isUInt12(-immediate))
+            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
+        else {
+            moveToCachedReg(right, m_dataMemoryTempRegister);
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, left, memoryTempRegister);
+    }
+
+    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load64(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        ASSERT(!(0xffffff00 & right.m_value));
+        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        ASSERT(!(0xffffff00 & right.m_value));
+        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+    
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        m_assembler.tst<32>(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<32>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            if ((cond == Zero) || (cond == NonZero)) {
+                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+                if (logicalImm.isValid()) {
+                    m_assembler.tst<32>(reg, logicalImm);
+                    return Jump(makeBranch(cond));
+                }
+            }
+
+            move(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<32>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branchTest32(cond, memoryTempRegister, mask);
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branchTest32(cond, memoryTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        m_assembler.tst<64>(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<64>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            if ((cond == Zero) || (cond == NonZero)) {
+                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+                if (logicalImm.isValid()) {
+                    m_assembler.tst<64>(reg, logicalImm);
+                    return Jump(makeBranch(cond));
+                }
+            }
+
+            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load8(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        move(ImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation. The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
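+    //
+    // Illustrative use (register and variable names here are hypothetical),
+    // e.g. guarding an integer add against overflow in JIT-generated code:
+    //
+    //     Jump overflow = jit.branchAdd32(Overflow, rightGPR, resultGPR);
+    //     // ... link 'overflow' to a slow path or OSR exit ...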
+    
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.add<32, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchAdd32(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, src, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, imm, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+        } else if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+        } else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
+            store32(dataTempRegister, address.m_ptr);
+        }
+
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.add<64, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchAdd64(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd64(cond, dest, src, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd64(cond, dest, imm, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT(cond != Signed);
+
+        if (cond != Overflow) {
+            m_assembler.mul<32>(dest, src1, src2);
+            return branchTest32(cond, dest);
+        }
+
+        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
+        m_assembler.smull(dest, src1, src2);
+        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
+        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
+        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
+        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
+        // After a mul32 the top 32 bits of the register should be clear.
+        zeroExtend32ToPtr(dest, dest);
+        // Check that bits 31..63 of the original result were all equal.
+        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul32(cond, dest, src, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchMul32(cond, dataTempRegister, src, dest);
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID dest)
+    {
+        m_assembler.neg<32, S>(dest, dest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID dest)
+    {
+        m_assembler.neg<32, S>(dest, dest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.sub<32, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchSub32(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub32(cond, dest, src, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub32(cond, dest, imm, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.sub<64, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchSub64(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub64(cond, dest, src, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub64(cond, dest, imm, dest);
+    }
+
+
+    // Jumps, calls, returns
+
+    ALWAYS_INLINE Call call()
+    {
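+        // Materialize the (as yet unknown) target with a fixed-width move so
+        // linkCall()/repatchCall() can later rewrite the pointer at a known
+        // offset from the call label; the offset is asserted against
+        // REPATCH_OFFSET_CALL_TO_POINTER below.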
+        AssemblerLabel pointerLabel = m_assembler.label();
+        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+        invalidateAllTempRegisters();
+        m_assembler.blr(dataTempRegister);
+        AssemblerLabel callLabel = m_assembler.label();
+        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+        return Call(callLabel, Call::Linkable);
+    }
+
+    ALWAYS_INLINE Call call(RegisterID target)
+    {
+        invalidateAllTempRegisters();
+        m_assembler.blr(target);
+        return Call(m_assembler.label(), Call::None);
+    }
+
+    ALWAYS_INLINE Call call(Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return call(dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump jump()
+    {
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.b();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.br(target);
+    }
+
+    void jump(Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+    }
+
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
+        load64(Address(dataTempRegister), dataTempRegister);
+        m_assembler.br(dataTempRegister);
+    }
+
+    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+    ALWAYS_INLINE Call nearCall()
+    {
+        m_assembler.bl();
+        return Call(m_assembler.label(), Call::LinkableNear);
+    }
+
+    ALWAYS_INLINE void ret()
+    {
+        m_assembler.ret();
+    }
+
+    ALWAYS_INLINE Call tailRecursiveCall()
+    {
+        // Like a normal call, but don't link.
+        AssemblerLabel pointerLabel = m_assembler.label();
+        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+        AssemblerLabel callLabel = m_assembler.label();
+        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+        return Call(callLabel, Call::Linkable);
+    }
+
+
+    // Comparison operations
+
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        load32(left, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.cmp<32>(dataTempRegister, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        move(right, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.cmp<32>(left, dataTempRegister);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+    
+    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.cmp<64>(left, dataTempRegister);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        move(right, getCachedDataTempRegisterIDAndInvalidate());
+        compare32(cond, memoryTempRegister, dataTempRegister, dest);
+    }
+    
+    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst<32>(src, src);
+        else {
+            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<32>(src, dataTempRegister);
+        }
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        test32(cond, dataTempRegister, mask, dest);
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load8(address, getCachedDataTempRegisterIDAndInvalidate());
+        test32(cond, dataTempRegister, mask, dest);
+    }
+
+    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.tst<64>(op1, op2);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst<64>(src, src);
+        else {
+            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(src, dataTempRegister);
+        }
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+
+    // Patchable operations
+
+    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        moveWithFixedWidth(imm, dest);
+        return label;
+    }
+
+    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
+    {
+        DataLabelPtr label(this);
+        moveWithFixedWidth(imm, dest);
+        return label;
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = DataLabelPtr(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch64(cond, left, dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = DataLabelPtr(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch64(cond, left, dataTempRegister);
+    }
+
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, TrustedImm32(right));
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchTest32(cond, reg, mask);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, reg, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableJump()
+    {
+        m_makeJumpPatchable = true;
+        Jump result = jump();
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr label(this);
+        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
+        store64(dataTempRegister, address);
+        return label;
+    }
+
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+    {
+        return storePtrWithPatch(TrustedImmPtr(0), address);
+    }
+
+    static void reemitInitialMoveWithPatch(void* address, void* value)
+    {
+        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
+    }
+
+    // Miscellaneous operations:
+
+    void breakpoint(uint16_t imm = 0)
+    {
+        m_assembler.brk(imm);
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+
+
+    // Misc helper functions.
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
+    }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return ARM64Assembler::maxJumpReplacementSize();
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        return label.labelAtOffset(0);
+    }
+    
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    {
+        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
+    }
+    
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+protected:
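+    // Conditional branches are emitted as a b.cond followed by a nop; the
+    // Jump records the label of the nop. The extra slot gives the link step
+    // room to redirect the branch when the target is out of range of a single
+    // conditional branch.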
+    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
+    {
+        m_assembler.b_cond(cond);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
+    }
+    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
+
+    template <int dataSize>
+    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
+    {
+        if (cond == IsZero)
+            m_assembler.cbz<dataSize>(reg);
+        else
+            m_assembler.cbnz<dataSize>(reg);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
+    }
+
+    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
+    {
+        ASSERT(bit < 64);
+        bit &= 0x3f;
+        if (cond == IsZero)
+            m_assembler.tbz(reg, bit);
+        else
+            m_assembler.tbnz(reg, bit);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+    
+private:
+    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
+    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
+
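+    // True if the value survives truncation to 32 bits and sign-extension
+    // back, i.e. it fits in a signed 32-bit range.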
+    ALWAYS_INLINE bool isInIntRange(intptr_t value)
+    {
+        return value == ((value << 32) >> 32);
+    }
+
+    template<typename ImmediateType, typename rawType>
+    void moveInternal(ImmediateType imm, RegisterID dest)
+    {
+        const int dataSize = sizeof(rawType) * 8;
+        const int numberHalfWords = dataSize / 16;
+        rawType value = bitwise_cast<rawType>(imm.m_value);
+        uint16_t halfword[numberHalfWords];
+
+        // Handle 0 and ~0 here to simplify code below
+        if (!value) {
+            m_assembler.movz<dataSize>(dest, 0);
+            return;
+        }
+        if (!~value) {
+            m_assembler.movn<dataSize>(dest, 0);
+            return;
+        }
+
+        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
+
+        if (logicalImm.isValid()) {
+            m_assembler.movi<dataSize>(dest, logicalImm);
+            return;
+        }
+
+        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
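+        // For example (illustrative), 0xffffffff00001234 has two 0xffff
+        // halfwords and one zero halfword, so the vote goes negative and the
+        // movn path below emits a movn for halfword 0 plus a single movk for
+        // halfword 1, instead of three movz/movk instructions.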
+        int zeroOrNegateVote = 0;
+        for (int i = 0; i < numberHalfWords; ++i) {
+            halfword[i] = getHalfword(value, i);
+            if (!halfword[i])
+                zeroOrNegateVote++;
+            else if (halfword[i] == 0xffff)
+                zeroOrNegateVote--;
+        }
+
+        bool needToClearRegister = true;
+        if (zeroOrNegateVote >= 0) {
+            for (int i = 0; i < numberHalfWords; i++) {
+                if (halfword[i]) {
+                    if (needToClearRegister) {
+                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
+                        needToClearRegister = false;
+                    } else
+                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+                }
+            }
+        } else {
+            for (int i = 0; i < numberHalfWords; i++) {
+                if (halfword[i] != 0xffff) {
+                    if (needToClearRegister) {
+                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
+                        needToClearRegister = false;
+                    } else
+                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+                }
+            }
+        }
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        m_assembler.ldr<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        m_assembler.ldur<datasize>(rt, rn, simm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        m_assembler.str<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        m_assembler.stur<datasize>(rt, rn, simm);
+    }
+
+    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
+    {
+        int32_t value = imm.m_value;
+        m_assembler.movz<32>(dest, getHalfword(value, 0));
+        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+    }
+
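+    // Materializes a pointer with a fixed-width movz/movk/movk sequence
+    // covering the low 48 bits (the high 16 bits are assumed to be zero), so
+    // patching code can always rewrite the same three instructions in place.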
+    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
+    {
+        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
+        m_assembler.movz<64>(dest, getHalfword(value, 0));
+        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
+        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
+    }
+
+    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
+    {
+        if (value >= 0) {
+            m_assembler.movz<32>(dest, getHalfword(value, 0));
+            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+        } else {
+            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
+            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+        }
+    }
+
+    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
+    }
+
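+    // Absolute-address load. When the memory temp register is known to hold a
+    // nearby address, reuse it via an unscaled or unsigned immediate offset,
+    // or update just its low halfword with movk; otherwise rematerialize the
+    // full address and refresh the cached value.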
+    template<int datasize>
+    ALWAYS_INLINE void load(const void* address, RegisterID dest)
+    {
+        intptr_t currentRegisterContents;
+        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+            intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+            if (isInIntRange(addressDelta)) {
+                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+                    m_assembler.ldur<datasize>(dest,  memoryTempRegister, addressDelta);
+                    return;
+                }
+
+                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+                    m_assembler.ldr<datasize>(dest,  memoryTempRegister, addressDelta);
+                    return;
+                }
+            }
+
+            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+                return;
+            }
+        }
+
+        move(TrustedImmPtr(address), memoryTempRegister);
+        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+    }
+
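+    // Absolute-address store, using the same cached-address strategy as the
+    // load() above.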
+    template<int datasize>
+    ALWAYS_INLINE void store(RegisterID src, const void* address)
+    {
+        intptr_t currentRegisterContents;
+        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+            intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+            if (isInIntRange(addressDelta)) {
+                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
+                    return;
+                }
+
+                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
+                    return;
+                }
+            }
+
+            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+                return;
+            }
+        }
+
+        move(TrustedImmPtr(address), memoryTempRegister);
+        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+    }
+
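+    // Attempts to produce 'immediate' in 'dest' cheaply by exploiting the
+    // value the register is already known to hold: reuse it unchanged,
+    // re-encode the new value as a single logical-immediate move, or movk
+    // only the low halfwords that differ when the upper bits already match.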
+    template <int dataSize>
+    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
+    {
+        intptr_t currentRegisterContents;
+        if (dest.value(currentRegisterContents)) {
+            if (currentRegisterContents == immediate)
+                return true;
+
+            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
+
+            if (logicalImm.isValid()) {
+                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
+                dest.setValue(immediate);
+                return true;
+            }
+
+            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
+                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
+                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
+
+                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
+                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
+
+                dest.setValue(immediate);
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
+            return;
+
+        moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.m_value);
+    }
+
+    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
+            return;
+
+        moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.asIntptr());
+    }
+
+    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
+            return;
+
+        moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.m_value);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            loadUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            m_assembler.ldur<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            storeUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            m_assembler.stur<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    friend class LinkBuffer;
+    friend class RepatchBuffer;
+
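+    // Near calls (bl) are linked directly; all other calls are linked by
+    // patching the fixed-width pointer move that precedes the blr, located at
+    // REPATCH_OFFSET_CALL_TO_POINTER relative to the call label.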
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Near))
+            ARM64Assembler::linkCall(code, call.m_label, function.value());
+        else
+            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+    CachedTempRegister m_dataMemoryTempRegister;
+    CachedTempRegister m_cachedMemoryTempRegister;
+    bool m_makeJumpPatchable;
+};
+
+// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldurb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldurh(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.strb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.strh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.sturb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.sturh(rt, rn, simm);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARM64_h
index b64c1c5..b3076a0 100644 (file)
@@ -55,6 +55,10 @@ public:
     typedef ARMv7Assembler::LinkRecord LinkRecord;
     typedef ARMv7Assembler::JumpType JumpType;
     typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+    typedef ARMv7Assembler::Condition Condition;
+
+    static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
+    static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
 
     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
     {
index 24ad230..1a3855c 100644 (file)
@@ -233,7 +233,7 @@ private:
         case ArithMod: {
             if (Node::shouldSpeculateInt32ForArithmetic(node->child1().node(), node->child2().node())
                 && node->canSpeculateInt32()) {
-                if (isX86() || isARMv7s()) {
+                if (isX86() || isARM64() || isARMv7s()) {
                     fixEdge<Int32Use>(node->child1());
                     fixEdge<Int32Use>(node->child2());
                     break;
index 288266a..58f442e 100644 (file)
@@ -120,8 +120,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
                 
+#if CPU(ARM64)
+                m_jit.pushToSave(scratch1);
+                m_jit.pushToSave(scratch2);
+#else
                 m_jit.push(scratch1);
                 m_jit.push(scratch2);
+#endif
                 
                 GPRReg value;
                 if (exit.m_jsValueSource.isAddress()) {
@@ -137,8 +142,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 m_jit.lshift32(scratch1, scratch2);
                 m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                 
+#if CPU(ARM64)
+                m_jit.popToRestore(scratch2);
+                m_jit.popToRestore(scratch1);
+#else
                 m_jit.pop(scratch2);
                 m_jit.pop(scratch1);
+#endif
             }
         }
         
@@ -149,14 +159,22 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 // Save a register so we can use it.
                 GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
                 
+#if CPU(ARM64)
+                m_jit.pushToSave(scratch);
+#else
                 m_jit.push(scratch);
+#endif
 
                 m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                 m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                 m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                 m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
                 
+#if CPU(ARM64)
+                m_jit.popToRestore(scratch);
+#else
                 m_jit.pop(scratch);
+#endif
             } else if (exit.m_jsValueSource.hasKnownTag()) {
                 m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                 m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
index 4f6683a..f9c8b96 100644 (file)
@@ -113,8 +113,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
                 
+#if CPU(ARM64)
+                m_jit.pushToSave(scratch1);
+                m_jit.pushToSave(scratch2);
+#else
                 m_jit.push(scratch1);
                 m_jit.push(scratch2);
+#endif
                 
                 GPRReg value;
                 if (exit.m_jsValueSource.isAddress()) {
@@ -130,8 +135,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 m_jit.lshift32(scratch1, scratch2);
                 m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                 
+#if CPU(ARM64)
+                m_jit.popToRestore(scratch2);
+                m_jit.popToRestore(scratch1);
+#else
                 m_jit.pop(scratch2);
                 m_jit.pop(scratch1);
+#endif
             }
         }
         
index 458fd3f..e698dbf 100644 (file)
@@ -37,6 +37,7 @@
 #include "DFGSlowPathGenerator.h"
 #include "JSCJSValueInlines.h"
 #include "LinkBuffer.h"
+#include <wtf/MathExtras.h>
 
 namespace JSC { namespace DFG {
 
@@ -3507,6 +3508,32 @@ void SpeculativeJIT::compileArithDiv(Node* node)
         }
 
         int32Result(quotient.gpr(), node);
+#elif CPU(ARM64)
+        SpeculateIntegerOperand op1(this, node->child1());
+        SpeculateIntegerOperand op2(this, node->child2());
+        GPRReg op1GPR = op1.gpr();
+        GPRReg op2GPR = op2.gpr();
+        GPRTemporary quotient(this);
+        GPRTemporary multiplyAnswer(this);
+
+        // If the user cares about negative zero, then speculate that we're not about
+        // to produce negative zero.
+        if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+            numeratorNonZero.link(&m_jit);
+        }
+
+        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
+
+        // Check that there was no remainder. If there had been, then we'd be obligated to
+        // produce a double result instead.
+        if (nodeUsedAsNumber(node->arithNodeFlags())) {
+            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
+            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
+        }
+
+        int32Result(quotient.gpr(), node);
 #else
         RELEASE_ASSERT_NOT_REACHED();
 #endif
@@ -3758,6 +3785,29 @@ void SpeculativeJIT::compileArithMod(Node* node)
         }
 
         int32Result(quotientThenRemainderGPR, node);
+#elif CPU(ARM64)
+        GPRTemporary temp(this);
+        GPRTemporary quotientThenRemainder(this);
+        GPRTemporary multiplyAnswer(this);
+        GPRReg dividendGPR = op1.gpr();
+        GPRReg divisorGPR = op2.gpr();
+        GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
+        GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
+
+        m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
+        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
+        m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
+
+        // If the user cares about negative zero, then speculate that we're not about
+        // to produce negative zero.
+        if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+            // Check that we're not about to create negative zero.
+            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
+            numeratorPositive.link(&m_jit);
+        }
+
+        int32Result(quotientThenRemainderGPR, node);
 #else // not architecture that can do integer division
         RELEASE_ASSERT_NOT_REACHED();
 #endif
diff --git a/Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.cpp b/Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.cpp
new file mode 100644 (file)
index 0000000..0ea817a
--- /dev/null
@@ -0,0 +1,1132 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "A64DOpcode.h"
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+
+namespace JSC { namespace ARM64Disassembler {
+
+A64DOpcode::OpcodeGroup* A64DOpcode::opcodeTable[32];
+
+const char* const A64DOpcode::s_conditionNames[16] = {
+    "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+    "hi", "ls", "ge", "lt", "gt", "le", "al", "ne"
+};
+
+const char* const A64DOpcode::s_optionName[8] = {
+    "uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx"
+};
+
+const char* const A64DOpcode::s_shiftNames[4] = {
+    "lsl", "lsr", "asl", "ror"
+};
+
+const char A64DOpcode::s_FPRegisterPrefix[5] = {
+    'b', 'h', 's', 'd', 'q'
+};
+
+struct OpcodeGroupInitializer {
+    unsigned m_opcodeGroupNumber;
+    uint32_t m_mask;
+    uint32_t m_pattern;
+    const char* (*m_format)(A64DOpcode*);
+};
+
+#define OPCODE_GROUP_ENTRY(groupIndex, groupClass) \
+{ groupIndex, groupClass::mask, groupClass::pattern, groupClass::format }
+
+static OpcodeGroupInitializer opcodeGroupList[] = {
+    OPCODE_GROUP_ENTRY(0x0a, A64DOpcodeLogicalShiftedRegister),
+    OPCODE_GROUP_ENTRY(0x0b, A64DOpcodeAddSubtractExtendedRegister),
+    OPCODE_GROUP_ENTRY(0x0b, A64DOpcodeAddSubtractShiftedRegister),
+    OPCODE_GROUP_ENTRY(0x11, A64DOpcodeAddSubtractImmediate),
+    OPCODE_GROUP_ENTRY(0x12, A64DOpcodeMoveWide),
+    OPCODE_GROUP_ENTRY(0x12, A64DOpcodeLogicalImmediate),
+    OPCODE_GROUP_ENTRY(0x13, A64DOpcodeBitfield),
+    OPCODE_GROUP_ENTRY(0x13, A64DOpcodeExtract),
+    OPCODE_GROUP_ENTRY(0x14, A64DOpcodeUnconditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x14, A64DOpcodeConditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x14, A64DOpcodeCompareAndBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x14, A64OpcodeExceptionGeneration),
+    OPCODE_GROUP_ENTRY(0x15, A64DOpcodeUnconditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x15, A64DOpcodeConditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x15, A64DOpcodeCompareAndBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x15, A64DOpcodeHint),
+    OPCODE_GROUP_ENTRY(0x16, A64DOpcodeUnconditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x16, A64DOpcodeUnconditionalBranchRegister),
+    OPCODE_GROUP_ENTRY(0x16, A64DOpcodeTestAndBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x17, A64DOpcodeUnconditionalBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x17, A64DOpcodeUnconditionalBranchRegister),
+    OPCODE_GROUP_ENTRY(0x17, A64DOpcodeTestAndBranchImmediate),
+    OPCODE_GROUP_ENTRY(0x18, A64DOpcodeLoadStoreImmediate),
+    OPCODE_GROUP_ENTRY(0x18, A64DOpcodeLoadStoreRegisterOffset),
+    OPCODE_GROUP_ENTRY(0x19, A64DOpcodeLoadStoreUnsignedImmediate),
+    OPCODE_GROUP_ENTRY(0x1a, A64DOpcodeConditionalSelect),
+    OPCODE_GROUP_ENTRY(0x1a, A64DOpcodeDataProcessing2Source),
+    OPCODE_GROUP_ENTRY(0x1b, A64DOpcodeDataProcessing3Source),
+    OPCODE_GROUP_ENTRY(0x1c, A64DOpcodeLoadStoreImmediate),
+    OPCODE_GROUP_ENTRY(0x1c, A64DOpcodeLoadStoreRegisterOffset),
+    OPCODE_GROUP_ENTRY(0x1d, A64DOpcodeLoadStoreUnsignedImmediate),
+    OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointCompare),
+    OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointDataProcessing2Source),
+    OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointDataProcessing1Source),
+    OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingFixedPointConversions),
+    OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointIntegerConversions),
+};
+
+bool A64DOpcode::s_initialized = false;
+
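+// Builds the dispatch table used by disassemble(): each of the 32 top-level
+// opcode groups gets a singly linked list of OpcodeGroup matchers, kept in
+// the order they appear in opcodeGroupList.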
+void A64DOpcode::init()
+{
+    if (s_initialized)
+        return;
+
+    OpcodeGroup* lastGroups[32];
+
+    for (unsigned i = 0; i < 32; i++) {
+        opcodeTable[i] = 0;
+        lastGroups[i] = 0;
+    }
+
+    for (unsigned i = 0; i < sizeof(opcodeGroupList) / sizeof(struct OpcodeGroupInitializer); i++) {
+        OpcodeGroup* newOpcodeGroup = new OpcodeGroup(opcodeGroupList[i].m_mask, opcodeGroupList[i].m_pattern, opcodeGroupList[i].m_format);
+        uint32_t opcodeGroupNumber = opcodeGroupList[i].m_opcodeGroupNumber;
+
+        if (!opcodeTable[opcodeGroupNumber])
+            opcodeTable[opcodeGroupNumber] = newOpcodeGroup;
+        else
+            lastGroups[opcodeGroupNumber]->setNext(newOpcodeGroup);
+        lastGroups[opcodeGroupNumber] = newOpcodeGroup;
+    }
+
+    s_initialized = true;
+}
+
+void A64DOpcode::setPCAndOpcode(uint32_t* newPC, uint32_t newOpcode)
+{
+    m_currentPC = newPC;
+    m_opcode = newOpcode;
+    m_bufferOffset = 0;
+    m_formatBuffer[0] = '\0';
+}
+
+const char* A64DOpcode::disassemble(uint32_t* currentPC)
+{
+    setPCAndOpcode(currentPC, *currentPC);
+
+    OpcodeGroup* opGroup = opcodeTable[opcodeGroupNumber(m_opcode)];
+
+    while (opGroup) {
+        if (opGroup->matches(m_opcode))
+            return opGroup->format(this);
+        opGroup = opGroup->next();
+    }
+
+    return A64DOpcode::format();
+}
+
+void A64DOpcode::bufferPrintf(const char* format, ...)
+{
+    if (m_bufferOffset >= bufferSize)
+        return;
+
+    va_list argList;
+    va_start(argList, format);
+
+    m_bufferOffset += vsnprintf(m_formatBuffer + m_bufferOffset, bufferSize - m_bufferOffset, format, argList);
+
+    va_end(argList);
+}
+
+const char* A64DOpcode::format()
+{
+    bufferPrintf("   .long  %08x", m_opcode);
+    return m_formatBuffer;
+}
+
+void A64DOpcode::appendRegisterName(unsigned registerNumber, bool is64Bit)
+{
+    if (registerNumber == 30) {
+        bufferPrintf(is64Bit ? "lr" : "wlr");
+        return;
+    }
+
+    bufferPrintf("%c%u", is64Bit ? 'x' : 'w', registerNumber);
+}
+
+void A64DOpcode::appendFPRegisterName(unsigned registerNumber, unsigned registerSize)
+{
+    bufferPrintf("%c%u", FPRegisterPrefix(registerSize), registerNumber);
+}
+
+const char* const A64DOpcodeAddSubtract::s_opNames[4] = { "add", "adds", "sub", "subs" };
+
+const char* A64DOpcodeAddSubtractImmediate::format()
+{
+    if (isCMP())
+        appendInstructionName(cmpName());
+    else {
+        if (isMovSP())
+            appendInstructionName("mov");
+        else
+            appendInstructionName(opName());
+        appendSPOrRegisterName(rd(), is64Bit());
+        appendSeparator();
+    }
+    appendSPOrRegisterName(rn(), is64Bit());
+
+    if (!isMovSP()) {
+        appendSeparator();
+        appendUnsignedImmediate(immed12());
+        if (shift()) {
+            appendSeparator();
+            appendString(shift() == 1 ? "lsl" : "reserved");
+        }
+    }
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeAddSubtractExtendedRegister::format()
+{
+    if (immediate3() > 4)
+        return A64DOpcode::format();
+
+    if (isCMP())
+        appendInstructionName(cmpName());
+    else {
+        appendInstructionName(opName());
+        appendSPOrRegisterName(rd(), is64Bit());
+        appendSeparator();
+    }
+    appendSPOrRegisterName(rn(), is64Bit());
+    appendSeparator();
+    appendZROrRegisterName(rm(), is64Bit() && ((option() & 0x3) == 0x3));
+    appendSeparator();
+    if (option() == 0x2 && ((rd() == 31) || (rn() == 31)))
+        appendString("lsl");
+    else
+        appendString(optionName());
+    if (immediate3()) {
+        appendCharacter(' ');
+        appendUnsignedImmediate(immediate3());
+    }
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeAddSubtractShiftedRegister::format()
+{
+    if (!is64Bit() && immediate6() & 0x20)
+        return A64DOpcode::format();
+
+    if (shift() == 0x3)
+        return A64DOpcode::format();
+
+    if (isCMP())
+        appendInstructionName(cmpName());
+    else {
+        if (isNeg())
+            appendInstructionName(cmpName());
+        else
+            appendInstructionName(opName());
+        appendSPOrRegisterName(rd(), is64Bit());
+        appendSeparator();
+    }
+    if (!isNeg()) {
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+    }
+    appendZROrRegisterName(rm(), is64Bit());
+    if (immediate6()) {
+        appendSeparator();
+        appendShiftType(shift());
+        appendUnsignedImmediate(immediate6());
+    }
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeBitfield::s_opNames[3] = { "sbfm", "bfm", "ubfm" };
+const char* const A64DOpcodeBitfield::s_extendPseudoOpNames[3][3] = {
+    { "sxtb", "sxth", "sxtw" }, { 0, 0, 0} , { "uxtb", "uxth", "uxtw" } };
+const char* const A64DOpcodeBitfield::s_insertOpNames[3] = { "sbfiz", "bfi", "ubfiz" };
+const char* const A64DOpcodeBitfield::s_extractOpNames[3] = { "sbfx", "bf", "ubfx" };
+
+const char* A64DOpcodeBitfield::format()
+{
+    if (opc() == 0x3)
+        return A64DOpcode::format();
+
+    if (is64Bit() != nBit())
+        return A64DOpcode::format();
+
+    if (!is64Bit() && ((immediateR() & 0x20) || (immediateS() & 0x20)))
+        return A64DOpcode::format();
+
+    if (!(opc() & 0x1) && !immediateR()) {
+        // [un]signed {byte, half-word, word} extend
+        bool isSTXType = false;
+        if (immediateS() == 7) {
+            appendInstructionName(extendPseudoOpNames(0));
+            isSTXType = true;
+        } else if (immediateS() == 15) {
+            appendInstructionName(extendPseudoOpNames(1));
+            isSTXType = true;
+        } else if (immediateS() == 31 && is64Bit()) {
+            appendInstructionName(extendPseudoOpNames(2));
+            isSTXType = true;
+        }
+
+        if (isSTXType) {
+            appendRegisterName(rd(), is64Bit());
+            appendSeparator();
+            appendRegisterName(rn(), false);
+
+            return m_formatBuffer;
+        }
+    }
+
+    if (opc() == 0x2 && immediateS() == (immediateR() + 1)) {
+        // lsl
+        appendInstructionName("lsl");
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+        appendUnsignedImmediate((is64Bit() ? 63u : 31u) - immediateR());
+
+        return m_formatBuffer;
+    } else if (!(opc() & 0x1) && ((immediateS() & 0x1f) == 0x1f) && (is64Bit() == (immediateS() >> 5))) {
+        // asr/lsr
+        appendInstructionName(!opc() ? "asr" : "lsr");
+
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+        appendUnsignedImmediate(immediateR());
+
+        return m_formatBuffer;
+    } else if (immediateS() < immediateR()) {
+        // bit field insert
+        appendInstructionName(insertOpNames());
+
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+        appendUnsignedImmediate((is64Bit() ? 64u : 32u) - immediateR());
+        appendSeparator();
+        appendUnsignedImmediate(immediateS() + 1);
+
+        return m_formatBuffer;
+    } else {
+        // bit field extract
+        appendInstructionName(extractOpNames());
+
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+        appendUnsignedImmediate(immediateR());
+        appendSeparator();
+        appendUnsignedImmediate(immediateS() - immediateR() + 1);
+
+        return m_formatBuffer;
+    }
+
+    appendInstructionName(opName());
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    appendRegisterName(rn(), is64Bit());
+    appendSeparator();
+    appendUnsignedImmediate(immediateR());
+    appendSeparator();
+    appendUnsignedImmediate(immediateS());
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeCompareAndBranchImmediate::format()
+{
+    appendInstructionName(opBit() ? "cbnz" : "cbz");
+    appendRegisterName(rt(), is64Bit());
+    appendSeparator();    
+    appendPCRelativeOffset(m_currentPC, static_cast<int32_t>(immediate19()));
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeConditionalBranchImmediate::format()
+{
+    bufferPrintf("   b.%-5.5s", conditionName(condition()));
+    appendPCRelativeOffset(m_currentPC, static_cast<int32_t>(immediate19()));
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeConditionalSelect::s_opNames[4] = {
+    "csel", "csinc", "csinv", "csneg"
+};
+
+const char* A64DOpcodeConditionalSelect::format()
+{
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (op2() & 0x2)
+        return A64DOpcode::format();
+
+    if (rn() == rm() && (opNum() == 1 || opNum() == 2)) {
+        if (rn() == 31) {
+            appendInstructionName((opNum() == 1) ? "cset" : "csetm");
+            appendRegisterName(rd(), is64Bit());
+        } else {
+            appendInstructionName((opNum() == 1) ? "cinc" : "cinv");
+            appendRegisterName(rd(), is64Bit());
+            appendSeparator();
+            appendZROrRegisterName(rn(), is64Bit());
+        }
+        appendSeparator();
+        appendString(conditionName(condition() ^ 0x1));
+
+        return m_formatBuffer;
+    }
+
+    appendInstructionName(opName());
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    appendZROrRegisterName(rn(), is64Bit());
+    appendSeparator();
+    appendZROrRegisterName(rm(), is64Bit());
+    appendSeparator();
+    appendString(conditionName(condition()));
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeDataProcessing2Source::s_opNames[8] = {
+    0, 0, "udiv", "sdiv", "lsl", "lsr", "asr", "ror" // We use the pseudo-op names for the shift/rotate instructions
+};
+
+const char* A64DOpcodeDataProcessing2Source::format()
+{
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (!(opCode() & 0x3e))
+        return A64DOpcode::format();
+
+    if (opCode() & 0x30)
+        return A64DOpcode::format();
+
+    if ((opCode() & 0x34) == 0x4)
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    appendRegisterName(rn(), is64Bit());
+    appendSeparator();
+    appendRegisterName(rm(), is64Bit());
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeDataProcessing3Source::s_opNames[16] = {
+    "madd", "msub", "smaddl", "smsubl", "smulh", 0, 0, 0,
+    0, 0, "umaddl", "umsubl", "umulh", 0, 0, 0
+};
+
+const char* const A64DOpcodeDataProcessing3Source::s_pseudoOpNames[16] = {
+    "mul", "mneg", "smull", "smnegl", "smulh", 0, 0, 0,
+    0, 0, "umull", "umnegl", "umulh", 0, 0, 0
+};
+
+const char* A64DOpcodeDataProcessing3Source::format()
+{
+    if (op54())
+        return A64DOpcode::format();
+
+    if (opNum() > 12)
+        return A64DOpcode::format();
+
+    if (!is64Bit() && opNum() > 1)
+        return A64DOpcode::format();
+
+    if (!opName())
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    bool srcOneAndTwoAre64Bit = is64Bit() & !(opNum() & 0x2);
+    appendRegisterName(rn(), srcOneAndTwoAre64Bit);
+    appendSeparator();
+    appendRegisterName(rm(), srcOneAndTwoAre64Bit);
+
+    if ((ra() != 31) && !(opNum() & 0x4)) {
+        appendSeparator();
+        appendRegisterName(ra(), is64Bit());
+    }
+
+    return m_formatBuffer;
+}
+
+const char* A64OpcodeExceptionGeneration::format()
+{
+    const char* opname = 0;
+    if (!op2()) {
+        switch (opc()) {
+        case 0x0: // SVC, HVC & SMC
+            switch (ll()) {
+            case 0x1:
+                opname = "svc";
+                break;
+            case 0x2:
+                opname = "hvc";
+                break;
+            case 0x3:
+                opname = "smc";
+                break;
+            }
+            break;
+        case 0x1: // BRK
+            if (!ll())
+                opname = "brk";
+            break;
+        case 0x2: // HLT
+            if (!ll())
+                opname = "hlt";
+            break;
+        case 0x5: // DCPS1-3
+            switch (ll()) {
+            case 0x1:
+                opname = "dcps1";
+                break;
+            case 0x2:
+                opname = "dcps2";
+                break;
+            case 0x3:
+                opname = "dcps3";
+                break;
+            }
+            break;
+        }
+    }
+
+    if (!opname)
+        return A64DOpcode::format();
+
+    appendInstructionName(opname);
+    appendUnsignedImmediate(immediate16());
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeExtract::format()
+{
+    if (op21() || o0Bit())
+        return A64DOpcode::format();
+
+    if (is64Bit() != nBit())
+        return A64DOpcode::format();
+
+    if (!is64Bit() && (immediateS() & 0x20))
+        return A64DOpcode::format();
+
+    const char* opName = (rn() == rm()) ? "ror" : "extr";
+
+    appendInstructionName(opName);
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    appendRegisterName(rn(), is64Bit());
+    appendSeparator();
+    appendRegisterName(rm(), is64Bit());
+    appendSeparator();
+    appendUnsignedImmediate(immediateS());
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeFloatingPointCompare::format()
+{
+    if (mBit())
+        return A64DOpcode::format();
+
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (type() & 0x2)
+        return A64DOpcode::format();
+
+    if (op())
+        return A64DOpcode::format();
+
+    if (opCode2() & 0x7)
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    unsigned registerSize = type() + 2;
+    appendFPRegisterName(rn(), registerSize);
+    appendSeparator();
+    if (opCode2() & 0x8)
+        bufferPrintf("#0.0");
+    else
+        appendFPRegisterName(rm(), registerSize);
+    
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeFloatingPointDataProcessing1Source::s_opNames[16] = {
+    "fmov", "fabs", "fneg", "fsqrt", "fcvt", "fcvt", 0, "fcvt",
+    "frintn", "frintp", "frintm", "frintz", "frinta", 0, "frintx", "frinti"
+};
+
+const char* A64DOpcodeFloatingPointDataProcessing1Source::format()
+{
+    if (mBit())
+        return A64DOpcode::format();
+
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (opNum() > 15)
+        return A64DOpcode::format();
+
+    switch (type()) {
+    case 0:
+        if ((opNum() == 0x4) || (opNum() == 0x6) || (opNum() == 0xd))
+            return A64DOpcode::format();
+        break;
+    case 1:
+        if ((opNum() == 0x5) || (opNum() == 0x6) || (opNum() == 0xd))
+            return A64DOpcode::format();
+        break;
+    case 2:
+        return A64DOpcode::format();
+    case 3:
+        if ((opNum() < 0x4) || (opNum() > 0x5))
+            return A64DOpcode::format();
+        break;
+    }
+
+    appendInstructionName(opName());
+    if ((opNum() >= 0x4) && (opNum() <= 0x7)) {
+        unsigned srcRegisterSize = type() ^ 0x2; // 0:s, 1:d & 3:h
+        unsigned destRegisterSize = (opNum() & 0x3) ^ 0x2;
+        appendFPRegisterName(rd(), destRegisterSize);
+        appendSeparator();
+        appendFPRegisterName(rn(), srcRegisterSize);
+    } else {
+        unsigned registerSize = type() + 2;
+        appendFPRegisterName(rd(), registerSize);
+        appendSeparator();
+        appendFPRegisterName(rn(), registerSize);
+    }
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeFloatingPointDataProcessing2Source::s_opNames[16] = {
+    "fmul", "fdiv", "fadd", "fsub", "fmax", "fmin", "fmaxnm", "fminnm", "fnmul"
+};
+
+const char* A64DOpcodeFloatingPointDataProcessing2Source::format()
+{
+    if (mBit())
+        return A64DOpcode::format();
+
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (type() & 0x2)
+        return A64DOpcode::format();
+
+    if (opNum() > 8)
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    unsigned registerSize = type() + 2;
+    appendFPRegisterName(rd(), registerSize);
+    appendSeparator();
+    appendFPRegisterName(rn(), registerSize);
+    appendSeparator();
+    appendFPRegisterName(rm(), registerSize);
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeFloatingFixedPointConversions::s_opNames[4] = {
+    "fcvtzs", "fcvtzu", "scvtf", "ucvtf"
+};
+
+const char* A64DOpcodeFloatingFixedPointConversions::format()
+{
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (type() & 0x2)
+        return A64DOpcode::format();
+
+    if (opcode() & 0x4)
+        return A64DOpcode::format();
+
+    if (!(rmode() & 0x1) && !(opcode() & 0x6))
+        return A64DOpcode::format();
+
+    if ((rmode() & 0x1) && (opcode() & 0x6) == 0x2)
+        return A64DOpcode::format();
+
+    if (!(rmode() & 0x2) && !(opcode() & 0x6))
+        return A64DOpcode::format();
+
+    if ((rmode() & 0x2) && (opcode() & 0x6) == 0x2)
+        return A64DOpcode::format();
+
+    if (!is64Bit() && scale() < 32)
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    unsigned FPRegisterSize = type() + 2;
+    bool destIsFP = !rmode();
+    
+    if (destIsFP) {
+        appendFPRegisterName(rd(), FPRegisterSize);
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+    } else {
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendFPRegisterName(rn(), FPRegisterSize);
+    }
+    appendSeparator();
+    appendUnsignedImmediate(64 - scale());
+    
+    return m_formatBuffer;
+}
+
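+// A zero in an entry of the table means the instruction is Unallocated; the
+// table is indexed by rmode:opcode (instruction bits 20..16).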
+const char* const A64DOpcodeFloatingPointIntegerConversions::s_opNames[32] = {
+    "fcvtns", "fcvtnu", "scvtf", "ucvtf", "fcvtas", "fcvtau", "fmov", "fmov",
+    "fcvtps", "fcvtpu", 0, 0, 0, 0, "fmov", "fmov",
+    "fcvtms", "fcvtmu", 0, 0, 0, 0, 0, 0,
+    "fcvtzs", "fcvtzu", 0, 0, 0, 0, 0, 0
+};
+
+const char* A64DOpcodeFloatingPointIntegerConversions::format()
+{
+    if (sBit())
+        return A64DOpcode::format();
+
+    if (type() == 0x3)
+        return A64DOpcode::format();
+
+    if (((rmode() & 0x1) || (rmode() & 0x2)) && (((opcode() & 0x6) == 0x2) || ((opcode() & 0x6) == 0x4)))
+        return A64DOpcode::format();
+
+    if ((type() == 0x2) && (!(opcode() & 0x4) || ((opcode() & 0x6) == 0x4)))
+        return A64DOpcode::format();
+
+    if (!type() && (rmode() & 0x1) && ((opcode() & 0x6) == 0x6))
+        return A64DOpcode::format();
+
+    if (is64Bit() && type() == 0x2 && ((opNum() & 0xe) == 0x6))
+        return A64DOpcode::format();
+
+    if (!opName())
+        return A64DOpcode::format();
+
+    if ((opNum() & 0x1e) == 0xe) {
+        // Handle fmov to/from upper half of quad separately
+        if (!is64Bit() || (type() != 0x2))
+            return A64DOpcode::format();
+
+        appendInstructionName(opName());
+        if (opcode() & 0x1) {
+            // fmov Vd.D[1], Xn
+            bufferPrintf("V%u.D[1]", rd());
+            appendSeparator();
+            appendRegisterName(rn());
+        } else {
+            // fmov Xd, Vn.D[1]
+            appendRegisterName(rd());
+            appendSeparator();
+            bufferPrintf("V%u.D[1]", rn());
+        }
+
+        return m_formatBuffer;
+    }
+
+    appendInstructionName(opName());
+    unsigned FPRegisterSize = type() + 2;
+    bool destIsFP = ((opNum() == 2) || (opNum() == 3) || (opNum() == 7));
+
+    if (destIsFP) {
+        appendFPRegisterName(rd(), FPRegisterSize);
+        appendSeparator();
+        appendRegisterName(rn(), is64Bit());
+    } else {
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+        appendFPRegisterName(rn(), FPRegisterSize);
+    }
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeHint::s_opNames[6] = {
+    "nop", "yield", "wfe", "wfi", "sev", "sevl"
+};
+
+const char* A64DOpcodeHint::format()
+{
+    appendInstructionName(opName());
+
+    if (immediate7() > 5)
+        appendUnsignedImmediate(immediate7());
+
+    return m_formatBuffer;
+}
+
+// A zero in an entry of the table means the instruction is Unallocated
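+// Indexed by size:V:opc (see A64DOpcodeLoadStore::opNumber()).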
+const char* const A64DOpcodeLoadStore::s_opNames[32] = {
+    "strb", "ldrb", "ldrsb", "ldrsb", "str", "ldr", "str", "ldr",
+    "strh", "ldrh", "ldrsh", "ldrsh", "str", "ldr", 0, 0,
+    "str", "ldr", "ldrsw", 0, "str", "ldr", 0, 0,
+    "str", "ldr", 0, 0, "str", "ldr", 0, 0
+};
+
+// A zero in an entry of the table means the instruction is Unallocated
+const char* const A64DOpcodeLoadStoreImmediate::s_unprivilegedOpNames[32] = {
+    "sttrb", "ldtrb", "ldtrsb", "ldtrsb", 0, 0, 0, 0,
+    "sttrh", "ldtrh", "ldtrsh", "ldtrsh", 0, 0, 0, 0,
+    "sttr", "ldtr", "ldtrsw", 0, 0, 0, 0, 0,
+    "sttr", "ldtr", 0, 0, 0, 0, 0, 0
+};
+
+// A zero in an entry of the table means the instruction is Unallocated
+const char* const A64DOpcodeLoadStoreImmediate::s_unscaledOpNames[32] = {
+    "sturb", "ldurb", "ldursb", "ldursb", "stur", "ldur", "stur", "ldur",
+    "sturh", "ldurh", "ldursh", "ldursh", "stur", "ldur", 0, 0,
+    "stur", "ldur", "ldursw", 0, "stur", "ldur", 0, 0,
+    "stur", "ldur", "prfum", 0, "stur", "ldur", 0, 0
+};
+
+const char* A64DOpcodeLoadStoreImmediate::format()
+{
+    const char* thisOpName;
+
+    if (type() & 0x1)
+        thisOpName = opName();
+    else if (!type())
+        thisOpName = unscaledOpName();
+    else
+        thisOpName = unprivilegedOpName();
+
+    if (!thisOpName)
+        return A64DOpcode::format();
+
+    appendInstructionName(thisOpName);
+    if (vBit())
+        appendFPRegisterName(rt(), size());
+    else
+        appendRegisterName(rt(), is64BitRT());
+    appendSeparator();
+    appendCharacter('[');
+    appendSPOrRegisterName(rn());
+
+    switch (type()) {
+    case 0: // Unscaled Immediate
+        if (immediate9()) {
+            appendSeparator();
+            appendSignedImmediate(immediate9());
+        }
+        appendCharacter(']');
+        break;
+    case 1: // Immediate Post-Indexed
+        appendCharacter(']');
+        if (immediate9()) {
+            appendSeparator();
+            appendSignedImmediate(immediate9());
+        }
+        break;
+    case 2: // Unprivileged
+        if (immediate9()) {
+            appendSeparator();
+            appendSignedImmediate(immediate9());
+        }
+        appendCharacter(']');
+        break;
+    case 3: // Immediate Pre-Indexed
+        if (immediate9()) {
+            appendSeparator();
+            appendSignedImmediate(immediate9());
+        }
+        appendCharacter(']');
+        appendCharacter('!');
+        break;
+    }
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeLoadStoreRegisterOffset::format()
+{
+    const char* thisOpName = opName();
+
+    if (!thisOpName)
+        return A64DOpcode::format();
+
+    if (!(option() & 0x2))
+        return A64DOpcode::format();
+
+    appendInstructionName(thisOpName);
+    unsigned scale;
+    if (vBit()) {
+        appendFPRegisterName(rt(), size());
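+        // For FP/SIMD accesses the transfer size is opc<1>:size, so 128-bit q accesses scale by 16.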
+        scale = ((opc() & 2)<<1) | size();
+    } else {
+        appendRegisterName(rt(), is64BitRT());
+        scale = size();
+    }
+    appendSeparator();
+    appendCharacter('[');
+    appendSPOrRegisterName(rn());
+    appendSeparator();
+    appendZROrRegisterName(rm(), (option() & 0x3) == 0x3);
+
+    unsigned shift = sBit() ? scale : 0;
+
+    if (option() == 0x3) {
+        if (shift) {
+            appendSeparator();
+            appendString("lsl ");
+            appendUnsignedImmediate(shift);
+        }
+    } else {
+        appendSeparator();
+        appendString(optionName());
+        if (shift)
+            appendUnsignedImmediate(shift);
+    }
+
+    appendCharacter(']');
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeLoadStoreUnsignedImmediate::format()
+{
+    const char* thisOpName = opName();
+
+    if (!thisOpName)
+        return A64DOpcode::format();
+
+    appendInstructionName(thisOpName);
+    unsigned scale;
+    if (vBit()) {
+        appendFPRegisterName(rt(), size());
+        scale = ((opc() & 2)<<1) | size();
+    } else {
+        appendRegisterName(rt(), is64BitRT());
+        scale = size();
+    }
+    appendSeparator();
+    appendCharacter('[');
+    appendSPOrRegisterName(rn());
+
+    if (immediate12()) {
+        appendSeparator();
+        appendUnsignedImmediate(immediate12() << scale);
+    }
+
+    appendCharacter(']');
+
+    return m_formatBuffer;
+}
+
+// A zero in an entry of the table means the instruction is Unallocated
+const char* const A64DOpcodeLogical::s_opNames[8] = {
+    "and", "bic", "orr", "orn", "eor", "eon", "ands", "bics"
+};
+
+const char* A64DOpcodeLogicalShiftedRegister::format()
+{
+    if (!is64Bit() && immediate6() & 0x20)
+        return A64DOpcode::format();
+
+    if (isTst())
+        appendInstructionName("tst");
+    else {
+        if (isMov())
+            appendInstructionName("mov");
+        else
+            appendInstructionName(opName(opNumber()));
+        appendSPOrRegisterName(rd(), is64Bit());
+        appendSeparator();
+    }
+
+    if (!isMov()) {
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+    }
+
+    appendZROrRegisterName(rm(), is64Bit());
+    if (immediate6()) {
+        appendSeparator();
+        appendShiftType(shift());
+        appendUnsignedImmediate(immediate6());
+    }
+
+    return m_formatBuffer;
+}
+
+static unsigned highestBitSet(unsigned value)
+{
+    unsigned result = 0;
+
+    while (value >>= 1)
+        result++;
+
+    return result;
+}
+
+static uint64_t rotateRight(uint64_t value, unsigned width, unsigned shift)
+{
+    uint64_t result = value;
+
+    if (shift)
+        result = (value >> (shift % width)) | (value << (width - shift));
+
+    return result;
+}
+
+static uint64_t replicate(uint64_t value, unsigned width)
+{
+    uint64_t result = 0;
+
+    for (unsigned totalBits = 0; totalBits < 64; totalBits += width)
+        result = (result << width) | value;
+
+    return result;
+}
+
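+// Decodes an ARM64 logical immediate: N:NOT(imms) gives the element size, a run
+// of (imms + 1) set bits is rotated right by immr within each element, and the
+// element is replicated across the register width.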
+const char* A64DOpcodeLogicalImmediate::format()
+{
+    if (!is64Bit() && nBit())
+        return A64DOpcode::format();
+
+    unsigned len = highestBitSet(nBit() << 6 | (immediateS() ^ 0x3f));
+    unsigned levels = (1 << len) - 1; // len number of 1 bits starting at LSB
+
+    if ((immediateS() & levels) == levels)
+        return A64DOpcode::format();
+
+    unsigned r = immediateR() & levels;
+    unsigned s = immediateS() & levels;
+    unsigned eSize = 1 << len;
+    uint64_t pattern = rotateRight((1ull << (s + 1)) - 1, eSize, r);
+
+    uint64_t immediate = replicate(pattern, eSize);
+
+    if (!is64Bit())
+        immediate &= 0xffffffffull;
+
+    if (isTst())
+        appendInstructionName("tst");
+    else {
+        if (isMov())
+            appendInstructionName("mov");
+        else
+            appendInstructionName(opName(opNumber()));
+        appendRegisterName(rd(), is64Bit());
+        appendSeparator();
+    }
+    if (!isMov()) {
+        appendRegisterName(rn(), is64Bit());
+        appendSeparator();
+    }
+    appendUnsignedImmediate64(immediate);
+
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeMoveWide::s_opNames[4] = { "movn", "", "movz", "movk" };
+
+const char* A64DOpcodeMoveWide::format()
+{
+    if (opc() == 1)
+        return A64DOpcode::format();
+    if (!is64Bit() && hw() >= 2)
+        return A64DOpcode::format();
+
+    appendInstructionName(opName());
+    appendRegisterName(rd(), is64Bit());
+    appendSeparator();
+    appendUnsignedImmediate(immediate16());
+    if (hw()) {
+        appendSeparator();
+        appendShiftAmount(hw());
+    }
+
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeTestAndBranchImmediate::format()
+{
+    appendInstructionName(opBit() ? "tbnz" : "tbz");
+    appendRegisterName(rt());
+    appendSeparator();
+    appendUnsignedImmediate(bitNumber());
+    appendSeparator();
+    appendPCRelativeOffset(m_currentPC, static_cast<int32_t>(immediate14()));
+    return m_formatBuffer;
+}
+
+const char* A64DOpcodeUnconditionalBranchImmediate::format()
+{
+    appendInstructionName(op() ? "bl" : "b");
+    appendPCRelativeOffset(m_currentPC, static_cast<int32_t>(immediate26()));
+    return m_formatBuffer;
+}
+
+const char* const A64DOpcodeUnconditionalBranchRegister::s_opNames[8] = { "br", "blr", "ret", "", "eret", "drps", "", "" };
+
+const char* A64DOpcodeUnconditionalBranchRegister::format()
+{
+    unsigned opcValue = opc();
+    if (opcValue == 3 || opcValue > 5)
+        return A64DOpcode::format();
+    if (((opcValue & 0xe) == 0x4) && rn() != 0x1f)
+        return A64DOpcode::format();
+    appendInstructionName(opName());
+    if (opcValue <= 2)
+        appendRegisterName(rn());
+    return m_formatBuffer;
+}
+
+} } // namespace JSC::ARM64Disassembler
diff --git a/Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.h b/Source/JavaScriptCore/disassembler/ARM64/A64DOpcode.h
new file mode 100644 (file)
index 0000000..ed18d30
--- /dev/null
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef A64DOpcode_h
+#define A64DOpcode_h
+
+#include <wtf/Assertions.h>
+#include <stdint.h>
+
+namespace JSC { namespace ARM64Disassembler {
+
+class A64DOpcode {
+private:
+    class OpcodeGroup {
+    public:
+        OpcodeGroup(uint32_t opcodeMask, uint32_t opcodePattern, const char* (*format)(A64DOpcode*))
+            : m_opcodeMask(opcodeMask)
+            , m_opcodePattern(opcodePattern)
+            , m_format(format)
+            , m_next(0)
+        {
+        }
+
+        void setNext(OpcodeGroup* next)
+        {
+            m_next = next;
+        }
+
+        OpcodeGroup* next()
+        {
+            return m_next;
+        }
+
+        bool matches(uint32_t opcode)
+        {
+            return (opcode & m_opcodeMask) == m_opcodePattern;
+        }
+
+        const char* format(A64DOpcode* thisObj)
+        {
+            return m_format(thisObj);
+        }
+
+    private:
+        uint32_t m_opcodeMask;
+        uint32_t m_opcodePattern;
+        const char* (*m_format)(A64DOpcode*);
+        OpcodeGroup* m_next;
+    };
+
+public:
+    static void init();
+
+    A64DOpcode()
+        : m_opcode(0)
+        , m_bufferOffset(0)
+    {
+        init();
+        m_formatBuffer[0] = '\0';
+    }
+
+    const char* disassemble(uint32_t* currentPC);
+
+protected:
+    void setPCAndOpcode(uint32_t*, uint32_t);
+    const char* format();
+
+    static const char* const s_conditionNames[16];
+    static const char* const s_shiftNames[4];
+    static const char* const s_optionName[8];
+    static const char s_FPRegisterPrefix[5];
+
+    static const char* conditionName(unsigned condition) { return s_conditionNames[condition & 0xf]; }
+    static const char* shiftName(unsigned shiftValue) { return s_shiftNames[shiftValue & 0x3]; }
+    const char* optionName() { return s_optionName[option()]; }
+    static char FPRegisterPrefix(unsigned FPRegisterSize)
+    {
+        if (FPRegisterSize > 4)
+            FPRegisterSize = 4;
+        return s_FPRegisterPrefix[FPRegisterSize];
+    }
+
+    unsigned opcodeGroupNumber(uint32_t opcode) { return (opcode >> 24) & 0x1f; }
+
+    bool is64Bit() { return m_opcode & 0x80000000; }
+    unsigned size() { return m_opcode >> 30; }
+    unsigned option() { return (m_opcode >> 13) & 0x7; }
+    unsigned rd() { return m_opcode & 0x1f; }
+    unsigned rt() { return m_opcode & 0x1f; }
+    unsigned rn() { return (m_opcode >> 5) & 0x1f; }
+    unsigned rm() { return (m_opcode >> 16) & 0x1f; }
+
+    void bufferPrintf(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+    void appendInstructionName(const char* instructionName)
+    {
+        bufferPrintf("   %-7.7s", instructionName);
+    }
+
+    void appendRegisterName(unsigned registerNumber, bool is64Bit = true);
+    void appendSPOrRegisterName(unsigned registerNumber, bool is64Bit = true)
+    {
+        if (registerNumber == 31) {
+            bufferPrintf(is64Bit ? "sp" : "wsp");
+            return;
+        }
+        appendRegisterName(registerNumber, is64Bit);
+    }
+
+    void appendZROrRegisterName(unsigned registerNumber, bool is64Bit = true)
+    {
+        if (registerNumber == 31) {
+            bufferPrintf(is64Bit ? "xzr" : "wzr");
+            return;
+        }
+        appendRegisterName(registerNumber, is64Bit);
+    }
+
+    void appendFPRegisterName(unsigned registerNumber, unsigned registerSize);
+
+    void appendSeparator()
+    {
+        bufferPrintf(", ");
+    }
+
+    void appendCharacter(const char c)
+    {
+        bufferPrintf("%c", c);
+    }
+
+    void appendString(const char* string)
+    {
+        bufferPrintf("%s", string);
+    }
+
+    void appendShiftType(unsigned shiftValue)
+    {
+        bufferPrintf("%s ", shiftName(shiftValue));
+    }
+
+    void appendSignedImmediate(int immediate)
+    {
+        bufferPrintf("#%d", immediate);
+    }
+
+    void appendUnsignedImmediate(unsigned immediate)
+    {
+        bufferPrintf("#%u", immediate);
+    }
+
+    void appendUnsignedImmediate64(uint64_t immediate)
+    {
+        bufferPrintf("#0x%llx", immediate);
+    }
+
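+    // Branch immediates count instructions, not bytes; the uint32_t* arithmetic
+    // below scales the offset by the 4-byte instruction size.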
+    void appendPCRelativeOffset(uint32_t* pc, int32_t immediate)
+    {
+        bufferPrintf("0x%llx", reinterpret_cast<uint64_t>(pc + immediate));
+    }
+
+    void appendShiftAmount(unsigned amount)
+    {
+        bufferPrintf("lsl #%u", 16 * amount);
+    }
+
+    static const int bufferSize = 81;
+
+    char m_formatBuffer[bufferSize];
+    uint32_t* m_currentPC;
+    uint32_t m_opcode;
+    int m_bufferOffset;
+
+private:
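+    // Heads of the per-group OpcodeGroup lists, indexed by instruction bits 28..24
+    // (see opcodeGroupNumber()); built by init(), which the constructor calls, with
+    // s_initialized recording whether the table has already been set up.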
+    static OpcodeGroup* opcodeTable[32];
+
+    static bool s_initialized;
+};
+
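+// Gives each opcode class the static trampoline OpcodeGroup stores: it downcasts
+// the A64DOpcode* and forwards to the subclass's member format().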
+#define DEFINE_STATIC_FORMAT(klass, thisObj) \
+   static const char* format(A64DOpcode* thisObj) { return reinterpret_cast< klass *>(thisObj)->format(); }
+
+class A64DOpcodeAddSubtract : public A64DOpcode {
+private:
+    static const char* const s_opNames[4];
+
+public:
+    const char* opName() { return s_opNames[opAndS()]; }
+    const char* cmpName() { return op() ? "cmp" : "cmn"; }
+
+    bool isCMP() { return (sBit() && rd() == 31); }
+    unsigned op() { return (m_opcode >> 30) & 0x1; }
+    unsigned sBit() { return (m_opcode >> 29) & 0x1; }
+    unsigned opAndS() { return (m_opcode >> 29) & 0x3; }
+};
+
+class A64DOpcodeAddSubtractImmediate : public A64DOpcodeAddSubtract {
+public:
+    static const uint32_t mask = 0x1f000000;
+    static const uint32_t pattern = 0x11000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractImmediate, thisObj);
+
+    const char* format();
+
+    bool isMovSP() { return (!opAndS() && !immed12() && ((rd() == 31) || rn() == 31)); }
+    unsigned shift() { return (m_opcode >> 22) & 0x3; }
+    unsigned immed12() { return (m_opcode >> 10) & 0xfff; }
+};
+
+class A64DOpcodeAddSubtractExtendedRegister : public A64DOpcodeAddSubtract {
+public:
+    static const uint32_t mask = 0x1fe00000;
+    static const uint32_t pattern = 0x0b200000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractExtendedRegister, thisObj);
+
+    const char* format();
+
+    unsigned immediate3() { return (m_opcode >> 10) & 0x7; }
+};
+
+class A64DOpcodeAddSubtractShiftedRegister : public A64DOpcodeAddSubtract {
+public:
+    static const uint32_t mask = 0x1f200000;
+    static const uint32_t pattern = 0x0b000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractShiftedRegister, thisObj);
+
+    const char* format();
+
+    bool isNeg() { return (op() && rn() == 31); }
+    const char* negName() { return sBit() ? "negs" : "neg"; }
+    unsigned shift() { return (m_opcode >> 22) & 0x3; }
+    int immediate6() { return (static_cast<int>((m_opcode >> 10) & 0x3f) << 26) >> 26; }
+};
+
+class A64DOpcodeBitfield : public A64DOpcode {
+private:
+    static const char* const s_opNames[3];
+    static const char* const s_extendPseudoOpNames[3][3];
+    static const char* const s_insertOpNames[3];
+    static const char* const s_extractOpNames[3];
+
+public:
+    static const uint32_t mask = 0x1f800000;
+    static const uint32_t pattern = 0x13000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeBitfield, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opc()]; }
+    const char* extendPseudoOpNames(unsigned opSize) { return s_extendPseudoOpNames[opc()][opSize]; }
+    const char* insertOpNames() { return s_insertOpNames[opc()]; }
+    const char* extractOpNames() { return s_extractOpNames[opc()]; }
+
+    unsigned opc() { return (m_opcode >> 29) & 0x3; }
+    unsigned nBit() { return (m_opcode >> 22) & 0x1; }
+    unsigned immediateR() { return (m_opcode >> 16) & 0x3f; }
+    unsigned immediateS() { return (m_opcode >> 10) & 0x3f; }
+};
+
+class A64DOpcodeCompareAndBranchImmediate : public A64DOpcode {
+public:
+    static const uint32_t mask = 0x7e000000;
+    static const uint32_t pattern = 0x34000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeCompareAndBranchImmediate, thisObj);
+
+    const char* format();
+
+    unsigned opBit() { return (m_opcode >> 24) & 0x1; }
+    int immediate19() { return (static_cast<int>((m_opcode >> 5) & 0x7ffff) << 13) >> 13; }
+};
+
+class A64DOpcodeConditionalBranchImmediate : public A64DOpcode {
+public:
+    static const uint32_t mask = 0xff000010;
+    static const uint32_t pattern = 0x54000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeConditionalBranchImmediate, thisObj);
+
+    const char* format();
+
+    unsigned condition() { return m_opcode & 0xf; }
+    int immediate19() { return (static_cast<int>((m_opcode >> 5) & 0x7ffff) << 13) >> 13; }
+};
+
+class A64DOpcodeConditionalSelect : public A64DOpcode {
+private:
+    static const char* const s_opNames[4];
+
+public:
+    static const uint32_t mask = 0x1fe00010;
+    static const uint32_t pattern = 0x1a800000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeConditionalSelect, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opNum()]; }
+    unsigned opNum() { return (op() << 1 | (op2() & 0x1)); }
+    unsigned op() { return (m_opcode >> 30) & 0x1; }
+    unsigned sBit() { return (m_opcode >> 29) & 0x1; }
+    unsigned condition() { return (m_opcode >> 12) & 0xf; }
+    unsigned op2() { return (m_opcode >> 10) & 0x3; }
+};
+
+class A64DOpcodeDataProcessing2Source : public A64DOpcode {
+private:
+    static const char* const s_opNames[8];
+
+public:
+    static const uint32_t mask = 0x5fe00000;
+    static const uint32_t pattern = 0x1ac00000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeDataProcessing2Source, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opNameIndex()]; }
+    unsigned sBit() { return (m_opcode >> 29) & 0x1; }
+    unsigned opCode() { return (m_opcode >> 10) & 0x3f; }
+    unsigned opNameIndex() { return ((m_opcode >> 11) & 0x4) | ((m_opcode >> 10) & 0x3); }
+};
+
+class A64DOpcodeDataProcessing3Source : public A64DOpcode {
+private:
+    static const char* const s_opNames[16];
+    static const char* const s_pseudoOpNames[16];
+
+public:
+    static const uint32_t mask = 0x1f000000;
+    static const uint32_t pattern = 0x1b000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeDataProcessing3Source, thisObj);
+
+    const char* format();
+
+    const char* opName() { return ra() == 31 ? s_pseudoOpNames[opNum() & 0xf] : s_opNames[opNum() & 0xf]; }
+    unsigned ra() { return (m_opcode >> 10) & 0x1f; }
+    unsigned op54() { return (m_opcode >> 29) & 0x3; }
+    unsigned op31() { return (m_opcode >> 21) & 0x7; }
+    unsigned op0() { return (m_opcode >> 15) & 0x1; }
+    unsigned opNum() { return ((m_opcode >> 25) & 0x30) | ((m_opcode >> 20) & 0xe) | ((m_opcode >> 15) & 0x1); }
+};
+
+class A64OpcodeExceptionGeneration : public A64DOpcode {
+public:
+    static const uint32_t mask = 0xff000010;
+    static const uint32_t pattern = 0xd4000000;
+
+    DEFINE_STATIC_FORMAT(A64OpcodeExceptionGeneration, thisObj);
+
+    const char* format();
+
+    unsigned opc() { return (m_opcode>>21) & 0x7; }
+    unsigned op2() { return (m_opcode>>2) & 0x7; }
+    unsigned ll() { return m_opcode & 0x3; }
+    int immediate16() { return (static_cast<int>((m_opcode >> 5) & 0xffff) << 16) >> 16; }
+};
+
+class A64DOpcodeExtract : public A64DOpcode {
+public:
+    static const uint32_t mask = 0x1f800000;
+    static const uint32_t pattern = 0x13800000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeExtract, thisObj);
+
+    const char* format();
+
+    unsigned op21() { return (m_opcode >> 29) & 0x3; }
+    unsigned nBit() { return (m_opcode >> 22) & 0x1; }
+    unsigned o0Bit() { return (m_opcode >> 21) & 0x1; }
+    unsigned immediateS() { return (m_opcode >> 10) & 0x3f; }
+};
+
+class A64DOpcodeFloatingPointOps : public A64DOpcode {
+public:
+    unsigned mBit() { return (m_opcode >> 31) & 0x1; }
+    unsigned sBit() { return (m_opcode >> 29) & 0x1; }
+    unsigned type() { return (m_opcode >> 22) & 0x3; }
+};
+
+class A64DOpcodeFloatingPointCompare : public A64DOpcodeFloatingPointOps {
+private:
+    static const char* const s_opNames[16];
+    
+public:
+    static const uint32_t mask = 0x5f203c00;
+    static const uint32_t pattern = 0x1e202000;
+    
+    DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointCompare, thisObj);
+    
+    const char* format();
+
+    const char* opName() { return (opNum() & 0x2) ? "fcmpe" : "fcmp"; }
+
+    unsigned op() { return (m_opcode >> 14) & 0x3; }
+    unsigned opCode2() { return m_opcode & 0x1f; }
+    unsigned opNum() { return (m_opcode >> 3) & 0x3; }
+};
+
+class A64DOpcodeFloatingPointDataProcessing1Source : public A64DOpcodeFloatingPointOps {
+private:
+    static const char* const s_opNames[16];
+
+public:
+    static const uint32_t mask = 0x5f207c00;
+    static const uint32_t pattern = 0x1e204000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointDataProcessing1Source, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opNum()]; }
+
+    unsigned opNum() { return (m_opcode >> 15) & 0x3f; }
+};
+
+class A64DOpcodeFloatingPointDataProcessing2Source : public A64DOpcodeFloatingPointOps {
+private:
+    static const char* const s_opNames[16];
+
+public:
+    static const uint32_t mask = 0x5f200800;
+    static const uint32_t pattern = 0x1e200800;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointDataProcessing2Source, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opNum()]; }
+
+    unsigned opNum() { return (m_opcode >> 12) & 0xf; }
+};
+
+class A64DOpcodeFloatingFixedPointConversions : public A64DOpcodeFloatingPointOps {
+private:
+    static const char* const s_opNames[4];
+    
+public:
+    static const uint32_t mask = 0x5f200000;
+    static const uint32_t pattern = 0x1e000000;
+    
+    DEFINE_STATIC_FORMAT(A64DOpcodeFloatingFixedPointConversions, thisObj);
+    
+    const char* format();
+    
+    const char* opName() { return s_opNames[opNum()]; }
+    unsigned rmode() { return (m_opcode >> 19) & 0x3; }
+    unsigned opcode() { return (m_opcode >> 16) & 0x7; }
+    unsigned scale() { return (m_opcode >> 10) & 0x3f; }
+    unsigned opNum() { return (m_opcode >> 16) & 0x3; }
+};
+
+class A64DOpcodeFloatingPointIntegerConversions : public A64DOpcodeFloatingPointOps {
+private:
+    static const char* const s_opNames[32];
+    
+public:
+    static const uint32_t mask = 0x5f20fc00;
+    static const uint32_t pattern = 0x1e200000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointIntegerConversions, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opNum()]; }
+    unsigned rmode() { return (m_opcode >> 19) & 0x3; }
+    unsigned opcode() { return (m_opcode >> 16) & 0x7; }
+    unsigned opNum() { return (m_opcode >> 16) & 0x1f; }
+};
+
+class A64DOpcodeHint : public A64DOpcode {
+private:
+    static const char* const s_opNames[6];
+
+public:
+    static const uint32_t mask = 0xfffff01f;
+    static const uint32_t pattern = 0xd503201f;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeHint, thisObj);
+
+    const char* format();
+
+    const char* opName() { return immediate7() <= 5 ? s_opNames[immediate7()] : "hint"; }
+    unsigned immediate7() { return (m_opcode >> 5) & 0x7f; }
+};
+
+class A64DOpcodeLoadStore : public A64DOpcode {
+private:
+    static const char* const s_opNames[32];
+
+protected:
+    const char* opName()
+    {
+        return s_opNames[opNumber()];
+    }
+
+    unsigned size() { return (m_opcode >> 30) & 0x3; }
+    unsigned vBit() { return (m_opcode >> 26) & 0x1; }
+    unsigned opc() { return (m_opcode >> 22) & 0x3; }
+    unsigned opNumber() { return (size() << 3) | (vBit() << 2) | opc(); }
+    bool is64BitRT() { return ((opNumber() & 0x17) == 0x02) || ((opNumber() & 0x1e) == 0x18); }
+};
+
+class A64DOpcodeLoadStoreImmediate : public A64DOpcodeLoadStore {
+private:
+    static const char* const s_unprivilegedOpNames[32];
+    static const char* const s_unscaledOpNames[32];
+
+public:
+    static const uint32_t mask = 0x3b200000;
+    static const uint32_t pattern = 0x38000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreImmediate, thisObj);
+
+    const char* format();
+
+    const char* unprivilegedOpName()
+    {
+        return s_unprivilegedOpNames[opNumber()];
+    }
+    const char* unscaledOpName()
+    {
+        return s_unscaledOpNames[opNumber()];
+    }
+    unsigned type() { return (m_opcode >> 10) & 0x3; }
+    int immediate9() { return (static_cast<int>((m_opcode >> 12) & 0x1ff) << 23) >> 23; }
+};
+
+class A64DOpcodeLoadStoreRegisterOffset : public A64DOpcodeLoadStore {
+public:
+    static const uint32_t mask = 0x3b200c00;
+    static const uint32_t pattern = 0x38200800;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreRegisterOffset, thisObj);
+
+    const char* format();
+
+    unsigned option() { return (m_opcode >> 13) & 0x7; }
+    int sBit() { return (m_opcode >> 12) & 0x1; }
+};
+
+class A64DOpcodeLoadStoreUnsignedImmediate : public A64DOpcodeLoadStore {
+public:
+    static const uint32_t mask = 0x3b000000;
+    static const uint32_t pattern = 0x39000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreUnsignedImmediate, thisObj);
+
+    const char* format();
+
+    unsigned immediate12() { return (m_opcode >> 10) & 0xfff; }
+};
+
+class A64DOpcodeLogical : public A64DOpcode {
+private:
+    static const char* const s_opNames[8];
+
+public:
+    const char* opName(unsigned opNumber)
+    {
+        return s_opNames[opNumber & 0x7];
+    }
+
+    unsigned opc() { return (m_opcode >> 29) & 0x3; }
+    unsigned nBit() { return (m_opcode >> 21) & 0x1; }
+};
+
+class A64DOpcodeLogicalImmediate : public A64DOpcodeLogical {
+public:
+    static const uint32_t mask = 0x1f800000;
+    static const uint32_t pattern = 0x12000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeLogicalImmediate, thisObj);
+
+    const char* format();
+
+    bool isTst() { return ((opNumber() == 6) && (rd() == 31)); }
+    bool isMov() { return ((opNumber() == 2) && (rn() == 31)); }
+    unsigned opNumber() { return opc() << 1; }
+    unsigned nBit() { return (m_opcode >> 22) & 0x1; }
+    unsigned immediateR() { return (m_opcode >> 16) & 0x3f; }
+    unsigned immediateS() { return (m_opcode >> 10) & 0x3f; }
+};
+
+class A64DOpcodeLogicalShiftedRegister : public A64DOpcodeLogical {
+public:
+    static const uint32_t mask = 0x1f000000;
+    static const uint32_t pattern = 0x0a000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeLogicalShiftedRegister, thisObj);
+
+    const char* format();
+
+    bool isTst() { return ((opNumber() == 6) && (rd() == 31)); }
+    bool isMov() { return ((opNumber() == 2) && (rn() == 31)); }
+    unsigned opNumber() { return (opc() << 1) | nBit(); }
+    unsigned shift() { return (m_opcode >> 22) & 0x3; }
+    int immediate6() { return (static_cast<int>((m_opcode >> 10) & 0x3f) << 26) >> 26; }
+};
+
+class A64DOpcodeMoveWide : public A64DOpcode {
+private:
+    static const char* const s_opNames[4];
+
+public:
+    static const uint32_t mask = 0x1f800000;
+    static const uint32_t pattern = 0x12800000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeMoveWide, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opc()]; }
+    unsigned opc() { return (m_opcode >> 29) & 0x3; }
+    unsigned hw() { return (m_opcode >> 21) & 0x3; }
+    unsigned immediate16() { return (m_opcode >> 5) & 0xffff; }
+};
+
+class A64DOpcodeTestAndBranchImmediate : public A64DOpcode {
+public:
+    static const uint32_t mask = 0x7e000000;
+    static const uint32_t pattern = 0x36000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeTestAndBranchImmediate, thisObj);
+
+    const char* format();
+
+    unsigned bitNumber() { return ((m_opcode >> 26) & 0x20) | ((m_opcode >> 19) & 0x1f); }
+    unsigned opBit() { return (m_opcode >> 24) & 0x1; }
+    int immediate14() { return (static_cast<int>((m_opcode >> 5) & 0x3fff) << 18) >> 18; }
+};
+
+class A64DOpcodeUnconditionalBranchImmediate : public A64DOpcode {
+public:
+    static const uint32_t mask = 0x7c000000;
+    static const uint32_t pattern = 0x14000000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeUnconditionalBranchImmediate, thisObj);
+
+    const char* format();
+
+    unsigned op() { return (m_opcode >> 31) & 0x1; }
+    int immediate26() { return (static_cast<int>(m_opcode & 0x3ffffff) << 6) >> 6; }
+};
+
+class A64DOpcodeUnconditionalBranchRegister : public A64DOpcode {
+private:
+    static const char* const s_opNames[8];
+
+public:
+    static const uint32_t mask = 0xfe1ffc1f;
+    static const uint32_t pattern = 0xd61f0000;
+
+    DEFINE_STATIC_FORMAT(A64DOpcodeUnconditionalBranchRegister, thisObj);
+
+    const char* format();
+
+    const char* opName() { return s_opNames[opc()]; }
+    unsigned opc() { return (m_opcode >> 21) & 0xf; }
+};
+
+} } // namespace JSC::ARM64Disassembler
+
+using JSC::ARM64Disassembler::A64DOpcode;
+
+#endif // A64DOpcode_h
diff --git a/Source/JavaScriptCore/disassembler/ARM64Disassembler.cpp b/Source/JavaScriptCore/disassembler/ARM64Disassembler.cpp
new file mode 100644 (file)
index 0000000..713c1a7
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Disassembler.h"
+
+#if USE(ARM64_DISASSEMBLER)
+
+#include "ARM64/A64DOpcode.h"
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out, InstructionSubsetHint)
+{
+    A64DOpcode arm64Opcode;
+
+    uint32_t* currentPC = reinterpret_cast<uint32_t*>(codePtr.executableAddress());
+    size_t byteCount = size;
+
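+    // A64 instructions are a fixed 4 bytes, so the buffer is walked one 32-bit word at a time.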
+    while (byteCount) {
+        char pcString[20];
+        snprintf(pcString, sizeof(pcString), "0x%lx", reinterpret_cast<unsigned long>(currentPC));
+        out.printf("%s%16s: %s\n", prefix, pcString, arm64Opcode.disassemble(currentPC));
+        currentPC++;
+        byteCount -= sizeof(uint32_t);
+    }
+
+    return true;
+}
+
+} // namespace JSC
+
+#endif // USE(ARM64_DISASSEMBLER)
+
index 78fdfa4..6385448 100644 (file)
@@ -303,6 +303,8 @@ typedef ppc_thread_state_t PlatformThreadRegisters;
 typedef ppc_thread_state64_t PlatformThreadRegisters;
 #elif CPU(ARM)
 typedef arm_thread_state_t PlatformThreadRegisters;
+#elif CPU(ARM64)
+typedef arm_thread_state64_t PlatformThreadRegisters;
 #else
 #error Unknown Architecture
 #endif
@@ -336,6 +338,9 @@ static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, P
 #elif CPU(ARM)
     unsigned user_count = ARM_THREAD_STATE_COUNT;
     thread_state_flavor_t flavor = ARM_THREAD_STATE;
+#elif CPU(ARM64)
+    unsigned user_count = ARM_THREAD_STATE64_COUNT;
+    thread_state_flavor_t flavor = ARM_THREAD_STATE64;
 #else
 #error Unknown Architecture
 #endif
@@ -398,6 +403,8 @@ static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
     return reinterpret_cast<void*>(regs.__r1);
 #elif CPU(ARM)
     return reinterpret_cast<void*>(regs.__sp);
+#elif CPU(ARM64)
+    return reinterpret_cast<void*>(regs.__sp);
 #else
 #error Unknown Architecture
 #endif
index 366f256..c638059 100644 (file)
@@ -37,7 +37,7 @@
 #define ENABLE_SUPER_REGION 0
 
 #ifndef ENABLE_SUPER_REGION
-#if USE(JSVALUE64)
+#if USE(JSVALUE64) && !CPU(ARM64)
 #define ENABLE_SUPER_REGION 1
 #else
 #define ENABLE_SUPER_REGION 0
index 10c809d..6c48c90 100644 (file)
@@ -76,7 +76,7 @@ public:
     }
 #endif // CPU(X86_64) || CPU(X86)
 
-#if CPU(ARM)
+#if CPU(ARM) || CPU(ARM64)
     ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
     {
         move(linkRegister, reg);
@@ -230,7 +230,7 @@ public:
         move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
         storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
 
-#if CPU(X86_64) || CPU(ARM) || CPU(MIPS) || CPU(SH4)
+#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
         move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
         move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
index fd6b753..9f7deb5 100644 (file)
@@ -558,7 +558,7 @@ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg
             swap(destB, destC);
     }
 
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(ARM64)
     template<FPRReg destA, FPRReg destB>
     void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB)
     {
@@ -622,7 +622,7 @@ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg
 #define POKE_ARGUMENT_OFFSET 0
 #endif
 
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(ARM64)
     ALWAYS_INLINE void setupArguments(FPRReg arg1)
     {
         moveDouble(arg1, FPRInfo::argumentFPR0);
@@ -870,7 +870,7 @@ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg
         move(arg2, GPRInfo::argumentGPR2);
         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
     }
-#if CPU(X86_64)    
+#if CPU(X86_64) || CPU(ARM64)
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm64 arg2)
     {
         move(arg1, GPRInfo::argumentGPR1);
index fe63ddf..1f3ad03 100644 (file)
@@ -102,7 +102,7 @@ class DemandExecutableAllocator;
 #endif
 
 #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-#if CPU(ARM)
+#if CPU(ARM) || CPU(ARM64)
 static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
 #elif CPU(X86_64)
 static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
diff --git